src/lxc/cgroups/cgfsng.c
1 /*
2 * lxc: linux Container library
3 *
4 * Copyright © 2016 Canonical Ltd.
5 *
6 * Authors:
7 * Serge Hallyn <serge.hallyn@ubuntu.com>
8 * Christian Brauner <christian.brauner@ubuntu.com>
9 *
10 * This library is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
14 *
15 * This library is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
19 *
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with this library; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 */
24
25 /*
26 * cgfs-ng.c: this is a new, simplified implementation of a filesystem
27 * cgroup backend. The original cgfs.c was designed to be as flexible
28 * as possible. It would try to find cgroup filesystems no matter where
29 * or how you had them mounted, and deduce the most usable mount for
30 * each controller.
31 *
32 * This new implementation assumes that cgroup filesystems are mounted
33 * under /sys/fs/cgroup/clist where clist is either the controller, or
34 * a comma-separated list of controllers.
35 */
36
37 #ifndef _GNU_SOURCE
38 #define _GNU_SOURCE 1
39 #endif
40 #include <ctype.h>
41 #include <dirent.h>
42 #include <errno.h>
43 #include <grp.h>
44 #include <linux/kdev_t.h>
45 #include <linux/types.h>
46 #include <stdint.h>
47 #include <stdio.h>
48 #include <stdlib.h>
49 #include <string.h>
50 #include <sys/types.h>
51 #include <unistd.h>
52
53 #include "caps.h"
54 #include "cgroup.h"
55 #include "cgroup_utils.h"
56 #include "commands.h"
57 #include "conf.h"
58 #include "config.h"
59 #include "log.h"
60 #include "macro.h"
61 #include "storage/storage.h"
62 #include "utils.h"
63
64 #ifndef HAVE_STRLCPY
65 #include "include/strlcpy.h"
66 #endif
67
68 #ifndef HAVE_STRLCAT
69 #include "include/strlcat.h"
70 #endif
71
72 lxc_log_define(cgfsng, cgroup);
73
74 static void free_string_list(char **clist)
75 {
76 int i;
77
78 if (!clist)
79 return;
80
81 for (i = 0; clist[i]; i++)
82 free(clist[i]);
83
84 free(clist);
85 }
86
87 /* Given a pointer to a null-terminated array of pointers, realloc to add one
88 * entry, and point the new entry to NULL. Do not fail. Return the index to the
89 * second-to-last entry - that is, the one which is now available for use
90 * (keeping the list null-terminated).
91 */
92 static int append_null_to_list(void ***list)
93 {
94 int newentry = 0;
95
96 if (*list)
97 for (; (*list)[newentry]; newentry++)
98 ;
99
100 *list = must_realloc(*list, (newentry + 2) * sizeof(void **));
101 (*list)[newentry + 1] = NULL;
102 return newentry;
103 }
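
/* Example usage of append_null_to_list() (illustrative sketch, not tied to a
 * specific caller): growing a NULL-terminated string list one slot at a time.
 * Starting from list == NULL, the first call reallocates to two slots and
 * returns index 0, the next call to three slots and returns index 1, and so
 * on; the terminating NULL is preserved after every call:
 *
 *	char **list = NULL;
 *	int idx = append_null_to_list((void ***)&list);
 *	list[idx] = must_copy_string("memory");	// { "memory", NULL }
 *	idx = append_null_to_list((void ***)&list);
 *	list[idx] = must_copy_string("pids");	// { "memory", "pids", NULL }
 */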
104
105 /* Given a null-terminated array of strings, check whether @entry is one of the
106 * strings.
107 */
108 static bool string_in_list(char **list, const char *entry)
109 {
110 int i;
111
112 if (!list)
113 return false;
114
115 for (i = 0; list[i]; i++)
116 if (strcmp(list[i], entry) == 0)
117 return true;
118
119 return false;
120 }
121
122 /* Return a copy of @entry prepending "name=", i.e. turn "systemd" into
123 * "name=systemd". Do not fail.
124 */
125 static char *cg_legacy_must_prefix_named(char *entry)
126 {
127 size_t len;
128 char *prefixed;
129
130 len = strlen(entry);
131 prefixed = must_realloc(NULL, len + 6);
132
133 memcpy(prefixed, "name=", STRLITERALLEN("name="));
134 memcpy(prefixed + STRLITERALLEN("name="), entry, len);
135 prefixed[len + 5] = '\0';
136
137 return prefixed;
138 }
139
140 /* Append an entry to the clist. Do not fail. @clist must be NULL the first time
141 * we are called.
142 *
143 * We also handle named subsystems here. Any controller which is not a kernel
144 * subsystem gets a "name=" prefix. Any controller which is both a kernel and a
145 * named subsystem is refused, because we cannot tell which of the two we have.
146 * (TODO: We could work around this in some cases by just remounting to be
147 * unambiguous, or by comparing mountpoint contents with current cgroup.)
148 *
149 * The last entry will always be NULL.
150 */
151 static void must_append_controller(char **klist, char **nlist, char ***clist,
152 char *entry)
153 {
154 int newentry;
155 char *copy;
156
157 if (string_in_list(klist, entry) && string_in_list(nlist, entry)) {
158 ERROR("Refusing to use ambiguous controller \"%s\"", entry);
159 ERROR("It is both a named and kernel subsystem");
160 return;
161 }
162
163 newentry = append_null_to_list((void ***)clist);
164
165 if (strncmp(entry, "name=", 5) == 0)
166 copy = must_copy_string(entry);
167 else if (string_in_list(klist, entry))
168 copy = must_copy_string(entry);
169 else
170 copy = cg_legacy_must_prefix_named(entry);
171
172 (*clist)[newentry] = copy;
173 }
174
175 /* Given a handler's cgroup data, return the struct hierarchy for the controller
176 * @c, or NULL if there is none.
177 */
178 struct hierarchy *get_hierarchy(struct cgroup_ops *ops, const char *controller)
179 {
180 int i;
181
182 errno = ENOENT;
183
184 if (!ops->hierarchies) {
185 TRACE("There are no useable cgroup controllers");
186 return NULL;
187 }
188
189 for (i = 0; ops->hierarchies[i]; i++) {
190 if (!controller) {
191 /* This is the empty unified hierarchy. */
192 if (ops->hierarchies[i]->controllers &&
193 !ops->hierarchies[i]->controllers[0])
194 return ops->hierarchies[i];
195
196 continue;
197 }
198
199 if (string_in_list(ops->hierarchies[i]->controllers, controller))
200 return ops->hierarchies[i];
201 }
202
203 if (controller)
204 WARN("There is no useable %s controller", controller);
205 else
206 WARN("There is no empty unified cgroup hierarchy");
207
208 return NULL;
209 }
210
211 #define BATCH_SIZE 50
212 static void batch_realloc(char **mem, size_t oldlen, size_t newlen)
213 {
214 int newbatches = (newlen / BATCH_SIZE) + 1;
215 int oldbatches = (oldlen / BATCH_SIZE) + 1;
216
217 if (!*mem || newbatches > oldbatches) {
218 *mem = must_realloc(*mem, newbatches * BATCH_SIZE);
219 }
220 }
221
222 static void append_line(char **dest, size_t oldlen, char *new, size_t newlen)
223 {
224 size_t full = oldlen + newlen;
225
226 batch_realloc(dest, oldlen, full + 1);
227
228 memcpy(*dest + oldlen, new, newlen + 1);
229 }
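
/* Illustration of the batching arithmetic above: the buffer capacity is always
 * a whole number of BATCH_SIZE (50 byte) chunks, and append_line() only
 * reallocates when the new total length crosses a chunk boundary. E.g. with
 * oldlen = 40 and a 20 byte line, full + 1 = 61, so newbatches = 2 exceeds
 * oldbatches = 1 and the buffer grows to 100 bytes; appending another 20 byte
 * line (full + 1 = 81) stays inside the same two-chunk allocation and triggers
 * no realloc.
 */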
230
231 /* Slurp in a whole file */
232 static char *read_file(const char *fnam)
233 {
234 FILE *f;
235 char *line = NULL, *buf = NULL;
236 size_t len = 0, fulllen = 0;
237 int linelen;
238
239 f = fopen(fnam, "r");
240 if (!f)
241 return NULL;
242 while ((linelen = getline(&line, &len, f)) != -1) {
243 append_line(&buf, fulllen, line, linelen);
244 fulllen += linelen;
245 }
246 fclose(f);
247 free(line);
248 return buf;
249 }
250
251 /* Taken from the kernel sources and modified. */
252 #define NBITS 32 /* bits in uint32_t */
253 #define DIV_ROUND_UP(n, d) (((n) + (d)-1) / (d))
254 #define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, NBITS)
255
256 static void set_bit(unsigned bit, uint32_t *bitarr)
257 {
258 bitarr[bit / NBITS] |= (1 << (bit % NBITS));
259 }
260
261 static void clear_bit(unsigned bit, uint32_t *bitarr)
262 {
263 bitarr[bit / NBITS] &= ~(1 << (bit % NBITS));
264 }
265
266 static bool is_set(unsigned bit, uint32_t *bitarr)
267 {
268 return (bitarr[bit / NBITS] & (1 << (bit % NBITS))) != 0;
269 }
270
271 /* Create cpumask from cpulist aka turn:
272 *
273 * 0,2-3
274 *
275 * into bit array
276 *
277 * 1 0 1 1
278 */
279 static uint32_t *lxc_cpumask(char *buf, size_t nbits)
280 {
281 char *token;
282 size_t arrlen;
283 uint32_t *bitarr;
284
285 arrlen = BITS_TO_LONGS(nbits);
286 bitarr = calloc(arrlen, sizeof(uint32_t));
287 if (!bitarr)
288 return NULL;
289
290 lxc_iterate_parts(token, buf, ",") {
291 errno = 0;
292 unsigned end, start;
293 char *range;
294
295 start = strtoul(token, NULL, 0);
296 end = start;
297 range = strchr(token, '-');
298 if (range)
299 end = strtoul(range + 1, NULL, 0);
300
301 if (!(start <= end)) {
302 free(bitarr);
303 return NULL;
304 }
305
306 if (end >= nbits) {
307 free(bitarr);
308 return NULL;
309 }
310
311 while (start <= end)
312 set_bit(start++, bitarr);
313 }
314
315 return bitarr;
316 }
317
318 /* Turn cpumask into simple, comma-separated cpulist. */
319 static char *lxc_cpumask_to_cpulist(uint32_t *bitarr, size_t nbits)
320 {
321 int ret;
322 size_t i;
323 char **cpulist = NULL;
324 char numstr[INTTYPE_TO_STRLEN(size_t)] = {0};
325
326 for (i = 0; i < nbits; i++) {
327 if (!is_set(i, bitarr))
328 continue;
329
330 ret = snprintf(numstr, sizeof(numstr), "%zu", i);
331 if (ret < 0 || (size_t)ret >= sizeof(numstr)) {
332 lxc_free_array((void **)cpulist, free);
333 return NULL;
334 }
335
336 ret = lxc_append_string(&cpulist, numstr);
337 if (ret < 0) {
338 lxc_free_array((void **)cpulist, free);
339 return NULL;
340 }
341 }
342
343 if (!cpulist)
344 return NULL;
345
346 return lxc_string_join(",", (const char **)cpulist, false);
347 }
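
/* Illustrative round trip through the two helpers above (the values are just
 * an example): for the cpulist "0,2-3" and nbits = 4, lxc_cpumask() returns a
 * one-word array with bits 0, 2 and 3 set (binary 1101). Feeding that array
 * back through lxc_cpumask_to_cpulist() yields the fully expanded,
 * comma-separated form "0,2,3"; ranges are not re-compressed.
 *
 *	char buf[] = "0,2-3";			// tokenized in place
 *	uint32_t *mask = lxc_cpumask(buf, 4);	// bits 0, 2 and 3 set
 *	char *list = lxc_cpumask_to_cpulist(mask, 4);	// "0,2,3"
 *	free(list);
 *	free(mask);
 */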
348
349 static ssize_t get_max_cpus(char *cpulist)
350 {
351 char *c1, *c2;
352 char *maxcpus = cpulist;
353 size_t cpus = 0;
354
355 c1 = strrchr(maxcpus, ',');
356 if (c1)
357 c1++;
358
359 c2 = strrchr(maxcpus, '-');
360 if (c2)
361 c2++;
362
363 if (!c1 && !c2)
364 c1 = maxcpus;
365 else if (c1 > c2)
366 c2 = c1;
367 else if (c1 < c2)
368 c1 = c2;
369 else if (!c1 && c2)
370 c1 = c2;
371
372 errno = 0;
373 cpus = strtoul(c1, NULL, 0);
374 if (errno != 0)
375 return -1;
376
377 return cpus;
378 }
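
/* Example inputs for get_max_cpus() (illustrative): the function only inspects
 * the last comma- or dash-delimited token, so "0-3,7" and "7" both yield 7 and
 * "0-15" yields 15. It does not count cpus; for the sorted lists the kernel
 * produces this is the highest cpu index mentioned in the list.
 */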
379
380 #define __ISOL_CPUS "/sys/devices/system/cpu/isolated"
381 static bool cg_legacy_filter_and_set_cpus(char *path, bool am_initialized)
382 {
383 int ret;
384 ssize_t i;
385 char *lastslash, *fpath, oldv;
386 ssize_t maxisol = 0, maxposs = 0;
387 char *cpulist = NULL, *isolcpus = NULL, *posscpus = NULL;
388 uint32_t *isolmask = NULL, *possmask = NULL;
389 bool bret = false, flipped_bit = false;
390
391 lastslash = strrchr(path, '/');
392 if (!lastslash) {
393 ERROR("Failed to detect \"/\" in \"%s\"", path);
394 return bret;
395 }
396 oldv = *lastslash;
397 *lastslash = '\0';
398 fpath = must_make_path(path, "cpuset.cpus", NULL);
399 posscpus = read_file(fpath);
400 if (!posscpus) {
401 SYSERROR("Failed to read file \"%s\"", fpath);
402 goto on_error;
403 }
404
405 /* Get maximum number of cpus found in possible cpuset. */
406 maxposs = get_max_cpus(posscpus);
407 if (maxposs < 0 || maxposs >= INT_MAX - 1)
408 goto on_error;
409
410 if (!file_exists(__ISOL_CPUS)) {
411 /* This system doesn't expose isolated cpus. */
412 DEBUG("The path \""__ISOL_CPUS"\" to read isolated cpus from does not exist");
413 cpulist = posscpus;
414 /* No isolated cpus but we weren't already initialized by
415 * someone. We should simply copy the parent's cpuset.cpus
416 * values.
417 */
418 if (!am_initialized) {
419 DEBUG("Copying cpu settings of parent cgroup");
420 goto copy_parent;
421 }
422 /* No isolated cpus but we were already initialized by someone.
423 * Nothing more to do for us.
424 */
425 goto on_success;
426 }
427
428 isolcpus = read_file(__ISOL_CPUS);
429 if (!isolcpus) {
430 SYSERROR("Failed to read file \""__ISOL_CPUS"\"");
431 goto on_error;
432 }
433 if (!isdigit(isolcpus[0])) {
434 TRACE("No isolated cpus detected");
435 cpulist = posscpus;
436 /* No isolated cpus but we weren't already initialized by
437 * someone. We should simply copy the parent's cpuset.cpus
438 * values.
439 */
440 if (!am_initialized) {
441 DEBUG("Copying cpu settings of parent cgroup");
442 goto copy_parent;
443 }
444 /* No isolated cpus but we were already initialized by someone.
445 * Nothing more to do for us.
446 */
447 goto on_success;
448 }
449
450 /* Get maximum number of cpus found in isolated cpuset. */
451 maxisol = get_max_cpus(isolcpus);
452 if (maxisol < 0 || maxisol >= INT_MAX - 1)
453 goto on_error;
454
455 if (maxposs < maxisol)
456 maxposs = maxisol;
457 maxposs++;
458
459 possmask = lxc_cpumask(posscpus, maxposs);
460 if (!possmask) {
461 ERROR("Failed to create cpumask for possible cpus");
462 goto on_error;
463 }
464
465 isolmask = lxc_cpumask(isolcpus, maxposs);
466 if (!isolmask) {
467 ERROR("Failed to create cpumask for isolated cpus");
468 goto on_error;
469 }
470
471 for (i = 0; i < maxposs; i++) {
472 if (!is_set(i, isolmask) || !is_set(i, possmask))
473 continue;
474
475 flipped_bit = true;
476 clear_bit(i, possmask);
477 }
478
479 if (!flipped_bit) {
480 DEBUG("No isolated cpus present in cpuset");
481 goto on_success;
482 }
483 DEBUG("Removed isolated cpus from cpuset");
484
485 cpulist = lxc_cpumask_to_cpulist(possmask, maxposs);
486 if (!cpulist) {
487 ERROR("Failed to create cpu list");
488 goto on_error;
489 }
490
491 copy_parent:
492 *lastslash = oldv;
493 free(fpath);
494 fpath = must_make_path(path, "cpuset.cpus", NULL);
495 ret = lxc_write_to_file(fpath, cpulist, strlen(cpulist), false, 0666);
496 if (ret < 0) {
497 SYSERROR("Failed to write cpu list to \"%s\"", fpath);
498 goto on_error;
499 }
500
501 on_success:
502 bret = true;
503
504 on_error:
505 free(fpath);
506
507 free(isolcpus);
508 free(isolmask);
509
510 if (posscpus != cpulist)
511 free(posscpus);
512 free(possmask);
513
514 free(cpulist);
515 return bret;
516 }
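
/* Worked example for the function above (values are illustrative): with the
 * parent's cpuset.cpus reading "0-7" and /sys/devices/system/cpu/isolated
 * reading "3-4", maxposs becomes 8, both strings are turned into bitmasks,
 * bits 3 and 4 are cleared from the possible mask, and the cgroup's
 * cpuset.cpus ends up as "0,1,2,5,6,7". If the isolated file is absent or
 * empty, the parent's value is either copied verbatim (when the cgroup was not
 * yet initialized) or left untouched.
 */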
517
518 /* Copy contents of parent(@path)/@file to @path/@file */
519 static bool copy_parent_file(char *path, char *file)
520 {
521 int ret;
522 char *fpath, *lastslash, oldv;
523 int len = 0;
524 char *value = NULL;
525
526 lastslash = strrchr(path, '/');
527 if (!lastslash) {
528 ERROR("Failed to detect \"/\" in \"%s\"", path);
529 return false;
530 }
531 oldv = *lastslash;
532 *lastslash = '\0';
533 fpath = must_make_path(path, file, NULL);
534 len = lxc_read_from_file(fpath, NULL, 0);
535 if (len <= 0)
536 goto on_error;
537
538 value = must_realloc(NULL, len + 1);
539 ret = lxc_read_from_file(fpath, value, len);
540 if (ret != len)
541 goto on_error;
542 free(fpath);
543
544 *lastslash = oldv;
545 fpath = must_make_path(path, file, NULL);
546 ret = lxc_write_to_file(fpath, value, len, false, 0666);
547 if (ret < 0)
548 SYSERROR("Failed to write \"%s\" to file \"%s\"", value, fpath);
549 free(fpath);
550 free(value);
551 return ret >= 0;
552
553 on_error:
554 SYSERROR("Failed to read file \"%s\"", fpath);
555 free(fpath);
556 free(value);
557 return false;
558 }
559
560 /* Initialize the cpuset hierarchy in the first directory of @cgname and set
561 * cgroup.clone_children so that children inherit settings. Since the
562 * h->base_path is populated by init or ourselves, we know it is already
563 * initialized.
564 */
565 static bool cg_legacy_handle_cpuset_hierarchy(struct hierarchy *h, char *cgname)
566 {
567 int ret;
568 char v;
569 char *cgpath, *clonechildrenpath, *slash;
570
571 if (!string_in_list(h->controllers, "cpuset"))
572 return true;
573
574 if (*cgname == '/')
575 cgname++;
576 slash = strchr(cgname, '/');
577 if (slash)
578 *slash = '\0';
579
580 cgpath = must_make_path(h->mountpoint, h->container_base_path, cgname, NULL);
581 if (slash)
582 *slash = '/';
583
584 ret = mkdir(cgpath, 0755);
585 if (ret < 0) {
586 if (errno != EEXIST) {
587 SYSERROR("Failed to create directory \"%s\"", cgpath);
588 free(cgpath);
589 return false;
590 }
591 }
592
593 clonechildrenpath = must_make_path(cgpath, "cgroup.clone_children", NULL);
594 /* unified hierarchy doesn't have clone_children */
595 if (!file_exists(clonechildrenpath)) {
596 free(clonechildrenpath);
597 free(cgpath);
598 return true;
599 }
600
601 ret = lxc_read_from_file(clonechildrenpath, &v, 1);
602 if (ret < 0) {
603 SYSERROR("Failed to read file \"%s\"", clonechildrenpath);
604 free(clonechildrenpath);
605 free(cgpath);
606 return false;
607 }
608
609 /* Make sure any isolated cpus are removed from cpuset.cpus. */
610 if (!cg_legacy_filter_and_set_cpus(cgpath, v == '1')) {
611 SYSERROR("Failed to remove isolated cpus");
612 free(clonechildrenpath);
613 free(cgpath);
614 return false;
615 }
616
617 /* Already set for us by someone else. */
618 if (v == '1') {
619 DEBUG("\"cgroup.clone_children\" was already set to \"1\"");
620 free(clonechildrenpath);
621 free(cgpath);
622 return true;
623 }
624
625 /* copy parent's settings */
626 if (!copy_parent_file(cgpath, "cpuset.mems")) {
627 SYSERROR("Failed to copy \"cpuset.mems\" settings");
628 free(cgpath);
629 free(clonechildrenpath);
630 return false;
631 }
632 free(cgpath);
633
634 /* Set clone_children so children inherit our settings. */
635 ret = lxc_write_to_file(clonechildrenpath, "1", 1, false, 0666);
636 if (ret < 0) {
637 SYSERROR("Failed to write 1 to \"%s\"", clonechildrenpath);
638 free(clonechildrenpath);
639 return false;
640 }
641 free(clonechildrenpath);
642 return true;
643 }
644
645 /* Given two null-terminated lists of strings, return true if any string is in
646 * both.
647 */
648 static bool controller_lists_intersect(char **l1, char **l2)
649 {
650 int i;
651
652 if (!l1 || !l2)
653 return false;
654
655 for (i = 0; l1[i]; i++) {
656 if (string_in_list(l2, l1[i]))
657 return true;
658 }
659
660 return false;
661 }
662
663 /* For a null-terminated list of controllers @clist, return true if any of those
664 * controllers is already listed in the null-terminated list of hierarchies @hlist.
665 * Realistically, if one is present, all must be present.
666 */
667 static bool controller_list_is_dup(struct hierarchy **hlist, char **clist)
668 {
669 int i;
670
671 if (!hlist)
672 return false;
673
674 for (i = 0; hlist[i]; i++)
675 if (controller_lists_intersect(hlist[i]->controllers, clist))
676 return true;
677
678 return false;
679 }
680
681 /* Return true if the controller @entry is found in the null-terminated list of
682 * hierarchies @hlist.
683 */
684 static bool controller_found(struct hierarchy **hlist, char *entry)
685 {
686 int i;
687
688 if (!hlist)
689 return false;
690
691 for (i = 0; hlist[i]; i++)
692 if (string_in_list(hlist[i]->controllers, entry))
693 return true;
694
695 return false;
696 }
697
698 /* Return true if all of the controllers which we require have been found. The
699 * required list is freezer and anything in lxc.cgroup.use.
700 */
701 static bool all_controllers_found(struct cgroup_ops *ops)
702 {
703 char **cur;
704 struct hierarchy **hlist = ops->hierarchies;
705
706 if (!controller_found(hlist, "freezer")) {
707 ERROR("No freezer controller mountpoint found");
708 return false;
709 }
710
711 if (!ops->cgroup_use)
712 return true;
713
714 for (cur = ops->cgroup_use; cur && *cur; cur++)
715 if (!controller_found(hlist, *cur)) {
716 ERROR("No %s controller mountpoint found", *cur);
717 return false;
718 }
719
720 return true;
721 }
722
723 /* Get the controllers from a mountinfo line. There are other ways we could get
724 * this info. For lxcfs, field 3 is /cgroup/controller-list. For cgroupfs, we
725 * could parse the mount options. But we simply assume that the mountpoint must
726 * be /sys/fs/cgroup/controller-list.
727 */
728 static char **cg_hybrid_get_controllers(char **klist, char **nlist, char *line,
729 int type)
730 {
731 /* The fourth field is /sys/fs/cgroup/comma-delimited-controller-list
732 * for legacy hierarchies.
733 */
734 int i;
735 char *dup, *p2, *tok;
736 char *p = line, *sep = ",";
737 char **aret = NULL;
738
739 for (i = 0; i < 4; i++) {
740 p = strchr(p, ' ');
741 if (!p)
742 return NULL;
743 p++;
744 }
745
746 /* Note, if we change how mountinfo works, then our caller will need to
747 * verify /sys/fs/cgroup/ in this field.
748 */
749 if (strncmp(p, "/sys/fs/cgroup/", 15) != 0) {
750 ERROR("Found hierarchy not under /sys/fs/cgroup: \"%s\"", p);
751 return NULL;
752 }
753
754 p += 15;
755 p2 = strchr(p, ' ');
756 if (!p2) {
757 ERROR("Corrupt mountinfo");
758 return NULL;
759 }
760 *p2 = '\0';
761
762 if (type == CGROUP_SUPER_MAGIC) {
763 /* strdup() here for v1 hierarchies. Otherwise
764 * lxc_iterate_parts() will destroy mountpoints such as
765 * "/sys/fs/cgroup/cpu,cpuacct".
766 */
767 dup = strdup(p);
768 if (!dup)
769 return NULL;
770
771 lxc_iterate_parts(tok, dup, sep) {
772 must_append_controller(klist, nlist, &aret, tok);
773 }
774
775 free(dup);
776 }
777 *p2 = ' ';
778
779 return aret;
780 }
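
/* Example mountinfo line for the parser above (fields abbreviated, purely
 * illustrative):
 *
 *	34 25 0:29 / /sys/fs/cgroup/cpu,cpuacct rw,nosuid shared:15 - cgroup cgroup rw,cpu,cpuacct
 *
 * The code skips four space-separated fields to reach the mountpoint, checks
 * the "/sys/fs/cgroup/" prefix and, for a legacy hierarchy, splits the
 * trailing "cpu,cpuacct" on commas, so the returned list here would be
 * { "cpu", "cpuacct", NULL }, assuming both names are kernel subsystems.
 */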
781
782 static char **cg_unified_make_empty_controller(void)
783 {
784 int newentry;
785 char **aret = NULL;
786
787 newentry = append_null_to_list((void ***)&aret);
788 aret[newentry] = NULL;
789 return aret;
790 }
791
792 static char **cg_unified_get_controllers(const char *file)
793 {
794 char *buf, *tok;
795 char *sep = " \t\n";
796 char **aret = NULL;
797
798 buf = read_file(file);
799 if (!buf)
800 return NULL;
801
802 lxc_iterate_parts(tok, buf, sep) {
803 int newentry;
804 char *copy;
805
806 newentry = append_null_to_list((void ***)&aret);
807 copy = must_copy_string(tok);
808 aret[newentry] = copy;
809 }
810
811 free(buf);
812 return aret;
813 }
814
815 static struct hierarchy *add_hierarchy(struct hierarchy ***h, char **clist, char *mountpoint,
816 char *container_base_path, int type)
817 {
818 struct hierarchy *new;
819 int newentry;
820
821 new = must_realloc(NULL, sizeof(*new));
822 new->controllers = clist;
823 new->mountpoint = mountpoint;
824 new->container_base_path = container_base_path;
825 new->container_full_path = NULL;
826 new->monitor_full_path = NULL;
827 new->version = type;
828
829 newentry = append_null_to_list((void ***)h);
830 (*h)[newentry] = new;
831 return new;
832 }
833
834 /* Get a copy of the mountpoint from @line, which is a line from
835 * /proc/self/mountinfo.
836 */
837 static char *cg_hybrid_get_mountpoint(char *line)
838 {
839 int i;
840 size_t len;
841 char *p2;
842 char *p = line, *sret = NULL;
843
844 for (i = 0; i < 4; i++) {
845 p = strchr(p, ' ');
846 if (!p)
847 return NULL;
848 p++;
849 }
850
851 if (strncmp(p, "/sys/fs/cgroup/", 15) != 0)
852 return NULL;
853
854 p2 = strchr(p + 15, ' ');
855 if (!p2)
856 return NULL;
857 *p2 = '\0';
858
859 len = strlen(p);
860 sret = must_realloc(NULL, len + 1);
861 memcpy(sret, p, len);
862 sret[len] = '\0';
863 return sret;
864 }
865
866 /* Given a multi-line string, return a null-terminated copy of the current line. */
867 static char *copy_to_eol(char *p)
868 {
869 char *p2 = strchr(p, '\n'), *sret;
870 size_t len;
871
872 if (!p2)
873 return NULL;
874
875 len = p2 - p;
876 sret = must_realloc(NULL, len + 1);
877 memcpy(sret, p, len);
878 sret[len] = '\0';
879 return sret;
880 }
881
882 /* cgline: pointer to character after the first ':' in a line in a \n-terminated
883 * /proc/self/cgroup file. Check whether controller c is present.
884 */
885 static bool controller_in_clist(char *cgline, char *c)
886 {
887 char *tok, *eol, *tmp;
888 size_t len;
889
890 eol = strchr(cgline, ':');
891 if (!eol)
892 return false;
893
894 len = eol - cgline;
895 tmp = alloca(len + 1);
896 memcpy(tmp, cgline, len);
897 tmp[len] = '\0';
898
899 lxc_iterate_parts(tok, tmp, ",") {
900 if (strcmp(tok, c) == 0)
901 return true;
902 }
903
904 return false;
905 }
906
907 /* @basecginfo is a copy of /proc/$$/cgroup. Return the current cgroup for
908 * @controller.
909 */
910 static char *cg_hybrid_get_current_cgroup(char *basecginfo, char *controller,
911 int type)
912 {
913 char *p = basecginfo;
914
915 for (;;) {
916 bool is_cgv2_base_cgroup = false;
917
918 /* cgroup v2 entry in "/proc/<pid>/cgroup": "0::/some/path" */
919 if ((type == CGROUP2_SUPER_MAGIC) && (*p == '0'))
920 is_cgv2_base_cgroup = true;
921
922 p = strchr(p, ':');
923 if (!p)
924 return NULL;
925 p++;
926
927 if (is_cgv2_base_cgroup || (controller && controller_in_clist(p, controller))) {
928 p = strchr(p, ':');
929 if (!p)
930 return NULL;
931 p++;
932 return copy_to_eol(p);
933 }
934
935 p = strchr(p, '\n');
936 if (!p)
937 return NULL;
938 p++;
939 }
940 }
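
/* Example /proc/self/cgroup content for the lookup above (illustrative):
 *
 *	12:memory:/lxc/c1
 *	11:cpu,cpuacct:/lxc/c1
 *	0::/user.slice/user-1000.slice
 *
 * Asking for controller "memory" with type CGROUP_SUPER_MAGIC returns
 * "/lxc/c1"; asking with type CGROUP2_SUPER_MAGIC matches the "0::" line and
 * returns "/user.slice/user-1000.slice" regardless of the controller argument.
 */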
941
942 static void must_append_string(char ***list, char *entry)
943 {
944 int newentry;
945 char *copy;
946
947 newentry = append_null_to_list((void ***)list);
948 copy = must_copy_string(entry);
949 (*list)[newentry] = copy;
950 }
951
952 static int get_existing_subsystems(char ***klist, char ***nlist)
953 {
954 FILE *f;
955 char *line = NULL;
956 size_t len = 0;
957
958 f = fopen("/proc/self/cgroup", "r");
959 if (!f)
960 return -1;
961
962 while (getline(&line, &len, f) != -1) {
963 char *p, *p2, *tok;
964 p = strchr(line, ':');
965 if (!p)
966 continue;
967 p++;
968 p2 = strchr(p, ':');
969 if (!p2)
970 continue;
971 *p2 = '\0';
972
973 /* If the kernel has cgroup v2 support, then /proc/self/cgroup
974 * contains an entry of the form:
975 *
976 * 0::/some/path
977 *
978 * In this case we use "cgroup2" as controller name.
979 */
980 if ((p2 - p) == 0) {
981 must_append_string(klist, "cgroup2");
982 continue;
983 }
984
985 lxc_iterate_parts(tok, p, ",") {
986 if (strncmp(tok, "name=", 5) == 0)
987 must_append_string(nlist, tok);
988 else
989 must_append_string(klist, tok);
990 }
991 }
992
993 free(line);
994 fclose(f);
995 return 0;
996 }
997
998 static void trim(char *s)
999 {
1000 size_t len;
1001
1002 len = strlen(s);
1003 while ((len > 1) && (s[len - 1] == '\n'))
1004 s[--len] = '\0';
1005 }
1006
1007 static void lxc_cgfsng_print_hierarchies(struct cgroup_ops *ops)
1008 {
1009 int i;
1010 struct hierarchy **it;
1011
1012 if (!ops->hierarchies) {
1013 TRACE(" No hierarchies found");
1014 return;
1015 }
1016
1017 TRACE(" Hierarchies:");
1018 for (i = 0, it = ops->hierarchies; it && *it; it++, i++) {
1019 int j;
1020 char **cit;
1021
1022 TRACE(" %d: base_cgroup: %s", i, (*it)->container_base_path ? (*it)->container_base_path : "(null)");
1023 TRACE(" mountpoint: %s", (*it)->mountpoint ? (*it)->mountpoint : "(null)");
1024 TRACE(" controllers:");
1025 for (j = 0, cit = (*it)->controllers; cit && *cit; cit++, j++)
1026 TRACE(" %d: %s", j, *cit);
1027 }
1028 }
1029
1030 static void lxc_cgfsng_print_basecg_debuginfo(char *basecginfo, char **klist,
1031 char **nlist)
1032 {
1033 int k;
1034 char **it;
1035
1036 TRACE("basecginfo is:");
1037 TRACE("%s", basecginfo);
1038
1039 for (k = 0, it = klist; it && *it; it++, k++)
1040 TRACE("kernel subsystem %d: %s", k, *it);
1041
1042 for (k = 0, it = nlist; it && *it; it++, k++)
1043 TRACE("named subsystem %d: %s", k, *it);
1044 }
1045
1046 static int cgroup_rmdir(struct hierarchy **hierarchies,
1047 const char *container_cgroup)
1048 {
1049 int i;
1050
1051 if (!container_cgroup || !hierarchies)
1052 return 0;
1053
1054 for (i = 0; hierarchies[i]; i++) {
1055 int ret;
1056 struct hierarchy *h = hierarchies[i];
1057
1058 if (!h->container_full_path)
1059 continue;
1060
1061 ret = recursive_destroy(h->container_full_path);
1062 if (ret < 0)
1063 WARN("Failed to destroy \"%s\"", h->container_full_path);
1064
1065 free(h->container_full_path);
1066 h->container_full_path = NULL;
1067 }
1068
1069 return 0;
1070 }
1071
1072 struct generic_userns_exec_data {
1073 struct hierarchy **hierarchies;
1074 const char *container_cgroup;
1075 struct lxc_conf *conf;
1076 uid_t origuid; /* target uid in parent namespace */
1077 char *path;
1078 };
1079
1080 static int cgroup_rmdir_wrapper(void *data)
1081 {
1082 int ret;
1083 struct generic_userns_exec_data *arg = data;
1084 uid_t nsuid = (arg->conf->root_nsuid_map != NULL) ? 0 : arg->conf->init_uid;
1085 gid_t nsgid = (arg->conf->root_nsgid_map != NULL) ? 0 : arg->conf->init_gid;
1086
1087 ret = setresgid(nsgid, nsgid, nsgid);
1088 if (ret < 0) {
1089 SYSERROR("Failed to setresgid(%d, %d, %d)", (int)nsgid,
1090 (int)nsgid, (int)nsgid);
1091 return -1;
1092 }
1093
1094 ret = setresuid(nsuid, nsuid, nsuid);
1095 if (ret < 0) {
1096 SYSERROR("Failed to setresuid(%d, %d, %d)", (int)nsuid,
1097 (int)nsuid, (int)nsuid);
1098 return -1;
1099 }
1100
1101 ret = setgroups(0, NULL);
1102 if (ret < 0 && errno != EPERM) {
1103 SYSERROR("Failed to setgroups(0, NULL)");
1104 return -1;
1105 }
1106
1107 return cgroup_rmdir(arg->hierarchies, arg->container_cgroup);
1108 }
1109
1110 __cgfsng_ops static void cgfsng_payload_destroy(struct cgroup_ops *ops,
1111 struct lxc_handler *handler)
1112 {
1113 int ret;
1114 struct generic_userns_exec_data wrap;
1115
1116 wrap.origuid = 0;
1117 wrap.container_cgroup = ops->container_cgroup;
1118 wrap.hierarchies = ops->hierarchies;
1119 wrap.conf = handler->conf;
1120
1121 if (handler->conf && !lxc_list_empty(&handler->conf->id_map))
1122 ret = userns_exec_1(handler->conf, cgroup_rmdir_wrapper, &wrap,
1123 "cgroup_rmdir_wrapper");
1124 else
1125 ret = cgroup_rmdir(ops->hierarchies, ops->container_cgroup);
1126 if (ret < 0) {
1127 WARN("Failed to destroy cgroups");
1128 return;
1129 }
1130 }
1131
1132 __cgfsng_ops static void cgfsng_monitor_destroy(struct cgroup_ops *ops,
1133 struct lxc_handler *handler)
1134 {
1135 int len;
1136 char *pivot_path;
1137 struct lxc_conf *conf = handler->conf;
1138 char pidstr[INTTYPE_TO_STRLEN(pid_t)];
1139
1140 if (!ops->hierarchies)
1141 return;
1142
1143 len = snprintf(pidstr, sizeof(pidstr), "%d", handler->monitor_pid);
1144 if (len < 0 || (size_t)len >= sizeof(pidstr))
1145 return;
1146
1147 for (int i = 0; ops->hierarchies[i]; i++) {
1148 int ret;
1149 char *chop;
1150 char pivot_cgroup[] = PIVOT_CGROUP;
1151 struct hierarchy *h = ops->hierarchies[i];
1152
1153 if (!h->monitor_full_path)
1154 continue;
1155
1156 if (conf && conf->cgroup_meta.dir)
1157 pivot_path = must_make_path(h->mountpoint,
1158 h->container_base_path,
1159 conf->cgroup_meta.dir,
1160 PIVOT_CGROUP,
1161 "cgroup.procs", NULL);
1162 else
1163 pivot_path = must_make_path(h->mountpoint,
1164 h->container_base_path,
1165 PIVOT_CGROUP,
1166 "cgroup.procs", NULL);
1167
1168 chop = strrchr(pivot_path, '/');
1169 if (chop)
1170 *chop = '\0';
1171
1172 /*
1173 * Make sure not to pass in the ro string literal PIVOT_CGROUP
1174 * here.
1175 */
1176 if (!cg_legacy_handle_cpuset_hierarchy(h, pivot_cgroup)) {
1177 WARN("Failed to handle legacy cpuset controller");
1178 goto next;
1179 }
1180
1181 ret = mkdir_p(pivot_path, 0755);
1182 if (ret < 0 && errno != EEXIST) {
1183 SYSWARN("Failed to create cgroup \"%s\"\n", pivot_path);
1184 goto next;
1185 }
1186
1187 if (chop)
1188 *chop = '/';
1189
1190 /* Move ourselves into the pivot cgroup to delete our own
1191 * cgroup.
1192 */
1193 ret = lxc_write_to_file(pivot_path, pidstr, len, false, 0666);
1194 if (ret != 0) {
1195 SYSWARN("Failed to move monitor %s to \"%s\"\n", pidstr, pivot_path);
1196 goto next;
1197 }
1198
1199 ret = recursive_destroy(h->monitor_full_path);
1200 if (ret < 0)
1201 WARN("Failed to destroy \"%s\"", h->monitor_full_path);
1202
1203 next:
1204 free(pivot_path);
1205 }
1206 }
1207
1208 static bool cg_unified_create_cgroup(struct hierarchy *h, char *cgname)
1209 {
1210 size_t i, parts_len;
1211 char **it;
1212 size_t full_len = 0;
1213 char *add_controllers = NULL, *cgroup = NULL;
1214 char **parts = NULL;
1215 bool bret = false;
1216
1217 if (h->version != CGROUP2_SUPER_MAGIC)
1218 return true;
1219
1220 if (!h->controllers)
1221 return true;
1222
1223 /* For now we simply enable all controllers that we have detected by
1224 * creating a string like "+memory +pids +cpu +io".
1225 * TODO: In the near future we might want to support "-<controller>"
1226 * etc. but whether supporting semantics like this make sense will need
1227 * some thinking.
1228 */
1229 for (it = h->controllers; it && *it; it++) {
1230 full_len += strlen(*it) + 2;
1231 add_controllers = must_realloc(add_controllers, full_len + 1);
1232
1233 if (h->controllers[0] == *it)
1234 add_controllers[0] = '\0';
1235
1236 (void)strlcat(add_controllers, "+", full_len + 1);
1237 (void)strlcat(add_controllers, *it, full_len + 1);
1238
1239 if ((it + 1) && *(it + 1))
1240 (void)strlcat(add_controllers, " ", full_len + 1);
1241 }
1242
1243 parts = lxc_string_split(cgname, '/');
1244 if (!parts)
1245 goto on_error;
1246
1247 parts_len = lxc_array_len((void **)parts);
1248 if (parts_len > 0)
1249 parts_len--;
1250
1251 cgroup = must_make_path(h->mountpoint, h->container_base_path, NULL);
1252 for (i = 0; i < parts_len; i++) {
1253 int ret;
1254 char *target;
1255
1256 cgroup = must_append_path(cgroup, parts[i], NULL);
1257 target = must_make_path(cgroup, "cgroup.subtree_control", NULL);
1258 ret = lxc_write_to_file(target, add_controllers, full_len, false, 0666);
1259 free(target);
1260 if (ret < 0) {
1261 SYSERROR("Could not enable \"%s\" controllers in the "
1262 "unified cgroup \"%s\"", add_controllers, cgroup);
1263 goto on_error;
1264 }
1265 }
1266
1267 bret = true;
1268
1269 on_error:
1270 lxc_free_array((void **)parts, free);
1271 free(add_controllers);
1272 free(cgroup);
1273 return bret;
1274 }
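
/* Illustration of the string built above (the controller names are an
 * example): with h->controllers = { "cpu", "io", "memory", "pids", NULL } the
 * function writes
 *
 *	+cpu +io +memory +pids
 *
 * into cgroup.subtree_control of each intermediate cgroup below the base path,
 * up to but not including the final component, so that the controllers are
 * delegated down to the container's own cgroup.
 */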
1275
1276 static int mkdir_eexist_on_last(const char *dir, mode_t mode)
1277 {
1278 const char *tmp = dir;
1279 const char *orig = dir;
1280 size_t orig_len;
1281
1282 orig_len = strlen(dir);
1283 do {
1284 int ret;
1285 size_t cur_len;
1286 char *makeme;
1287
1288 dir = tmp + strspn(tmp, "/");
1289 tmp = dir + strcspn(dir, "/");
1290
1291 errno = ENOMEM;
1292 cur_len = dir - orig;
1293 makeme = strndup(orig, cur_len);
1294 if (!makeme)
1295 return -1;
1296
1297 ret = mkdir(makeme, mode);
1298 if (ret < 0) {
1299 if ((errno != EEXIST) || (orig_len == cur_len)) {
1300 SYSERROR("Failed to create directory \"%s\"", makeme);
1301 free(makeme);
1302 return -1;
1303 }
1304 }
1305 free(makeme);
1306
1307 } while (tmp != dir);
1308
1309 return 0;
1310 }
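
/* Behaviour sketch for mkdir_eexist_on_last(): like "mkdir -p" it creates
 * every missing component of @dir, but EEXIST only counts as an error for the
 * final component. E.g. for "/sys/fs/cgroup/memory/lxc/c1" (an illustrative
 * path) existing prefix directories are silently accepted, while an already
 * existing "c1" makes the call fail, which lets callers detect a name clash
 * and retry with "c1-1", "c1-2", and so on.
 */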
1311
1312 static bool monitor_create_path_for_hierarchy(struct hierarchy *h, char *cgname)
1313 {
1314 int ret;
1315
1316 if (!cg_legacy_handle_cpuset_hierarchy(h, cgname)) {
1317 ERROR("Failed to handle legacy cpuset controller");
1318 return false;
1319 }
1320
1321 h->monitor_full_path = must_make_path(h->mountpoint, h->container_base_path, cgname, NULL);
1322 ret = mkdir_eexist_on_last(h->monitor_full_path, 0755);
1323 if (ret < 0) {
1324 ERROR("Failed to create cgroup \"%s\"", h->monitor_full_path);
1325 return false;
1326 }
1327
1328 return cg_unified_create_cgroup(h, cgname);
1329 }
1330
1331 static bool container_create_path_for_hierarchy(struct hierarchy *h, char *cgname)
1332 {
1333 int ret;
1334
1335 if (!cg_legacy_handle_cpuset_hierarchy(h, cgname)) {
1336 ERROR("Failed to handle legacy cpuset controller");
1337 return false;
1338 }
1339
1340 h->container_full_path = must_make_path(h->mountpoint, h->container_base_path, cgname, NULL);
1341 ret = mkdir_eexist_on_last(h->container_full_path, 0755);
1342 if (ret < 0) {
1343 ERROR("Failed to create cgroup \"%s\"", h->container_full_path);
1344 return false;
1345 }
1346
1347 return cg_unified_create_cgroup(h, cgname);
1348 }
1349
1350 static void remove_path_for_hierarchy(struct hierarchy *h, char *cgname, bool monitor)
1351 {
1352 int ret;
1353 char *full_path;
1354
1355 if (monitor)
1356 full_path = h->monitor_full_path;
1357 else
1358 full_path = h->container_full_path;
1359
1360 ret = rmdir(full_path);
1361 if (ret < 0)
1362 SYSERROR("Failed to rmdir(\"%s\") from failed creation attempt", full_path);
1363
1364 free(full_path);
1365
1366 if (monitor)
1367 h->monitor_full_path = NULL;
1368 else
1369 h->container_full_path = NULL;
1370 }
1371
1372 __cgfsng_ops static inline bool cgfsng_monitor_create(struct cgroup_ops *ops,
1373 struct lxc_handler *handler)
1374 {
1375 char *monitor_cgroup, *offset, *tmp;
1376 int i, idx = 0;
1377 size_t len;
1378 bool bret = false;
1379 struct lxc_conf *conf = handler->conf;
1380
1381 if (!conf)
1382 return bret;
1383
1384 if (conf->cgroup_meta.dir)
1385 tmp = lxc_string_join("/",
1386 (const char *[]){conf->cgroup_meta.dir,
1387 ops->monitor_pattern,
1388 handler->name, NULL},
1389 false);
1390 else
1391 tmp = must_make_path(ops->monitor_pattern, handler->name, NULL);
1392 if (!tmp)
1393 return bret;
1394
1395 len = strlen(tmp) + 5; /* leave room for -NNN\0 */
1396 monitor_cgroup = must_realloc(tmp, len);
1397 offset = monitor_cgroup + len - 5;
1398 *offset = 0;
1399
1400 do {
1401 if (idx) {
1402 int ret = snprintf(offset, 5, "-%d", idx);
1403 if (ret < 0 || (size_t)ret >= 5)
1404 goto on_error;
1405 }
1406
1407 for (i = 0; ops->hierarchies[i]; i++) {
1408 if (!monitor_create_path_for_hierarchy(ops->hierarchies[i], monitor_cgroup)) {
1409 ERROR("Failed to create cgroup \"%s\"", ops->hierarchies[i]->monitor_full_path);
1410 free(ops->hierarchies[i]->monitor_full_path);
1411 ops->hierarchies[i]->monitor_full_path = NULL;
1412
1413 for (int j = 0; j < i; j++)
1414 remove_path_for_hierarchy(ops->hierarchies[j], monitor_cgroup, true);
1415
1416 idx++;
1417 break;
1418 }
1419 }
1420 } while (ops->hierarchies[i] && idx > 0 && idx < 1000);
1421
1422 if (idx < 1000) {
1423 bret = true;
1424 INFO("The monitor process uses \"%s\" as cgroup", monitor_cgroup);
1425 }
1426
1427 on_error:
1428 free(monitor_cgroup);
1429
1430 return bret;
1431 }
1432
1433 /* Try to create the same cgroup in all hierarchies. Start with cgroup_pattern;
1434 * next cgroup_pattern-1, -2, ..., -999.
1435 */
1436 __cgfsng_ops static inline bool cgfsng_payload_create(struct cgroup_ops *ops,
1437 struct lxc_handler *handler)
1438 {
1439 int i;
1440 size_t len;
1441 char *container_cgroup, *offset, *tmp;
1442 int idx = 0;
1443 struct lxc_conf *conf = handler->conf;
1444
1445 if (ops->container_cgroup) {
1446 WARN("cgfsng_create called a second time: %s", ops->container_cgroup);
1447 return false;
1448 }
1449
1450 if (!conf)
1451 return false;
1452
1453 if (conf->cgroup_meta.dir)
1454 tmp = lxc_string_join("/", (const char *[]){conf->cgroup_meta.dir, handler->name, NULL}, false);
1455 else
1456 tmp = lxc_string_replace("%n", handler->name, ops->cgroup_pattern);
1457 if (!tmp) {
1458 ERROR("Failed expanding cgroup name pattern");
1459 return false;
1460 }
1461
1462 len = strlen(tmp) + 5; /* leave room for -NNN\0 */
1463 container_cgroup = must_realloc(NULL, len);
1464 (void)strlcpy(container_cgroup, tmp, len);
1465 free(tmp);
1466 offset = container_cgroup + len - 5;
1467
1468 again:
1469 if (idx == 1000) {
1470 ERROR("Too many conflicting cgroup names");
1471 goto out_free;
1472 }
1473
1474 if (idx) {
1475 int ret;
1476
1477 ret = snprintf(offset, 5, "-%d", idx);
1478 if (ret < 0 || (size_t)ret >= 5) {
1479 FILE *f = fopen("/dev/null", "w");
1480 if (f) {
1481 fprintf(f, "Workaround for GCC7 bug: "
1482 "https://gcc.gnu.org/bugzilla/"
1483 "show_bug.cgi?id=78969");
1484 fclose(f);
1485 }
1486 }
1487 }
1488
1489 for (i = 0; ops->hierarchies[i]; i++) {
1490 if (!container_create_path_for_hierarchy(ops->hierarchies[i], container_cgroup)) {
1491 ERROR("Failed to create cgroup \"%s\"", ops->hierarchies[i]->container_full_path);
1492 free(ops->hierarchies[i]->container_full_path);
1493 ops->hierarchies[i]->container_full_path = NULL;
1494 for (int j = 0; j < i; j++)
1495 remove_path_for_hierarchy(ops->hierarchies[j], container_cgroup, false);
1496 idx++;
1497 goto again;
1498 }
1499 }
1500
1501 ops->container_cgroup = container_cgroup;
1502 INFO("The container uses \"%s\" as cgroup", container_cgroup);
1503
1504 return true;
1505
1506 out_free:
1507 free(container_cgroup);
1508
1509 return false;
1510 }
1511
1512 __cgfsng_ops static bool __do_cgroup_enter(struct cgroup_ops *ops, pid_t pid,
1513 bool monitor)
1514 {
1515 int len;
1516 char pidstr[INTTYPE_TO_STRLEN(pid_t)];
1517
1518 len = snprintf(pidstr, sizeof(pidstr), "%d", pid);
1519 if (len < 0 || (size_t)len >= sizeof(pidstr))
1520 return false;
1521
1522 for (int i = 0; ops->hierarchies[i]; i++) {
1523 int ret;
1524 char *path;
1525
1526 if (monitor)
1527 path = must_make_path(ops->hierarchies[i]->monitor_full_path,
1528 "cgroup.procs", NULL);
1529 else
1530 path = must_make_path(ops->hierarchies[i]->container_full_path,
1531 "cgroup.procs", NULL);
1532 ret = lxc_write_to_file(path, pidstr, len, false, 0666);
1533 if (ret != 0) {
1534 SYSERROR("Failed to enter cgroup \"%s\"", path);
1535 free(path);
1536 return false;
1537 }
1538 free(path);
1539 }
1540
1541 return true;
1542 }
1543
1544 __cgfsng_ops static bool cgfsng_monitor_enter(struct cgroup_ops *ops, pid_t pid)
1545 {
1546 return __do_cgroup_enter(ops, pid, true);
1547 }
1548
1549 static bool cgfsng_payload_enter(struct cgroup_ops *ops, pid_t pid)
1550 {
1551 return __do_cgroup_enter(ops, pid, false);
1552 }
1553
1554 static int chowmod(char *path, uid_t chown_uid, gid_t chown_gid,
1555 mode_t chmod_mode)
1556 {
1557 int ret;
1558
1559 ret = chown(path, chown_uid, chown_gid);
1560 if (ret < 0) {
1561 SYSWARN("Failed to chown(%s, %d, %d)", path, (int)chown_uid, (int)chown_gid);
1562 return -1;
1563 }
1564
1565 ret = chmod(path, chmod_mode);
1566 if (ret < 0) {
1567 SYSWARN("Failed to chmod(%s, %d)", path, (int)chmod_mode);
1568 return -1;
1569 }
1570
1571 return 0;
1572 }
1573
1574 /* chgrp the container cgroups to container group. We leave
1575 * the container owner as cgroup owner. So we must make the
1576 * directories 775 so that the container can create sub-cgroups.
1577 *
1578 * Also chown the tasks and cgroup.procs files. Those may not
1579 * exist depending on kernel version.
1580 */
1581 static int chown_cgroup_wrapper(void *data)
1582 {
1583 int i, ret;
1584 uid_t destuid;
1585 struct generic_userns_exec_data *arg = data;
1586 uid_t nsuid = (arg->conf->root_nsuid_map != NULL) ? 0 : arg->conf->init_uid;
1587 gid_t nsgid = (arg->conf->root_nsgid_map != NULL) ? 0 : arg->conf->init_gid;
1588
1589 ret = setresgid(nsgid, nsgid, nsgid);
1590 if (ret < 0) {
1591 SYSERROR("Failed to setresgid(%d, %d, %d)",
1592 (int)nsgid, (int)nsgid, (int)nsgid);
1593 return -1;
1594 }
1595
1596 ret = setresuid(nsuid, nsuid, nsuid);
1597 if (ret < 0) {
1598 SYSERROR("Failed to setresuid(%d, %d, %d)",
1599 (int)nsuid, (int)nsuid, (int)nsuid);
1600 return -1;
1601 }
1602
1603 ret = setgroups(0, NULL);
1604 if (ret < 0 && errno != EPERM) {
1605 SYSERROR("Failed to setgroups(0, NULL)");
1606 return -1;
1607 }
1608
1609 destuid = get_ns_uid(arg->origuid);
1610 if (destuid == LXC_INVALID_UID)
1611 destuid = 0;
1612
1613 for (i = 0; arg->hierarchies[i]; i++) {
1614 char *fullpath;
1615 char *path = arg->hierarchies[i]->container_full_path;
1616
1617 ret = chowmod(path, destuid, nsgid, 0775);
1618 if (ret < 0)
1619 return -1;
1620
1621 /* Failures to chown() these are inconvenient but not
1622 * detrimental. We leave these owned by the container launcher,
1623 * so that container root can write to the files to attach. We
1624 * chmod() them 664 so that container systemd can write to the
1625 * files (which systemd in wily insists on doing).
1626 */
1627
1628 if (arg->hierarchies[i]->version == CGROUP_SUPER_MAGIC) {
1629 fullpath = must_make_path(path, "tasks", NULL);
1630 (void)chowmod(fullpath, destuid, nsgid, 0664);
1631 free(fullpath);
1632 }
1633
1634 fullpath = must_make_path(path, "cgroup.procs", NULL);
1635 (void)chowmod(fullpath, destuid, nsgid, 0664);
1636 free(fullpath);
1637
1638 if (arg->hierarchies[i]->version != CGROUP2_SUPER_MAGIC)
1639 continue;
1640
1641 fullpath = must_make_path(path, "cgroup.subtree_control", NULL);
1642 (void)chowmod(fullpath, destuid, nsgid, 0664);
1643 free(fullpath);
1644
1645 fullpath = must_make_path(path, "cgroup.threads", NULL);
1646 (void)chowmod(fullpath, destuid, nsgid, 0664);
1647 free(fullpath);
1648 }
1649
1650 return 0;
1651 }
1652
1653 __cgfsng_ops static bool cgfsng_chown(struct cgroup_ops *ops,
1654 struct lxc_conf *conf)
1655 {
1656 struct generic_userns_exec_data wrap;
1657
1658 if (lxc_list_empty(&conf->id_map))
1659 return true;
1660
1661 wrap.origuid = geteuid();
1662 wrap.path = NULL;
1663 wrap.hierarchies = ops->hierarchies;
1664 wrap.conf = conf;
1665
1666 if (userns_exec_1(conf, chown_cgroup_wrapper, &wrap,
1667 "chown_cgroup_wrapper") < 0) {
1668 ERROR("Error requesting cgroup chown in new user namespace");
1669 return false;
1670 }
1671
1672 return true;
1673 }
1674
1675 /* cgroup-full:* is done, no need to create subdirs */
1676 static bool cg_mount_needs_subdirs(int type)
1677 {
1678 if (type >= LXC_AUTO_CGROUP_FULL_RO)
1679 return false;
1680
1681 return true;
1682 }
1683
1684 /* After $rootfs/sys/fs/cgroup/controller/the/cg/path has been created,
1685 * remount the controller read-only if needed and bind-mount the cgroupfs onto
1686 * controller/the/cg/path.
1687 */
1688 static int cg_legacy_mount_controllers(int type, struct hierarchy *h,
1689 char *controllerpath, char *cgpath,
1690 const char *container_cgroup)
1691 {
1692 int ret, remount_flags;
1693 char *sourcepath;
1694 int flags = MS_BIND;
1695
1696 if (type == LXC_AUTO_CGROUP_RO || type == LXC_AUTO_CGROUP_MIXED) {
1697 ret = mount(controllerpath, controllerpath, "cgroup", MS_BIND, NULL);
1698 if (ret < 0) {
1699 SYSERROR("Failed to bind mount \"%s\" onto \"%s\"",
1700 controllerpath, controllerpath);
1701 return -1;
1702 }
1703
1704 remount_flags = add_required_remount_flags(controllerpath,
1705 controllerpath,
1706 flags | MS_REMOUNT);
1707 ret = mount(controllerpath, controllerpath, "cgroup",
1708 remount_flags | MS_REMOUNT | MS_BIND | MS_RDONLY,
1709 NULL);
1710 if (ret < 0) {
1711 SYSERROR("Failed to remount \"%s\" ro", controllerpath);
1712 return -1;
1713 }
1714
1715 INFO("Remounted %s read-only", controllerpath);
1716 }
1717
1718 sourcepath = must_make_path(h->mountpoint, h->container_base_path,
1719 container_cgroup, NULL);
1720 if (type == LXC_AUTO_CGROUP_RO)
1721 flags |= MS_RDONLY;
1722
1723 ret = mount(sourcepath, cgpath, "cgroup", flags, NULL);
1724 if (ret < 0) {
1725 SYSERROR("Failed to mount \"%s\" onto \"%s\"", h->controllers[0], cgpath);
1726 free(sourcepath);
1727 return -1;
1728 }
1729 INFO("Mounted \"%s\" onto \"%s\"", h->controllers[0], cgpath);
1730
1731 if (flags & MS_RDONLY) {
1732 remount_flags = add_required_remount_flags(sourcepath, cgpath,
1733 flags | MS_REMOUNT);
1734 ret = mount(sourcepath, cgpath, "cgroup", remount_flags, NULL);
1735 if (ret < 0) {
1736 SYSERROR("Failed to remount \"%s\" ro", cgpath);
1737 free(sourcepath);
1738 return -1;
1739 }
1740 INFO("Remounted %s read-only", cgpath);
1741 }
1742
1743 free(sourcepath);
1744 INFO("Completed second stage cgroup automounts for \"%s\"", cgpath);
1745 return 0;
1746 }
1747
1748 /* __cg_mount_direct
1749 *
1750 * Mount cgroup hierarchies directly without using bind-mounts. The main
1751 * use cases are mounting cgroup hierarchies in cgroup namespaces and mounting
1752 * cgroups for the LXC_AUTO_CGROUP_FULL option.
1753 */
1754 static int __cg_mount_direct(int type, struct hierarchy *h,
1755 const char *controllerpath)
1756 {
1757 int ret;
1758 char *controllers = NULL;
1759 char *fstype = "cgroup2";
1760 unsigned long flags = 0;
1761
1762 flags |= MS_NOSUID;
1763 flags |= MS_NOEXEC;
1764 flags |= MS_NODEV;
1765 flags |= MS_RELATIME;
1766
1767 if (type == LXC_AUTO_CGROUP_RO || type == LXC_AUTO_CGROUP_FULL_RO)
1768 flags |= MS_RDONLY;
1769
1770 if (h->version != CGROUP2_SUPER_MAGIC) {
1771 controllers = lxc_string_join(",", (const char **)h->controllers, false);
1772 if (!controllers)
1773 return -ENOMEM;
1774 fstype = "cgroup";
1775 }
1776
1777 ret = mount("cgroup", controllerpath, fstype, flags, controllers);
1778 free(controllers);
1779 if (ret < 0) {
1780 SYSERROR("Failed to mount \"%s\" with cgroup filesystem type %s", controllerpath, fstype);
1781 return -1;
1782 }
1783
1784 DEBUG("Mounted \"%s\" with cgroup filesystem type %s", controllerpath, fstype);
1785 return 0;
1786 }
1787
1788 static inline int cg_mount_in_cgroup_namespace(int type, struct hierarchy *h,
1789 const char *controllerpath)
1790 {
1791 return __cg_mount_direct(type, h, controllerpath);
1792 }
1793
1794 static inline int cg_mount_cgroup_full(int type, struct hierarchy *h,
1795 const char *controllerpath)
1796 {
1797 if (type < LXC_AUTO_CGROUP_FULL_RO || type > LXC_AUTO_CGROUP_FULL_MIXED)
1798 return 0;
1799
1800 return __cg_mount_direct(type, h, controllerpath);
1801 }
1802
1803 __cgfsng_ops static bool cgfsng_mount(struct cgroup_ops *ops,
1804 struct lxc_handler *handler,
1805 const char *root, int type)
1806 {
1807 int i, ret;
1808 char *tmpfspath = NULL;
1809 bool has_cgns = false, retval = false, wants_force_mount = false;
1810
1811 if ((type & LXC_AUTO_CGROUP_MASK) == 0)
1812 return true;
1813
1814 if (type & LXC_AUTO_CGROUP_FORCE) {
1815 type &= ~LXC_AUTO_CGROUP_FORCE;
1816 wants_force_mount = true;
1817 }
1818
1819 if (!wants_force_mount){
1820 if (!lxc_list_empty(&handler->conf->keepcaps))
1821 wants_force_mount = !in_caplist(CAP_SYS_ADMIN, &handler->conf->keepcaps);
1822 else
1823 wants_force_mount = in_caplist(CAP_SYS_ADMIN, &handler->conf->caps);
1824 }
1825
1826 has_cgns = cgns_supported();
1827 if (has_cgns && !wants_force_mount)
1828 return true;
1829
1830 if (type == LXC_AUTO_CGROUP_NOSPEC)
1831 type = LXC_AUTO_CGROUP_MIXED;
1832 else if (type == LXC_AUTO_CGROUP_FULL_NOSPEC)
1833 type = LXC_AUTO_CGROUP_FULL_MIXED;
1834
1835 /* Mount tmpfs */
1836 tmpfspath = must_make_path(root, "/sys/fs/cgroup", NULL);
1837 ret = safe_mount(NULL, tmpfspath, "tmpfs",
1838 MS_NOSUID | MS_NODEV | MS_NOEXEC | MS_RELATIME,
1839 "size=10240k,mode=755", root);
1840 if (ret < 0)
1841 goto on_error;
1842
1843 for (i = 0; ops->hierarchies[i]; i++) {
1844 char *controllerpath, *path2;
1845 struct hierarchy *h = ops->hierarchies[i];
1846 char *controller = strrchr(h->mountpoint, '/');
1847
1848 if (!controller)
1849 continue;
1850 controller++;
1851
1852 controllerpath = must_make_path(tmpfspath, controller, NULL);
1853 if (dir_exists(controllerpath)) {
1854 free(controllerpath);
1855 continue;
1856 }
1857
1858 ret = mkdir(controllerpath, 0755);
1859 if (ret < 0) {
1860 SYSERROR("Error creating cgroup path: %s", controllerpath);
1861 free(controllerpath);
1862 goto on_error;
1863 }
1864
1865 if (has_cgns && wants_force_mount) {
1866 /* If cgroup namespaces are supported but the container
1867 * will not have CAP_SYS_ADMIN after it has started we
1868 * need to mount the cgroups manually.
1869 */
1870 ret = cg_mount_in_cgroup_namespace(type, h, controllerpath);
1871 free(controllerpath);
1872 if (ret < 0)
1873 goto on_error;
1874
1875 continue;
1876 }
1877
1878 ret = cg_mount_cgroup_full(type, h, controllerpath);
1879 if (ret < 0) {
1880 free(controllerpath);
1881 goto on_error;
1882 }
1883
1884 if (!cg_mount_needs_subdirs(type)) {
1885 free(controllerpath);
1886 continue;
1887 }
1888
1889 path2 = must_make_path(controllerpath, h->container_base_path,
1890 ops->container_cgroup, NULL);
1891 ret = mkdir_p(path2, 0755);
1892 if (ret < 0) {
1893 free(controllerpath);
1894 free(path2);
1895 goto on_error;
1896 }
1897
1898 ret = cg_legacy_mount_controllers(type, h, controllerpath,
1899 path2, ops->container_cgroup);
1900 free(controllerpath);
1901 free(path2);
1902 if (ret < 0)
1903 goto on_error;
1904 }
1905 retval = true;
1906
1907 on_error:
1908 free(tmpfspath);
1909 return retval;
1910 }
1911
1912 static int recursive_count_nrtasks(char *dirname)
1913 {
1914 struct dirent *direntp;
1915 DIR *dir;
1916 int count = 0, ret;
1917 char *path;
1918
1919 dir = opendir(dirname);
1920 if (!dir)
1921 return 0;
1922
1923 while ((direntp = readdir(dir))) {
1924 struct stat mystat;
1925
1926 if (!strcmp(direntp->d_name, ".") ||
1927 !strcmp(direntp->d_name, ".."))
1928 continue;
1929
1930 path = must_make_path(dirname, direntp->d_name, NULL);
1931
1932 if (lstat(path, &mystat))
1933 goto next;
1934
1935 if (!S_ISDIR(mystat.st_mode))
1936 goto next;
1937
1938 count += recursive_count_nrtasks(path);
1939 next:
1940 free(path);
1941 }
1942
1943 path = must_make_path(dirname, "cgroup.procs", NULL);
1944 ret = lxc_count_file_lines(path);
1945 if (ret != -1)
1946 count += ret;
1947 free(path);
1948
1949 (void)closedir(dir);
1950
1951 return count;
1952 }
1953
1954 __cgfsng_ops static int cgfsng_nrtasks(struct cgroup_ops *ops)
1955 {
1956 int count;
1957 char *path;
1958
1959 if (!ops->container_cgroup || !ops->hierarchies)
1960 return -1;
1961
1962 path = must_make_path(ops->hierarchies[0]->container_full_path, NULL);
1963 count = recursive_count_nrtasks(path);
1964 free(path);
1965 return count;
1966 }
1967
1968 /* Only root needs to escape to the cgroup of its init. */
1969 __cgfsng_ops static bool cgfsng_escape(const struct cgroup_ops *ops,
1970 struct lxc_conf *conf)
1971 {
1972 int i;
1973
1974 if (conf->cgroup_meta.relative || geteuid())
1975 return true;
1976
1977 for (i = 0; ops->hierarchies[i]; i++) {
1978 int ret;
1979 char *fullpath;
1980
1981 fullpath = must_make_path(ops->hierarchies[i]->mountpoint,
1982 ops->hierarchies[i]->container_base_path,
1983 "cgroup.procs", NULL);
1984 ret = lxc_write_to_file(fullpath, "0", 2, false, 0666);
1985 if (ret != 0) {
1986 SYSERROR("Failed to escape to cgroup \"%s\"", fullpath);
1987 free(fullpath);
1988 return false;
1989 }
1990 free(fullpath);
1991 }
1992
1993 return true;
1994 }
1995
1996 __cgfsng_ops static int cgfsng_num_hierarchies(struct cgroup_ops *ops)
1997 {
1998 int i;
1999
2000 for (i = 0; ops->hierarchies[i]; i++)
2001 ;
2002
2003 return i;
2004 }
2005
2006 __cgfsng_ops static bool cgfsng_get_hierarchies(struct cgroup_ops *ops, int n, char ***out)
2007 {
2008 int i;
2009
2010 /* sanity check n, including the hierarchy we are about to return */
2011 for (i = 0; i <= n; i++)
2012 if (!ops->hierarchies[i])
2013 return false;
2014
2015 *out = ops->hierarchies[n]->controllers;
2016
2017 return true;
2018 }
2019
2020 #define THAWED "THAWED"
2021 #define THAWED_LEN (strlen(THAWED))
2022
2023 /* TODO: If the unified cgroup hierarchy grows a freezer controller this needs
2024 * to be adapted.
2025 */
2026 __cgfsng_ops static bool cgfsng_unfreeze(struct cgroup_ops *ops)
2027 {
2028 int ret;
2029 char *fullpath;
2030 struct hierarchy *h;
2031
2032 h = get_hierarchy(ops, "freezer");
2033 if (!h)
2034 return false;
2035
2036 fullpath = must_make_path(h->container_full_path, "freezer.state", NULL);
2037 ret = lxc_write_to_file(fullpath, THAWED, THAWED_LEN, false, 0666);
2038 free(fullpath);
2039 if (ret < 0)
2040 return false;
2041
2042 return true;
2043 }
2044
2045 __cgfsng_ops static const char *cgfsng_get_cgroup(struct cgroup_ops *ops,
2046 const char *controller)
2047 {
2048 struct hierarchy *h;
2049
2050 h = get_hierarchy(ops, controller);
2051 if (!h) {
2052 WARN("Failed to find hierarchy for controller \"%s\"",
2053 controller ? controller : "(null)");
2054 return NULL;
2055 }
2056
2057 return h->container_full_path ? h->container_full_path + strlen(h->mountpoint) : NULL;
2058 }
2059
2060 /* Given a cgroup path returned from lxc_cmd_get_cgroup_path, build a full path,
2061 * which must be freed by the caller.
2062 */
2063 static inline char *build_full_cgpath_from_monitorpath(struct hierarchy *h,
2064 const char *inpath,
2065 const char *filename)
2066 {
2067 return must_make_path(h->mountpoint, inpath, filename, NULL);
2068 }
2069
2070 /* Technically, we're always at a delegation boundary here (this is especially
2071 * true when cgroup namespaces are available). The reasoning is that in order
2072 * for us to have been able to start a container in the first place, the root
2073 * cgroup must have been a leaf node. Now, either the container's init system
2074 * has populated the cgroup and kept it as a leaf node, or it has created
2075 * subtrees. In the former case we simply attach to the leaf node we created
2076 * when we started the container; in the latter case we create our own
2077 * cgroup for the attaching process.
2078 */
2079 static int __cg_unified_attach(const struct hierarchy *h, const char *name,
2080 const char *lxcpath, const char *pidstr,
2081 size_t pidstr_len, const char *controller)
2082 {
2083 int ret;
2084 size_t len;
2085 int fret = -1, idx = 0;
2086 char *base_path = NULL, *container_cgroup = NULL, *full_path = NULL;
2087
2088 container_cgroup = lxc_cmd_get_cgroup_path(name, lxcpath, controller);
2089 /* not running */
2090 if (!container_cgroup)
2091 return 0;
2092
2093 base_path = must_make_path(h->mountpoint, container_cgroup, NULL);
2094 full_path = must_make_path(base_path, "cgroup.procs", NULL);
2095 /* cgroup is populated */
2096 ret = lxc_write_to_file(full_path, pidstr, pidstr_len, false, 0666);
2097 if (ret < 0 && errno != EBUSY)
2098 goto on_error;
2099
2100 if (ret == 0)
2101 goto on_success;
2102
2103 free(full_path);
2104
2105 len = strlen(base_path) + STRLITERALLEN("/lxc-1000") +
2106 STRLITERALLEN("/cgroup-procs");
2107 full_path = must_realloc(NULL, len + 1);
2108 do {
2109 if (idx)
2110 ret = snprintf(full_path, len + 1, "%s/lxc-%d",
2111 base_path, idx);
2112 else
2113 ret = snprintf(full_path, len + 1, "%s/lxc", base_path);
2114 if (ret < 0 || (size_t)ret >= len + 1)
2115 goto on_error;
2116
2117 ret = mkdir_p(full_path, 0755);
2118 if (ret < 0 && errno != EEXIST)
2119 goto on_error;
2120
2121 (void)strlcat(full_path, "/cgroup.procs", len + 1);
2122 ret = lxc_write_to_file(full_path, pidstr, pidstr_len, false, 0666);
2123 if (ret == 0)
2124 goto on_success;
2125
2126 /* this is a non-leaf node */
2127 if (errno != EBUSY)
2128 goto on_error;
2129
2130 idx++;
2131 } while (idx < 1000);
2132
2133 on_success:
2134 if (idx < 1000)
2135 fret = 0;
2136
2137 on_error:
2138 free(base_path);
2139 free(container_cgroup);
2140 free(full_path);
2141
2142 return fret;
2143 }
2144
2145 __cgfsng_ops static bool cgfsng_attach(struct cgroup_ops *ops, const char *name,
2146 const char *lxcpath, pid_t pid)
2147 {
2148 int i, len, ret;
2149 char pidstr[INTTYPE_TO_STRLEN(pid_t)];
2150
2151 len = snprintf(pidstr, sizeof(pidstr), "%d", pid);
2152 if (len < 0 || (size_t)len >= sizeof(pidstr))
2153 return false;
2154
2155 for (i = 0; ops->hierarchies[i]; i++) {
2156 char *path;
2157 char *fullpath = NULL;
2158 struct hierarchy *h = ops->hierarchies[i];
2159
2160 if (h->version == CGROUP2_SUPER_MAGIC) {
2161 ret = __cg_unified_attach(h, name, lxcpath, pidstr, len,
2162 h->controllers[0]);
2163 if (ret < 0)
2164 return false;
2165
2166 continue;
2167 }
2168
2169 path = lxc_cmd_get_cgroup_path(name, lxcpath, h->controllers[0]);
2170 /* not running */
2171 if (!path)
2172 continue;
2173
2174 fullpath = build_full_cgpath_from_monitorpath(h, path, "cgroup.procs");
2175 free(path);
2176 ret = lxc_write_to_file(fullpath, pidstr, len, false, 0666);
2177 if (ret < 0) {
2178 SYSERROR("Failed to attach %d to %s", (int)pid, fullpath);
2179 free(fullpath);
2180 return false;
2181 }
2182 free(fullpath);
2183 }
2184
2185 return true;
2186 }
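
/* Minimal usage sketch (hypothetical caller; container name, lxcpath and pid
 * are made up):
 *
 *     struct cgroup_ops *ops = cgfsng_ops_init(conf);
 *     if (ops && !ops->attach(ops, "c1", "/var/lib/lxc", pid))
 *             ERROR("Failed to attach %d", pid);
 *
 * The loop above writes the pid into cgroup.procs of every writable
 * hierarchy the running container belongs to.
 */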
2187
2188 /* Called externally (e.g. from 'lxc-cgroup') to query cgroup limits. Here we
2189 * don't have the cgroup data set up, so we ask the running container through
2190 * the commands API for the cgroup path.
2191 */
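/* For example (illustrative): a request for "memory.limit_in_bytes" is split
 * at the first '.' to obtain the controller name "memory"; the running
 * container is asked for its "memory" cgroup path, and the value is then
 * read from <mountpoint>/<path>/memory.limit_in_bytes.
 */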
2192 __cgfsng_ops static int cgfsng_get(struct cgroup_ops *ops, const char *filename,
2193 char *value, size_t len, const char *name,
2194 const char *lxcpath)
2195 {
2196 int ret = -1;
2197 size_t controller_len;
2198 char *controller, *p, *path;
2199 struct hierarchy *h;
2200
2201 controller_len = strlen(filename);
2202 controller = alloca(controller_len + 1);
2203 (void)strlcpy(controller, filename, controller_len + 1);
2204
2205 p = strchr(controller, '.');
2206 if (p)
2207 *p = '\0';
2208
2209 path = lxc_cmd_get_cgroup_path(name, lxcpath, controller);
2210 /* not running */
2211 if (!path)
2212 return -1;
2213
2214 h = get_hierarchy(ops, controller);
2215 if (h) {
2216 char *fullpath;
2217
2218 fullpath = build_full_cgpath_from_monitorpath(h, path, filename);
2219 ret = lxc_read_from_file(fullpath, value, len);
2220 free(fullpath);
2221 }
2222 free(path);
2223
2224 return ret;
2225 }
2226
2227 /* Called externally (e.g. from 'lxc-cgroup') to set new cgroup limits. Here we
2228 * don't have the cgroup data set up, so we ask the running container through
2229 * the commands API for the cgroup path.
2230 */
2231 __cgfsng_ops static int cgfsng_set(struct cgroup_ops *ops,
2232 const char *filename, const char *value,
2233 const char *name, const char *lxcpath)
2234 {
2235 int ret = -1;
2236 size_t controller_len;
2237 char *controller, *p, *path;
2238 struct hierarchy *h;
2239
2240 controller_len = strlen(filename);
2241 controller = alloca(controller_len + 1);
2242 (void)strlcpy(controller, filename, controller_len + 1);
2243
2244 p = strchr(controller, '.');
2245 if (p)
2246 *p = '\0';
2247
2248 path = lxc_cmd_get_cgroup_path(name, lxcpath, controller);
2249 /* not running */
2250 if (!path)
2251 return -1;
2252
2253 h = get_hierarchy(ops, controller);
2254 if (h) {
2255 char *fullpath;
2256
2257 fullpath = build_full_cgpath_from_monitorpath(h, path, filename);
2258 ret = lxc_write_to_file(fullpath, value, strlen(value), false, 0666);
2259 free(fullpath);
2260 }
2261 free(path);
2262
2263 return ret;
2264 }
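
/* From the command line this is what backs, for example,
 *
 *     lxc-cgroup -n c1 memory.limit_in_bytes 268435456
 *
 * (container name and value are illustrative): the running container's
 * "memory" cgroup is resolved via the commands API and "268435456" is
 * written to memory.limit_in_bytes in that cgroup.
 */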
2265
2266 /* Take a devices cgroup line of the form
2267 *     /dev/foo rwx
2268 * and convert it to a valid
2269 *     type major:minor mode
2270 * line. Return <0 on error. @dest is a preallocated buffer long enough to
2271 * hold the output.
2272 */
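/* For example, "/dev/fuse rwm" would become "c 10:229 rwm" on a typical
 * system (the major:minor pair comes from stat() on the path, so the exact
 * numbers depend on the host).
 */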
2273 static int convert_devpath(const char *invalue, char *dest)
2274 {
2275 int n_parts;
2276 char *p, *path, type;
2277 unsigned long minor, major;
2278 struct stat sb;
2279 int ret = -EINVAL;
2280 char *mode = NULL;
2281
2282 path = must_copy_string(invalue);
2283
2284 /* Read the path followed by the mode. Ignore any trailing text.
2285 * A ' # comment' would be legal; technically other text is not,
2286 * but we could check for that if we cared to.
2287 */
2288 for (n_parts = 1, p = path; *p && n_parts < 3; p++) {
2289 if (*p != ' ')
2290 continue;
2291 *p = '\0';
2292
2293 if (n_parts != 1)
2294 break;
2295 p++;
2296 n_parts++;
2297
2298 while (*p == ' ')
2299 p++;
2300
2301 mode = p;
2302
2303 if (*p == '\0')
2304 goto out;
2305 }
2306
2307 if (n_parts == 1)
2308 goto out;
2309
2310 ret = stat(path, &sb);
2311 if (ret < 0)
2312 goto out;
2313
2314 mode_t m = sb.st_mode & S_IFMT;
2315 switch (m) {
2316 case S_IFBLK:
2317 type = 'b';
2318 break;
2319 case S_IFCHR:
2320 type = 'c';
2321 break;
2322 default:
2323 ERROR("Unsupported device type %i for \"%s\"", m, path);
2324 ret = -EINVAL;
2325 goto out;
2326 }
2327
2328 major = MAJOR(sb.st_rdev);
2329 minor = MINOR(sb.st_rdev);
2330 ret = snprintf(dest, 50, "%c %lu:%lu %s", type, major, minor, mode);
2331 if (ret < 0 || ret >= 50) {
2332 ERROR("Error on configuration value \"%c %lu:%lu %s\" (max 50 "
2333 "chars)", type, major, minor, mode);
2334 ret = -ENAMETOOLONG;
2335 goto out;
2336 }
2337 ret = 0;
2338
2339 out:
2340 free(path);
2341 return ret;
2342 }
2343
2344 /* Called from setup_limits - here we already know the container's cgroup
2345 * paths because we created the cgroups ourselves.
2346 */
2347 static int cg_legacy_set_data(struct cgroup_ops *ops, const char *filename,
2348 const char *value)
2349 {
2350 size_t len;
2351 char *fullpath, *p;
2352 /* "b|c <2^64-1>:<2^64-1> r|w|m" = 47 chars max */
2353 char converted_value[50];
2354 struct hierarchy *h;
2355 int ret = 0;
2356 char *controller = NULL;
2357
2358 len = strlen(filename);
2359 controller = alloca(len + 1);
2360 (void)strlcpy(controller, filename, len + 1);
2361
2362 p = strchr(controller, '.');
2363 if (p)
2364 *p = '\0';
2365
2366 if (strcmp("devices.allow", filename) == 0 && value[0] == '/') {
2367 ret = convert_devpath(value, converted_value);
2368 if (ret < 0)
2369 return ret;
2370 value = converted_value;
2371 }
2372
2373 h = get_hierarchy(ops, controller);
2374 if (!h) {
2375 ERROR("Failed to setup limits for the \"%s\" controller. "
2376 "The controller seems to be unused by \"cgfsng\" cgroup "
2377 "driver or not enabled on the cgroup hierarchy",
2378 controller);
2379 errno = ENOENT;
2380 return -ENOENT;
2381 }
2382
2383 fullpath = must_make_path(h->container_full_path, filename, NULL);
2384 ret = lxc_write_to_file(fullpath, value, strlen(value), false, 0666);
2385 free(fullpath);
2386 return ret;
2387 }
2388
2389 static bool __cg_legacy_setup_limits(struct cgroup_ops *ops,
2390 struct lxc_list *cgroup_settings,
2391 bool do_devices)
2392 {
2393 struct lxc_list *iterator, *next, *sorted_cgroup_settings;
2394 struct lxc_cgroup *cg;
2395 bool ret = false;
2396
2397 if (lxc_list_empty(cgroup_settings))
2398 return true;
2399
2400 sorted_cgroup_settings = sort_cgroup_settings(cgroup_settings);
2401 if (!sorted_cgroup_settings)
2402 return false;
2403
2404 lxc_list_for_each(iterator, sorted_cgroup_settings) {
2405 cg = iterator->elem;
2406
2407 if (do_devices == !strncmp("devices", cg->subsystem, 7)) {
2408 if (cg_legacy_set_data(ops, cg->subsystem, cg->value)) {
2409 if (do_devices && (errno == EACCES || errno == EPERM)) {
2410 WARN("Failed to set \"%s\" to \"%s\"",
2411 cg->subsystem, cg->value);
2412 continue;
2413 }
2414 WARN("Failed to set \"%s\" to \"%s\"",
2415 cg->subsystem, cg->value);
2416 goto out;
2417 }
2418 DEBUG("Set controller \"%s\" set to \"%s\"",
2419 cg->subsystem, cg->value);
2420 }
2421 }
2422
2423 ret = true;
2424 INFO("Limits for the legacy cgroup hierarchies have been setup");
2425 out:
2426 lxc_list_for_each_safe(iterator, sorted_cgroup_settings, next) {
2427 lxc_list_del(iterator);
2428 free(iterator);
2429 }
2430 free(sorted_cgroup_settings);
2431 return ret;
2432 }
2433
2434 static bool __cg_unified_setup_limits(struct cgroup_ops *ops,
2435 struct lxc_list *cgroup_settings)
2436 {
2437 struct lxc_list *iterator;
2438 struct hierarchy *h = ops->unified;
2439
2440 if (lxc_list_empty(cgroup_settings))
2441 return true;
2442
2443 if (!h)
2444 return false;
2445
2446 lxc_list_for_each(iterator, cgroup_settings) {
2447 int ret;
2448 char *fullpath;
2449 struct lxc_cgroup *cg = iterator->elem;
2450
2451 fullpath = must_make_path(h->container_full_path, cg->subsystem, NULL);
2452 ret = lxc_write_to_file(fullpath, cg->value, strlen(cg->value), false, 0666);
2453 free(fullpath);
2454 if (ret < 0) {
2455 SYSERROR("Failed to set \"%s\" to \"%s\"",
2456 cg->subsystem, cg->value);
2457 return false;
2458 }
2459 TRACE("Set \"%s\" to \"%s\"", cg->subsystem, cg->value);
2460 }
2461
2462 INFO("Limits for the unified cgroup hierarchy have been setup");
2463 return true;
2464 }
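
/* For example (illustrative config): a container with
 *
 *     lxc.cgroup2.pids.max = 100
 *
 * ends up here with cg->subsystem == "pids.max" and cg->value == "100", and
 * "100" is written to <container cgroup>/pids.max.
 */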
2465
2466 __cgfsng_ops static bool cgfsng_setup_limits(struct cgroup_ops *ops,
2467 struct lxc_conf *conf,
2468 bool do_devices)
2469 {
2470 bool bret;
2471
2472 bret = __cg_legacy_setup_limits(ops, &conf->cgroup, do_devices);
2473 if (!bret)
2474 return false;
2475
2476 return __cg_unified_setup_limits(ops, &conf->cgroup2);
2477 }
2478
2479 static bool cgroup_use_wants_controllers(const struct cgroup_ops *ops,
2480 char **controllers)
2481 {
2482 char **cur_ctrl, **cur_use;
2483
2484 if (!ops->cgroup_use)
2485 return true;
2486
2487 for (cur_ctrl = controllers; cur_ctrl && *cur_ctrl; cur_ctrl++) {
2488 bool found = false;
2489
2490 for (cur_use = ops->cgroup_use; cur_use && *cur_use; cur_use++) {
2491 if (strcmp(*cur_use, *cur_ctrl) != 0)
2492 continue;
2493
2494 found = true;
2495 break;
2496 }
2497
2498 if (found)
2499 continue;
2500
2501 return false;
2502 }
2503
2504 return true;
2505 }
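
/* For example (illustrative lists): with lxc.cgroup.use = "memory,pids" a
 * hierarchy offering only "memory" is accepted, while a hierarchy offering
 * "cpu,cpuacct" is rejected because neither of its controllers was
 * requested.
 */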
2506
2507 /* At startup, cg_hybrid_init() finds all the info we need about cgroup
2508 * mountpoints and current cgroups, and stores it in @ops.
2509 */
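/* The mountinfo lines parsed below look roughly like this (illustrative;
 * fields vary between systems):
 *
 *     34 25 0:29 / /sys/fs/cgroup/memory rw,nosuid,nodev,noexec,relatime shared:15 - cgroup cgroup rw,memory
 *     35 25 0:30 / /sys/fs/cgroup/unified rw,nosuid,nodev,noexec,relatime shared:16 - cgroup2 cgroup2 rw
 *
 * The filesystem type after the "-" separator tells us whether an entry is a
 * legacy (v1) or unified (v2) hierarchy.
 */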
2510 static bool cg_hybrid_init(struct cgroup_ops *ops, bool relative)
2511 {
2512 int ret;
2513 char *basecginfo;
2514 FILE *f;
2515 size_t len = 0;
2516 char *line = NULL;
2517 char **klist = NULL, **nlist = NULL;
2518
2519 /* Root-spawned containers escape the current cgroup, so use init's
2520 * cgroups as our base in that case.
2521 */
2522 if (!relative && (geteuid() == 0))
2523 basecginfo = read_file("/proc/1/cgroup");
2524 else
2525 basecginfo = read_file("/proc/self/cgroup");
2526 if (!basecginfo)
2527 return false;
2528
2529 ret = get_existing_subsystems(&klist, &nlist);
2530 if (ret < 0) {
2531 ERROR("Failed to retrieve available legacy cgroup controllers");
2532 free(basecginfo);
2533 return false;
2534 }
2535
2536 f = fopen("/proc/self/mountinfo", "r");
2537 if (!f) {
2538 ERROR("Failed to open \"/proc/self/mountinfo\"");
2539 free(basecginfo);
2540 return false;
2541 }
2542
2543 lxc_cgfsng_print_basecg_debuginfo(basecginfo, klist, nlist);
2544
2545 while (getline(&line, &len, f) != -1) {
2546 int type;
2547 bool writeable;
2548 struct hierarchy *new;
2549 char *base_cgroup = NULL, *mountpoint = NULL;
2550 char **controller_list = NULL;
2551
2552 type = get_cgroup_version(line);
2553 if (type == 0)
2554 continue;
2555
2556 if (type == CGROUP2_SUPER_MAGIC && ops->unified)
2557 continue;
2558
2559 if (ops->cgroup_layout == CGROUP_LAYOUT_UNKNOWN) {
2560 if (type == CGROUP2_SUPER_MAGIC)
2561 ops->cgroup_layout = CGROUP_LAYOUT_UNIFIED;
2562 else if (type == CGROUP_SUPER_MAGIC)
2563 ops->cgroup_layout = CGROUP_LAYOUT_LEGACY;
2564 } else if (ops->cgroup_layout == CGROUP_LAYOUT_UNIFIED) {
2565 if (type == CGROUP_SUPER_MAGIC)
2566 ops->cgroup_layout = CGROUP_LAYOUT_HYBRID;
2567 } else if (ops->cgroup_layout == CGROUP_LAYOUT_LEGACY) {
2568 if (type == CGROUP2_SUPER_MAGIC)
2569 ops->cgroup_layout = CGROUP_LAYOUT_HYBRID;
2570 }
2571
2572 controller_list = cg_hybrid_get_controllers(klist, nlist, line, type);
2573 if (!controller_list && type == CGROUP_SUPER_MAGIC)
2574 continue;
2575
2576 if (type == CGROUP_SUPER_MAGIC)
2577 if (controller_list_is_dup(ops->hierarchies, controller_list))
2578 goto next;
2579
2580 mountpoint = cg_hybrid_get_mountpoint(line);
2581 if (!mountpoint) {
2582 ERROR("Failed parsing mountpoint from \"%s\"", line);
2583 goto next;
2584 }
2585
2586 if (type == CGROUP_SUPER_MAGIC)
2587 base_cgroup = cg_hybrid_get_current_cgroup(basecginfo, controller_list[0], CGROUP_SUPER_MAGIC);
2588 else
2589 base_cgroup = cg_hybrid_get_current_cgroup(basecginfo, NULL, CGROUP2_SUPER_MAGIC);
2590 if (!base_cgroup) {
2591 ERROR("Failed to find current cgroup");
2592 goto next;
2593 }
2594
2595 trim(base_cgroup);
2596 prune_init_scope(base_cgroup);
2597 if (type == CGROUP2_SUPER_MAGIC)
2598 writeable = test_writeable_v2(mountpoint, base_cgroup);
2599 else
2600 writeable = test_writeable_v1(mountpoint, base_cgroup);
2601 if (!writeable)
2602 goto next;
2603
2604 if (type == CGROUP2_SUPER_MAGIC) {
2605 char *cgv2_ctrl_path;
2606
2607 cgv2_ctrl_path = must_make_path(mountpoint, base_cgroup,
2608 "cgroup.controllers",
2609 NULL);
2610
2611 controller_list = cg_unified_get_controllers(cgv2_ctrl_path);
2612 free(cgv2_ctrl_path);
2613 if (!controller_list) {
2614 controller_list = cg_unified_make_empty_controller();
2615 TRACE("No controllers are enabled for "
2616 "delegation in the unified hierarchy");
2617 }
2618 }
2619
2620 /* Exclude all controllers that cgroup use does not want. */
2621 if (!cgroup_use_wants_controllers(ops, controller_list))
2622 goto next;
2623
2624 new = add_hierarchy(&ops->hierarchies, controller_list, mountpoint, base_cgroup, type);
2625 if (type == CGROUP2_SUPER_MAGIC && !ops->unified)
2626 ops->unified = new;
2627
2628 continue;
2629
2630 next:
2631 free_string_list(controller_list);
2632 free(mountpoint);
2633 free(base_cgroup);
2634 }
2635
2636 free_string_list(klist);
2637 free_string_list(nlist);
2638
2639 free(basecginfo);
2640
2641 fclose(f);
2642 free(line);
2643
2644 TRACE("Writable cgroup hierarchies:");
2645 lxc_cgfsng_print_hierarchies(ops);
2646
2647 /* Verify that all controllers listed in lxc.cgroup.use and all crucial
2648 * controllers are accounted for.
2649 */
2650 if (!all_controllers_found(ops))
2651 return false;
2652
2653 return true;
2654 }
2655
2656 static int cg_is_pure_unified(void)
2657 {
2658
2659 int ret;
2660 struct statfs fs;
2661
2662 ret = statfs("/sys/fs/cgroup", &fs);
2663 if (ret < 0)
2664 return -ENOMEDIUM;
2665
2666 if (is_fs_type(&fs, CGROUP2_SUPER_MAGIC))
2667 return CGROUP2_SUPER_MAGIC;
2668
2669 return 0;
2670 }
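
/* Note: CGROUP2_SUPER_MAGIC is the cgroup2 filesystem magic from
 * linux/magic.h (0x63677270, "cgrp" in ASCII), so the check above simply
 * tests whether /sys/fs/cgroup itself is a cgroup2 mount.
 */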
2671
2672 /* Get current cgroup from /proc/self/cgroup for the cgroupfs v2 hierarchy. */
2673 static char *cg_unified_get_current_cgroup(bool relative)
2674 {
2675 char *basecginfo, *base_cgroup;
2676 char *copy = NULL;
2677
2678 if (!relative && (geteuid() == 0))
2679 basecginfo = read_file("/proc/1/cgroup");
2680 else
2681 basecginfo = read_file("/proc/self/cgroup");
2682 if (!basecginfo)
2683 return NULL;
2684
2685 base_cgroup = strstr(basecginfo, "0::/");
2686 if (!base_cgroup)
2687 goto cleanup_on_err;
2688
2689 base_cgroup = base_cgroup + 3;
2690 copy = copy_to_eol(base_cgroup);
2691 if (!copy)
2692 goto cleanup_on_err;
2693
2694 cleanup_on_err:
2695 free(basecginfo);
2696 if (copy)
2697 trim(copy);
2698
2699 return copy;
2700 }
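
/* On a purely unified host /proc/self/cgroup contains a single v2 entry such
 * as (illustrative):
 *
 *     0::/user.slice/user-1000.slice/session-2.scope
 *
 * strstr() in the function above finds "0::/" and the "+ 3" skips "0::" so
 * that the copied string starts with the leading "/".
 */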
2701
2702 static int cg_unified_init(struct cgroup_ops *ops, bool relative)
2703 {
2704 int ret;
2705 char *mountpoint, *subtree_path;
2706 char **delegatable;
2707 char *base_cgroup = NULL;
2708
2709 ret = cg_is_pure_unified();
2710 if (ret == -ENOMEDIUM)
2711 return -ENOMEDIUM;
2712
2713 if (ret != CGROUP2_SUPER_MAGIC)
2714 return 0;
2715
2716 base_cgroup = cg_unified_get_current_cgroup(relative);
2717 if (!base_cgroup)
2718 return -EINVAL;
2719 prune_init_scope(base_cgroup);
2720
2721 /* We assume that we have already been given controllers to delegate
2722 * further down the hierarchy. If not, it is up to the user to delegate
2723 * them to us.
2724 */
2725 mountpoint = must_copy_string("/sys/fs/cgroup");
2726 subtree_path = must_make_path(mountpoint, base_cgroup,
2727 "cgroup.subtree_control", NULL);
2728 delegatable = cg_unified_get_controllers(subtree_path);
2729 free(subtree_path);
2730 if (!delegatable)
2731 delegatable = cg_unified_make_empty_controller();
2732 if (!delegatable[0])
2733 TRACE("No controllers are enabled for delegation");
2734
2735 /* TODO: If the user requested specific controllers via lxc.cgroup.use
2736 * we should verify them here. The reason I'm not doing it right now is
2737 * that I'm not convinced that lxc.cgroup.use is the way forward, since
2738 * it is a global property. I would much rather have an option that lets
2739 * you request controllers per container.
2740 */
2741
2742 add_hierarchy(&ops->hierarchies, delegatable, mountpoint, base_cgroup, CGROUP2_SUPER_MAGIC);
2743
2744 ops->cgroup_layout = CGROUP_LAYOUT_UNIFIED;
2745 return CGROUP2_SUPER_MAGIC;
2746 }
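
/* The delegatable controller list above is read from cgroup.subtree_control,
 * which is a space-separated list such as (illustrative)
 *
 *     cpuset cpu io memory pids
 *
 * An empty file simply means no controllers have been delegated to us yet.
 */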
2747
2748 static bool cg_init(struct cgroup_ops *ops, struct lxc_conf *conf)
2749 {
2750 int ret;
2751 const char *tmp;
2752 bool relative = conf->cgroup_meta.relative;
2753
2754 tmp = lxc_global_config_value("lxc.cgroup.use");
2755 if (tmp) {
2756 char *chop, *cur, *pin;
2757
2758 pin = must_copy_string(tmp);
2759 chop = pin;
2760
2761 lxc_iterate_parts(cur, chop, ",") {
2762 must_append_string(&ops->cgroup_use, cur);
2763 }
2764
2765 free(pin);
2766 }
2767
2768 ret = cg_unified_init(ops, relative);
2769 if (ret < 0)
2770 return false;
2771
2772 if (ret == CGROUP2_SUPER_MAGIC)
2773 return true;
2774
2775 return cg_hybrid_init(ops, relative);
2776 }
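
/* For example (illustrative): lxc.cgroup.use = "memory,pids" in the system
 * configuration results in ops->cgroup_use holding the two strings "memory"
 * and "pids", which cg_hybrid_init() later uses to filter out all other
 * hierarchies.
 */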
2777
2778 __cgfsng_ops static bool cgfsng_data_init(struct cgroup_ops *ops)
2779 {
2780 const char *cgroup_pattern;
2781
2782 /* copy system-wide cgroup information */
2783 cgroup_pattern = lxc_global_config_value("lxc.cgroup.pattern");
2784 if (!cgroup_pattern) {
2785 /* lxc.cgroup.pattern is only NULL on error. */
2786 ERROR("Failed to retrieve cgroup pattern");
2787 return false;
2788 }
2789 ops->cgroup_pattern = must_copy_string(cgroup_pattern);
2790 ops->monitor_pattern = MONITOR_CGROUP;
2791
2792 return true;
2793 }
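
/* lxc.cgroup.pattern is a template for the payload cgroup name; a value such
 * as "lxc/%n" (illustrative) has "%n" replaced with the container name when
 * the payload cgroup is created.
 */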
2794
2795 struct cgroup_ops *cgfsng_ops_init(struct lxc_conf *conf)
2796 {
2797 struct cgroup_ops *cgfsng_ops;
2798
2799 cgfsng_ops = malloc(sizeof(struct cgroup_ops));
2800 if (!cgfsng_ops)
2801 return NULL;
2802
2803 memset(cgfsng_ops, 0, sizeof(struct cgroup_ops));
2804 cgfsng_ops->cgroup_layout = CGROUP_LAYOUT_UNKNOWN;
2805
2806 if (!cg_init(cgfsng_ops, conf)) {
2807 free(cgfsng_ops);
2808 return NULL;
2809 }
2810
2811 cgfsng_ops->data_init = cgfsng_data_init;
2812 cgfsng_ops->payload_destroy = cgfsng_payload_destroy;
2813 cgfsng_ops->monitor_destroy = cgfsng_monitor_destroy;
2814 cgfsng_ops->monitor_create = cgfsng_monitor_create;
2815 cgfsng_ops->monitor_enter = cgfsng_monitor_enter;
2816 cgfsng_ops->payload_create = cgfsng_payload_create;
2817 cgfsng_ops->payload_enter = cgfsng_payload_enter;
2818 cgfsng_ops->escape = cgfsng_escape;
2819 cgfsng_ops->num_hierarchies = cgfsng_num_hierarchies;
2820 cgfsng_ops->get_hierarchies = cgfsng_get_hierarchies;
2821 cgfsng_ops->get_cgroup = cgfsng_get_cgroup;
2822 cgfsng_ops->get = cgfsng_get;
2823 cgfsng_ops->set = cgfsng_set;
2824 cgfsng_ops->unfreeze = cgfsng_unfreeze;
2825 cgfsng_ops->setup_limits = cgfsng_setup_limits;
2826 cgfsng_ops->driver = "cgfsng";
2827 cgfsng_ops->version = "1.0.0";
2828 cgfsng_ops->attach = cgfsng_attach;
2829 cgfsng_ops->chown = cgfsng_chown;
2830 cgfsng_ops->mount = cgfsng_mount;
2831 cgfsng_ops->nrtasks = cgfsng_nrtasks;
2832
2833 return cgfsng_ops;
2834 }
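
/* Minimal sketch of how this driver is typically consumed (hypothetical
 * caller; the real driver selection lives in cgroup.c):
 *
 *     struct cgroup_ops *ops = cgfsng_ops_init(conf);
 *     if (!ops)
 *             return NULL;
 *     if (!ops->data_init(ops))
 *             return NULL;
 *
 * cg_init() has already detected the cgroup layout by the time
 * cgfsng_ops_init() returns, so later calls go through the function pointers
 * assigned above and can rely on ops->hierarchies being populated.
 */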