git.proxmox.com Git - mirror_lxc.git/blob - src/lxc/cgroups/cgfsng.c
7474ba140f745a2ac3fdb41623e2e5f7982e2983
1 /*
2 * lxc: linux Container library
3 *
4 * Copyright © 2016 Canonical Ltd.
5 *
6 * Authors:
7 * Serge Hallyn <serge.hallyn@ubuntu.com>
8 * Christian Brauner <christian.brauner@ubuntu.com>
9 *
10 * This library is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
14 *
15 * This library is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
19 *
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with this library; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 */
24
25 /*
26 * cgfs-ng.c: this is a new, simplified implementation of a filesystem
27 * cgroup backend. The original cgfs.c was designed to be as flexible
28 * as possible. It would try to find cgroup filesystems no matter where
29 * or how you had them mounted, and deduce the most usable mount for
30 * each controller.
31 *
32 * This new implementation assumes that cgroup filesystems are mounted
33 * under /sys/fs/cgroup/clist where clist is either the controller, or
34 * a comma-separated list of controllers.
35 */
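/* For example, on a host using such a layout the relevant mountpoints look
 * roughly like this (illustrative, the exact controller set varies per system):
 *
 *   /sys/fs/cgroup/cpu,cpuacct
 *   /sys/fs/cgroup/memory
 *   /sys/fs/cgroup/systemd          (named hierarchy, i.e. "name=systemd")
 *   /sys/fs/cgroup/unified          (cgroup2 hierarchy on hybrid layouts)
 */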
36
37 #ifndef _GNU_SOURCE
38 #define _GNU_SOURCE 1
39 #endif
40 #include <ctype.h>
41 #include <dirent.h>
42 #include <errno.h>
43 #include <grp.h>
44 #include <linux/kdev_t.h>
45 #include <linux/types.h>
46 #include <stdint.h>
47 #include <stdio.h>
48 #include <stdlib.h>
49 #include <string.h>
50 #include <sys/types.h>
51 #include <unistd.h>
52
53 #include "caps.h"
54 #include "cgroup.h"
55 #include "cgroup_utils.h"
56 #include "commands.h"
57 #include "conf.h"
58 #include "config.h"
59 #include "log.h"
60 #include "macro.h"
61 #include "storage/storage.h"
62 #include "utils.h"
63
64 #ifndef HAVE_STRLCPY
65 #include "include/strlcpy.h"
66 #endif
67
68 #ifndef HAVE_STRLCAT
69 #include "include/strlcat.h"
70 #endif
71
72 lxc_log_define(cgfsng, cgroup);
73
74 static void free_string_list(char **clist)
75 {
76 int i;
77
78 if (!clist)
79 return;
80
81 for (i = 0; clist[i]; i++)
82 free(clist[i]);
83
84 free(clist);
85 }
86
87 /* Allocate memory, do not fail. */
88 static void *must_alloc(size_t sz)
89 {
90 return must_realloc(NULL, sz);
91 }
92
93 /* Given a pointer to a null-terminated array of pointers, realloc to add one
94 * entry, and point the new entry to NULL. Do not fail. Return the index to the
95 * second-to-last entry - that is, the one which is now available for use
96 * (keeping the list null-terminated).
97 */
98 static int append_null_to_list(void ***list)
99 {
100 int newentry = 0;
101
102 if (*list)
103 for (; (*list)[newentry]; newentry++)
104 ;
105
106 *list = must_realloc(*list, (newentry + 2) * sizeof(void **));
107 (*list)[newentry + 1] = NULL;
108 return newentry;
109 }
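/* Usage sketch: starting from list == NULL, the first call allocates a
 * two-element array { slot, NULL } and returns 0; every further call grows the
 * array by one slot, keeps the terminating NULL, and returns the index of the
 * newly usable slot.
 */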
110
111 /* Given a null-terminated array of strings, check whether @entry is one of the
112 * strings.
113 */
114 static bool string_in_list(char **list, const char *entry)
115 {
116 int i;
117
118 if (!list)
119 return false;
120
121 for (i = 0; list[i]; i++)
122 if (strcmp(list[i], entry) == 0)
123 return true;
124
125 return false;
126 }
127
128 /* Return a copy of @entry prepending "name=", i.e. turn "systemd" into
129 * "name=systemd". Do not fail.
130 */
131 static char *cg_legacy_must_prefix_named(char *entry)
132 {
133 size_t len;
134 char *prefixed;
135
136 len = strlen(entry);
137 prefixed = must_alloc(len + 6);
138
139 memcpy(prefixed, "name=", STRLITERALLEN("name="));
140 memcpy(prefixed + STRLITERALLEN("name="), entry, len);
141 prefixed[len + 5] = '\0';
142
143 return prefixed;
144 }
145
146 /* Append an entry to the clist. Do not fail. @clist must be NULL the first time
147 * we are called.
148 *
149 * We also handle named subsystems here. Any controller which is not a kernel
150 * subsystem is prefixed with "name=". Any controller which is both a kernel and
151 * a named subsystem we refuse to use, because we cannot be sure which is meant.
152 * (TODO: We could work around this in some cases by just remounting to be
153 * unambiguous, or by comparing mountpoint contents with current cgroup.)
154 *
155 * The last entry will always be NULL.
156 */
157 static void must_append_controller(char **klist, char **nlist, char ***clist,
158 char *entry)
159 {
160 int newentry;
161 char *copy;
162
163 if (string_in_list(klist, entry) && string_in_list(nlist, entry)) {
164 ERROR("Refusing to use ambiguous controller \"%s\"", entry);
165 ERROR("It is both a named and kernel subsystem");
166 return;
167 }
168
169 newentry = append_null_to_list((void ***)clist);
170
171 if (strncmp(entry, "name=", 5) == 0)
172 copy = must_copy_string(entry);
173 else if (string_in_list(klist, entry))
174 copy = must_copy_string(entry);
175 else
176 copy = cg_legacy_must_prefix_named(entry);
177
178 (*clist)[newentry] = copy;
179 }
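/* For instance (hypothetical input): appending "cpu" when "cpu" is in @klist
 * stores it unchanged, appending "systemd" when it appears in neither list
 * stores "name=systemd", and an entry found in both @klist and @nlist is
 * rejected with an error and not appended.
 */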
180
181 /* Given a handler's cgroup data, return the struct hierarchy for the controller
182 * @c, or NULL if there is none.
183 */
184 struct hierarchy *get_hierarchy(struct cgroup_ops *ops, const char *controller)
185 {
186 int i;
187
188 errno = ENOENT;
189
190 if (!ops->hierarchies) {
191 TRACE("There are no useable cgroup controllers");
192 return NULL;
193 }
194
195 for (i = 0; ops->hierarchies[i]; i++) {
196 if (!controller) {
197 /* This is the empty unified hierarchy. */
198 if (ops->hierarchies[i]->controllers &&
199 !ops->hierarchies[i]->controllers[0])
200 return ops->hierarchies[i];
201
202 continue;
203 }
204
205 if (string_in_list(ops->hierarchies[i]->controllers, controller))
206 return ops->hierarchies[i];
207 }
208
209 if (controller)
210 WARN("There is no useable %s controller", controller);
211 else
212 WARN("There is no empty unified cgroup hierarchy");
213
214 return NULL;
215 }
216
217 #define BATCH_SIZE 50
218 static void batch_realloc(char **mem, size_t oldlen, size_t newlen)
219 {
220 int newbatches = (newlen / BATCH_SIZE) + 1;
221 int oldbatches = (oldlen / BATCH_SIZE) + 1;
222
223 if (!*mem || newbatches > oldbatches) {
224 *mem = must_realloc(*mem, newbatches * BATCH_SIZE);
225 }
226 }
227
228 static void append_line(char **dest, size_t oldlen, char *new, size_t newlen)
229 {
230 size_t full = oldlen + newlen;
231
232 batch_realloc(dest, oldlen, full + 1);
233
234 memcpy(*dest + oldlen, new, newlen + 1);
235 }
236
237 /* Slurp in a whole file */
238 static char *read_file(const char *fnam)
239 {
240 FILE *f;
241 char *line = NULL, *buf = NULL;
242 size_t len = 0, fulllen = 0;
243 int linelen;
244
245 f = fopen(fnam, "r");
246 if (!f)
247 return NULL;
248 while ((linelen = getline(&line, &len, f)) != -1) {
249 append_line(&buf, fulllen, line, linelen);
250 fulllen += linelen;
251 }
252 fclose(f);
253 free(line);
254 return buf;
255 }
256
257 /* Taken and modified from the kernel sources. */
258 #define NBITS 32 /* bits in uint32_t */
259 #define DIV_ROUND_UP(n, d) (((n) + (d)-1) / (d))
260 #define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, NBITS)
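/* e.g. DIV_ROUND_UP(33, 32) == 2, so BITS_TO_LONGS(33) == 2: storing 33 bits
 * takes two uint32_t words.
 */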
261
262 static void set_bit(unsigned bit, uint32_t *bitarr)
263 {
264 bitarr[bit / NBITS] |= (1 << (bit % NBITS));
265 }
266
267 static void clear_bit(unsigned bit, uint32_t *bitarr)
268 {
269 bitarr[bit / NBITS] &= ~(1 << (bit % NBITS));
270 }
271
272 static bool is_set(unsigned bit, uint32_t *bitarr)
273 {
274 return (bitarr[bit / NBITS] & (1 << (bit % NBITS))) != 0;
275 }
276
277 /* Create cpumask from cpulist aka turn:
278 *
279 * 0,2-3
280 *
281 * into bit array
282 *
283 * 1 0 1 1
284 */
285 static uint32_t *lxc_cpumask(char *buf, size_t nbits)
286 {
287 char *token;
288 size_t arrlen;
289 uint32_t *bitarr;
290
291 arrlen = BITS_TO_LONGS(nbits);
292 bitarr = calloc(arrlen, sizeof(uint32_t));
293 if (!bitarr)
294 return NULL;
295
296 lxc_iterate_parts(token, buf, ",") {
297 errno = 0;
298 unsigned end, start;
299 char *range;
300
301 start = strtoul(token, NULL, 0);
302 end = start;
303 range = strchr(token, '-');
304 if (range)
305 end = strtoul(range + 1, NULL, 0);
306
307 if (!(start <= end)) {
308 free(bitarr);
309 return NULL;
310 }
311
312 if (end >= nbits) {
313 free(bitarr);
314 return NULL;
315 }
316
317 while (start <= end)
318 set_bit(start++, bitarr);
319 }
320
321 return bitarr;
322 }
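/* Worked example (illustrative): given the cpulist "0,2-3" in a writable
 * buffer and nbits = 4, lxc_cpumask() sets bits 0, 2 and 3, i.e.
 * bitarr[0] == 0xd. A list entry at or beyond nbits, such as "4" here, makes
 * the function free the array and return NULL.
 */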
323
324 /* Turn cpumask into simple, comma-separated cpulist. */
325 static char *lxc_cpumask_to_cpulist(uint32_t *bitarr, size_t nbits)
326 {
327 int ret;
328 size_t i;
329 char **cpulist = NULL;
330 char numstr[INTTYPE_TO_STRLEN(size_t)] = {0};
331
332 for (i = 0; i <= nbits; i++) {
333 if (!is_set(i, bitarr))
334 continue;
335
336 ret = snprintf(numstr, sizeof(numstr), "%zu", i);
337 if (ret < 0 || (size_t)ret >= sizeof(numstr)) {
338 lxc_free_array((void **)cpulist, free);
339 return NULL;
340 }
341
342 ret = lxc_append_string(&cpulist, numstr);
343 if (ret < 0) {
344 lxc_free_array((void **)cpulist, free);
345 return NULL;
346 }
347 }
348
349 if (!cpulist)
350 return NULL;
351
352 return lxc_string_join(",", (const char **)cpulist, false);
353 }
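/* Continuing the example above: lxc_cpumask_to_cpulist(bitarr, 4) turns that
 * mask back into the string "0,2,3". Ranges are not re-collapsed; every set
 * bit becomes its own comma-separated entry.
 */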
354
355 static ssize_t get_max_cpus(char *cpulist)
356 {
357 char *c1, *c2;
358 char *maxcpus = cpulist;
359 size_t cpus = 0;
360
361 c1 = strrchr(maxcpus, ',');
362 if (c1)
363 c1++;
364
365 c2 = strrchr(maxcpus, '-');
366 if (c2)
367 c2++;
368
369 if (!c1 && !c2)
370 c1 = maxcpus;
371 else if (c1 > c2)
372 c2 = c1;
373 else if (c1 < c2)
374 c1 = c2;
375 else if (!c1 && c2)
376 c1 = c2;
377
378 errno = 0;
379 cpus = strtoul(c1, NULL, 0);
380 if (errno != 0)
381 return -1;
382
383 return cpus;
384 }
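/* Illustration: for the cpulist "0-3,8" the positions after the last ',' and
 * the last '-' are compared, the later one wins, and strtoul() on it yields 8,
 * i.e. the highest cpu number mentioned in the list.
 */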
385
386 #define __ISOL_CPUS "/sys/devices/system/cpu/isolated"
387 static bool cg_legacy_filter_and_set_cpus(char *path, bool am_initialized)
388 {
389 int ret;
390 ssize_t i;
391 char *lastslash, *fpath, oldv;
392 ssize_t maxisol = 0, maxposs = 0;
393 char *cpulist = NULL, *isolcpus = NULL, *posscpus = NULL;
394 uint32_t *isolmask = NULL, *possmask = NULL;
395 bool bret = false, flipped_bit = false;
396
397 lastslash = strrchr(path, '/');
398 if (!lastslash) {
399 ERROR("Failed to detect \"/\" in \"%s\"", path);
400 return bret;
401 }
402 oldv = *lastslash;
403 *lastslash = '\0';
404 fpath = must_make_path(path, "cpuset.cpus", NULL);
405 posscpus = read_file(fpath);
406 if (!posscpus) {
407 SYSERROR("Failed to read file \"%s\"", fpath);
408 goto on_error;
409 }
410
411 /* Get maximum number of cpus found in possible cpuset. */
412 maxposs = get_max_cpus(posscpus);
413 if (maxposs < 0 || maxposs >= INT_MAX - 1)
414 goto on_error;
415
416 if (!file_exists(__ISOL_CPUS)) {
417 /* This system doesn't expose isolated cpus. */
418 DEBUG("The path \""__ISOL_CPUS"\" to read isolated cpus from does not exist");
419 cpulist = posscpus;
420 /* No isolated cpus but we weren't already initialized by
421 * someone. We should simply copy the parent's cpuset.cpus
422 * values.
423 */
424 if (!am_initialized) {
425 DEBUG("Copying cpu settings of parent cgroup");
426 goto copy_parent;
427 }
428 /* No isolated cpus but we were already initialized by someone.
429 * Nothing more to do for us.
430 */
431 goto on_success;
432 }
433
434 isolcpus = read_file(__ISOL_CPUS);
435 if (!isolcpus) {
436 SYSERROR("Failed to read file \""__ISOL_CPUS"\"");
437 goto on_error;
438 }
439 if (!isdigit(isolcpus[0])) {
440 TRACE("No isolated cpus detected");
441 cpulist = posscpus;
442 /* No isolated cpus but we weren't already initialized by
443 * someone. We should simply copy the parent's cpuset.cpus
444 * values.
445 */
446 if (!am_initialized) {
447 DEBUG("Copying cpu settings of parent cgroup");
448 goto copy_parent;
449 }
450 /* No isolated cpus but we were already initialized by someone.
451 * Nothing more to do for us.
452 */
453 goto on_success;
454 }
455
456 /* Get maximum number of cpus found in isolated cpuset. */
457 maxisol = get_max_cpus(isolcpus);
458 if (maxisol < 0 || maxisol >= INT_MAX - 1)
459 goto on_error;
460
461 if (maxposs < maxisol)
462 maxposs = maxisol;
463 maxposs++;
464
465 possmask = lxc_cpumask(posscpus, maxposs);
466 if (!possmask) {
467 ERROR("Failed to create cpumask for possible cpus");
468 goto on_error;
469 }
470
471 isolmask = lxc_cpumask(isolcpus, maxposs);
472 if (!isolmask) {
473 ERROR("Failed to create cpumask for isolated cpus");
474 goto on_error;
475 }
476
477 for (i = 0; i <= maxposs; i++) {
478 if (!is_set(i, isolmask) || !is_set(i, possmask))
479 continue;
480
481 flipped_bit = true;
482 clear_bit(i, possmask);
483 }
484
485 if (!flipped_bit) {
486 DEBUG("No isolated cpus present in cpuset");
487 goto on_success;
488 }
489 DEBUG("Removed isolated cpus from cpuset");
490
491 cpulist = lxc_cpumask_to_cpulist(possmask, maxposs);
492 if (!cpulist) {
493 ERROR("Failed to create cpu list");
494 goto on_error;
495 }
496
497 copy_parent:
498 *lastslash = oldv;
499 free(fpath);
500 fpath = must_make_path(path, "cpuset.cpus", NULL);
501 ret = lxc_write_to_file(fpath, cpulist, strlen(cpulist), false, 0666);
502 if (ret < 0) {
503 SYSERROR("Failed to write cpu list to \"%s\"", fpath);
504 goto on_error;
505 }
506
507 on_success:
508 bret = true;
509
510 on_error:
511 free(fpath);
512
513 free(isolcpus);
514 free(isolmask);
515
516 if (posscpus != cpulist)
517 free(posscpus);
518 free(possmask);
519
520 free(cpulist);
521 return bret;
522 }
523
524 /* Copy contents of parent(@path)/@file to @path/@file */
525 static bool copy_parent_file(char *path, char *file)
526 {
527 int ret;
528 char *fpath, *lastslash, oldv;
529 int len = 0;
530 char *value = NULL;
531
532 lastslash = strrchr(path, '/');
533 if (!lastslash) {
534 ERROR("Failed to detect \"/\" in \"%s\"", path);
535 return false;
536 }
537 oldv = *lastslash;
538 *lastslash = '\0';
539 fpath = must_make_path(path, file, NULL);
540 len = lxc_read_from_file(fpath, NULL, 0);
541 if (len <= 0)
542 goto on_error;
543
544 value = must_alloc(len + 1);
545 ret = lxc_read_from_file(fpath, value, len);
546 if (ret != len)
547 goto on_error;
548 free(fpath);
549
550 *lastslash = oldv;
551 fpath = must_make_path(path, file, NULL);
552 ret = lxc_write_to_file(fpath, value, len, false, 0666);
553 if (ret < 0)
554 SYSERROR("Failed to write \"%s\" to file \"%s\"", value, fpath);
555 free(fpath);
556 free(value);
557 return ret >= 0;
558
559 on_error:
560 SYSERROR("Failed to read file \"%s\"", fpath);
561 free(fpath);
562 free(value);
563 return false;
564 }
565
566 /* Initialize the cpuset hierarchy in the first directory of @cgname and set
567 * cgroup.clone_children so that children inherit settings. Since the
568 * h->container_base_path is populated by init or ourselves, we know it is
569 * already initialized.
570 */
571 static bool cg_legacy_handle_cpuset_hierarchy(struct hierarchy *h, char *cgname)
572 {
573 int ret;
574 char v;
575 char *cgpath, *clonechildrenpath, *slash;
576
577 if (!string_in_list(h->controllers, "cpuset"))
578 return true;
579
580 if (*cgname == '/')
581 cgname++;
582 slash = strchr(cgname, '/');
583 if (slash)
584 *slash = '\0';
585
586 cgpath = must_make_path(h->mountpoint, h->container_base_path, cgname, NULL);
587 if (slash)
588 *slash = '/';
589
590 ret = mkdir(cgpath, 0755);
591 if (ret < 0) {
592 if (errno != EEXIST) {
593 SYSERROR("Failed to create directory \"%s\"", cgpath);
594 free(cgpath);
595 return false;
596 }
597 }
598
599 clonechildrenpath = must_make_path(cgpath, "cgroup.clone_children", NULL);
600 /* unified hierarchy doesn't have clone_children */
601 if (!file_exists(clonechildrenpath)) {
602 free(clonechildrenpath);
603 free(cgpath);
604 return true;
605 }
606
607 ret = lxc_read_from_file(clonechildrenpath, &v, 1);
608 if (ret < 0) {
609 SYSERROR("Failed to read file \"%s\"", clonechildrenpath);
610 free(clonechildrenpath);
611 free(cgpath);
612 return false;
613 }
614
615 /* Make sure any isolated cpus are removed from cpuset.cpus. */
616 if (!cg_legacy_filter_and_set_cpus(cgpath, v == '1')) {
617 SYSERROR("Failed to remove isolated cpus");
618 free(clonechildrenpath);
619 free(cgpath);
620 return false;
621 }
622
623 /* Already set for us by someone else. */
624 if (v == '1') {
625 DEBUG("\"cgroup.clone_children\" was already set to \"1\"");
626 free(clonechildrenpath);
627 free(cgpath);
628 return true;
629 }
630
631 /* copy parent's settings */
632 if (!copy_parent_file(cgpath, "cpuset.mems")) {
633 SYSERROR("Failed to copy \"cpuset.mems\" settings");
634 free(cgpath);
635 free(clonechildrenpath);
636 return false;
637 }
638 free(cgpath);
639
640 ret = lxc_write_to_file(clonechildrenpath, "1", 1, false, 0666);
641 if (ret < 0) {
642 /* Set clone_children so children inherit our settings */
643 SYSERROR("Failed to write 1 to \"%s\"", clonechildrenpath);
644 free(clonechildrenpath);
645 return false;
646 }
647 free(clonechildrenpath);
648 return true;
649 }
650
651 /* Given two null-terminated lists of strings, return true if any string is in
652 * both.
653 */
654 static bool controller_lists_intersect(char **l1, char **l2)
655 {
656 int i;
657
658 if (!l1 || !l2)
659 return false;
660
661 for (i = 0; l1[i]; i++) {
662 if (string_in_list(l2, l1[i]))
663 return true;
664 }
665
666 return false;
667 }
668
669 /* For a null-terminated list of controllers @clist, return true if any of those
670 * controllers is already listed in the null-terminated list of hierarchies @hlist.
671 * Realistically, if one is present, all must be present.
672 */
673 static bool controller_list_is_dup(struct hierarchy **hlist, char **clist)
674 {
675 int i;
676
677 if (!hlist)
678 return false;
679
680 for (i = 0; hlist[i]; i++)
681 if (controller_lists_intersect(hlist[i]->controllers, clist))
682 return true;
683
684 return false;
685 }
686
687 /* Return true if the controller @entry is found in the null-terminated list of
688 * hierarchies @hlist.
689 */
690 static bool controller_found(struct hierarchy **hlist, char *entry)
691 {
692 int i;
693
694 if (!hlist)
695 return false;
696
697 for (i = 0; hlist[i]; i++)
698 if (string_in_list(hlist[i]->controllers, entry))
699 return true;
700
701 return false;
702 }
703
704 /* Return true if all of the controllers which we require have been found. The
705 * required list is freezer and anything in lxc.cgroup.use.
706 */
707 static bool all_controllers_found(struct cgroup_ops *ops)
708 {
709 char **cur;
710 struct hierarchy **hlist = ops->hierarchies;
711
712 if (!controller_found(hlist, "freezer")) {
713 ERROR("No freezer controller mountpoint found");
714 return false;
715 }
716
717 if (!ops->cgroup_use)
718 return true;
719
720 for (cur = ops->cgroup_use; cur && *cur; cur++)
721 if (!controller_found(hlist, *cur)) {
722 ERROR("No %s controller mountpoint found", *cur);
723 return false;
724 }
725
726 return true;
727 }
728
729 /* Get the controllers from a mountinfo line. There are other ways we could get
730 * this info. For lxcfs, field 3 is /cgroup/controller-list. For cgroupfs, we
731 * could parse the mount options. But we simply assume that the mountpoint must
732 * be /sys/fs/cgroup/controller-list.
733 */
734 static char **cg_hybrid_get_controllers(char **klist, char **nlist, char *line,
735 int type)
736 {
737 /* The fourth field is /sys/fs/cgroup/comma-delimited-controller-list
738 * for legacy hierarchies.
739 */
740 int i;
741 char *dup, *p2, *tok;
742 char *p = line, *sep = ",";
743 char **aret = NULL;
744
745 for (i = 0; i < 4; i++) {
746 p = strchr(p, ' ');
747 if (!p)
748 return NULL;
749 p++;
750 }
751
752 /* Note, if we change how mountinfo works, then our caller will need to
753 * verify /sys/fs/cgroup/ in this field.
754 */
755 if (strncmp(p, "/sys/fs/cgroup/", 15) != 0) {
756 ERROR("Found hierarchy not under /sys/fs/cgroup: \"%s\"", p);
757 return NULL;
758 }
759
760 p += 15;
761 p2 = strchr(p, ' ');
762 if (!p2) {
763 ERROR("Corrupt mountinfo");
764 return NULL;
765 }
766 *p2 = '\0';
767
768 if (type == CGROUP_SUPER_MAGIC) {
769 /* strdup() here for v1 hierarchies. Otherwise
770 * lxc_iterate_parts() will destroy mountpoints such as
771 * "/sys/fs/cgroup/cpu,cpuacct".
772 */
773 dup = strdup(p);
774 if (!dup)
775 return NULL;
776
777 lxc_iterate_parts(tok, dup, sep) {
778 must_append_controller(klist, nlist, &aret, tok);
779 }
780
781 free(dup);
782 }
783 *p2 = ' ';
784
785 return aret;
786 }
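/* Illustration with a made-up, abbreviated mountinfo line:
 *
 *   34 25 0:29 / /sys/fs/cgroup/cpu,cpuacct rw,nosuid ... - cgroup cgroup rw,cpu,cpuacct
 *
 * The mountpoint sits under /sys/fs/cgroup/, so with type == CGROUP_SUPER_MAGIC
 * and both names present in @klist this returns { "cpu", "cpuacct", NULL }.
 */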
787
788 static char **cg_unified_make_empty_controller(void)
789 {
790 int newentry;
791 char **aret = NULL;
792
793 newentry = append_null_to_list((void ***)&aret);
794 aret[newentry] = NULL;
795 return aret;
796 }
797
798 static char **cg_unified_get_controllers(const char *file)
799 {
800 char *buf, *tok;
801 char *sep = " \t\n";
802 char **aret = NULL;
803
804 buf = read_file(file);
805 if (!buf)
806 return NULL;
807
808 lxc_iterate_parts(tok, buf, sep) {
809 int newentry;
810 char *copy;
811
812 newentry = append_null_to_list((void ***)&aret);
813 copy = must_copy_string(tok);
814 aret[newentry] = copy;
815 }
816
817 free(buf);
818 return aret;
819 }
820
821 static struct hierarchy *add_hierarchy(struct hierarchy ***h, char **clist, char *mountpoint,
822 char *container_base_path, int type)
823 {
824 struct hierarchy *new;
825 int newentry;
826
827 new = must_alloc(sizeof(*new));
828 new->controllers = clist;
829 new->mountpoint = mountpoint;
830 new->container_base_path = container_base_path;
831 new->container_full_path = NULL;
832 new->monitor_full_path = NULL;
833 new->version = type;
834
835 newentry = append_null_to_list((void ***)h);
836 (*h)[newentry] = new;
837 return new;
838 }
839
840 /* Get a copy of the mountpoint from @line, which is a line from
841 * /proc/self/mountinfo.
842 */
843 static char *cg_hybrid_get_mountpoint(char *line)
844 {
845 int i;
846 size_t len;
847 char *p2;
848 char *p = line, *sret = NULL;
849
850 for (i = 0; i < 4; i++) {
851 p = strchr(p, ' ');
852 if (!p)
853 return NULL;
854 p++;
855 }
856
857 if (strncmp(p, "/sys/fs/cgroup/", 15) != 0)
858 return NULL;
859
860 p2 = strchr(p + 15, ' ');
861 if (!p2)
862 return NULL;
863 *p2 = '\0';
864
865 len = strlen(p);
866 sret = must_alloc(len + 1);
867 memcpy(sret, p, len);
868 sret[len] = '\0';
869 return sret;
870 }
871
872 /* Given a multi-line string, return a null-terminated copy of the current line. */
873 static char *copy_to_eol(char *p)
874 {
875 char *p2 = strchr(p, '\n'), *sret;
876 size_t len;
877
878 if (!p2)
879 return NULL;
880
881 len = p2 - p;
882 sret = must_alloc(len + 1);
883 memcpy(sret, p, len);
884 sret[len] = '\0';
885 return sret;
886 }
887
888 /* cgline: pointer to character after the first ':' in a line in a \n-terminated
889 * /proc/self/cgroup file. Check whether controller c is present.
890 */
891 static bool controller_in_clist(char *cgline, char *c)
892 {
893 char *tok, *eol, *tmp;
894 size_t len;
895
896 eol = strchr(cgline, ':');
897 if (!eol)
898 return false;
899
900 len = eol - cgline;
901 tmp = alloca(len + 1);
902 memcpy(tmp, cgline, len);
903 tmp[len] = '\0';
904
905 lxc_iterate_parts(tok, tmp, ",") {
906 if (strcmp(tok, c) == 0)
907 return true;
908 }
909
910 return false;
911 }
912
913 /* @basecginfo is a copy of /proc/$$/cgroup. Return the current cgroup for
914 * @controller.
915 */
916 static char *cg_hybrid_get_current_cgroup(char *basecginfo, char *controller,
917 int type)
918 {
919 char *p = basecginfo;
920
921 for (;;) {
922 bool is_cgv2_base_cgroup = false;
923
924 /* cgroup v2 entry in "/proc/<pid>/cgroup": "0::/some/path" */
925 if ((type == CGROUP2_SUPER_MAGIC) && (*p == '0'))
926 is_cgv2_base_cgroup = true;
927
928 p = strchr(p, ':');
929 if (!p)
930 return NULL;
931 p++;
932
933 if (is_cgv2_base_cgroup || (controller && controller_in_clist(p, controller))) {
934 p = strchr(p, ':');
935 if (!p)
936 return NULL;
937 p++;
938 return copy_to_eol(p);
939 }
940
941 p = strchr(p, '\n');
942 if (!p)
943 return NULL;
944 p++;
945 }
946 }
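/* Example with made-up /proc/self/cgroup content:
 *
 *   12:cpu,cpuacct:/lxc/c1
 *   0::/init.scope
 *
 * Asking for controller "cpuacct" with type CGROUP_SUPER_MAGIC returns a copy
 * of "/lxc/c1"; with type CGROUP2_SUPER_MAGIC (controller may be NULL) the
 * "0::" entry matches and "/init.scope" is returned.
 */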
947
948 static void must_append_string(char ***list, char *entry)
949 {
950 int newentry;
951 char *copy;
952
953 newentry = append_null_to_list((void ***)list);
954 copy = must_copy_string(entry);
955 (*list)[newentry] = copy;
956 }
957
958 static int get_existing_subsystems(char ***klist, char ***nlist)
959 {
960 FILE *f;
961 char *line = NULL;
962 size_t len = 0;
963
964 f = fopen("/proc/self/cgroup", "r");
965 if (!f)
966 return -1;
967
968 while (getline(&line, &len, f) != -1) {
969 char *p, *p2, *tok;
970 p = strchr(line, ':');
971 if (!p)
972 continue;
973 p++;
974 p2 = strchr(p, ':');
975 if (!p2)
976 continue;
977 *p2 = '\0';
978
979 /* If the kernel has cgroup v2 support, then /proc/self/cgroup
980 * contains an entry of the form:
981 *
982 * 0::/some/path
983 *
984 * In this case we use "cgroup2" as controller name.
985 */
986 if ((p2 - p) == 0) {
987 must_append_string(klist, "cgroup2");
988 continue;
989 }
990
991 lxc_iterate_parts(tok, p, ",") {
992 if (strncmp(tok, "name=", 5) == 0)
993 must_append_string(nlist, tok);
994 else
995 must_append_string(klist, tok);
996 }
997 }
998
999 free(line);
1000 fclose(f);
1001 return 0;
1002 }
1003
1004 static void trim(char *s)
1005 {
1006 size_t len;
1007
1008 len = strlen(s);
1009 while ((len > 1) && (s[len - 1] == '\n'))
1010 s[--len] = '\0';
1011 }
1012
1013 static void lxc_cgfsng_print_hierarchies(struct cgroup_ops *ops)
1014 {
1015 int i;
1016 struct hierarchy **it;
1017
1018 if (!ops->hierarchies) {
1019 TRACE(" No hierarchies found");
1020 return;
1021 }
1022
1023 TRACE(" Hierarchies:");
1024 for (i = 0, it = ops->hierarchies; it && *it; it++, i++) {
1025 int j;
1026 char **cit;
1027
1028 TRACE(" %d: base_cgroup: %s", i, (*it)->container_base_path ? (*it)->container_base_path : "(null)");
1029 TRACE(" mountpoint: %s", (*it)->mountpoint ? (*it)->mountpoint : "(null)");
1030 TRACE(" controllers:");
1031 for (j = 0, cit = (*it)->controllers; cit && *cit; cit++, j++)
1032 TRACE(" %d: %s", j, *cit);
1033 }
1034 }
1035
1036 static void lxc_cgfsng_print_basecg_debuginfo(char *basecginfo, char **klist,
1037 char **nlist)
1038 {
1039 int k;
1040 char **it;
1041
1042 TRACE("basecginfo is:");
1043 TRACE("%s", basecginfo);
1044
1045 for (k = 0, it = klist; it && *it; it++, k++)
1046 TRACE("kernel subsystem %d: %s", k, *it);
1047
1048 for (k = 0, it = nlist; it && *it; it++, k++)
1049 TRACE("named subsystem %d: %s", k, *it);
1050 }
1051
1052 static int cgroup_rmdir(struct hierarchy **hierarchies,
1053 const char *container_cgroup)
1054 {
1055 int i;
1056
1057 if (!container_cgroup || !hierarchies)
1058 return 0;
1059
1060 for (i = 0; hierarchies[i]; i++) {
1061 int ret;
1062 struct hierarchy *h = hierarchies[i];
1063
1064 if (!h->container_full_path)
1065 continue;
1066
1067 ret = recursive_destroy(h->container_full_path);
1068 if (ret < 0)
1069 WARN("Failed to destroy \"%s\"", h->container_full_path);
1070
1071 free(h->container_full_path);
1072 h->container_full_path = NULL;
1073 }
1074
1075 return 0;
1076 }
1077
1078 struct generic_userns_exec_data {
1079 struct hierarchy **hierarchies;
1080 const char *container_cgroup;
1081 struct lxc_conf *conf;
1082 uid_t origuid; /* target uid in parent namespace */
1083 char *path;
1084 };
1085
1086 static int cgroup_rmdir_wrapper(void *data)
1087 {
1088 int ret;
1089 struct generic_userns_exec_data *arg = data;
1090 uid_t nsuid = (arg->conf->root_nsuid_map != NULL) ? 0 : arg->conf->init_uid;
1091 gid_t nsgid = (arg->conf->root_nsgid_map != NULL) ? 0 : arg->conf->init_gid;
1092
1093 ret = setresgid(nsgid, nsgid, nsgid);
1094 if (ret < 0) {
1095 SYSERROR("Failed to setresgid(%d, %d, %d)", (int)nsgid,
1096 (int)nsgid, (int)nsgid);
1097 return -1;
1098 }
1099
1100 ret = setresuid(nsuid, nsuid, nsuid);
1101 if (ret < 0) {
1102 SYSERROR("Failed to setresuid(%d, %d, %d)", (int)nsuid,
1103 (int)nsuid, (int)nsuid);
1104 return -1;
1105 }
1106
1107 ret = setgroups(0, NULL);
1108 if (ret < 0 && errno != EPERM) {
1109 SYSERROR("Failed to setgroups(0, NULL)");
1110 return -1;
1111 }
1112
1113 return cgroup_rmdir(arg->hierarchies, arg->container_cgroup);
1114 }
1115
1116 __cgfsng_ops static void cgfsng_payload_destroy(struct cgroup_ops *ops,
1117 struct lxc_handler *handler)
1118 {
1119 int ret;
1120 struct generic_userns_exec_data wrap;
1121
1122 wrap.origuid = 0;
1123 wrap.container_cgroup = ops->container_cgroup;
1124 wrap.hierarchies = ops->hierarchies;
1125 wrap.conf = handler->conf;
1126
1127 if (handler->conf && !lxc_list_empty(&handler->conf->id_map))
1128 ret = userns_exec_1(handler->conf, cgroup_rmdir_wrapper, &wrap,
1129 "cgroup_rmdir_wrapper");
1130 else
1131 ret = cgroup_rmdir(ops->hierarchies, ops->container_cgroup);
1132 if (ret < 0) {
1133 WARN("Failed to destroy cgroups");
1134 return;
1135 }
1136 }
1137
1138 __cgfsng_ops static void cgfsng_monitor_destroy(struct cgroup_ops *ops,
1139 struct lxc_handler *handler)
1140 {
1141 int len;
1142 char *pivot_path;
1143 struct lxc_conf *conf = handler->conf;
1144 char pidstr[INTTYPE_TO_STRLEN(pid_t)];
1145
1146 if (!ops->hierarchies)
1147 return;
1148
1149 len = snprintf(pidstr, sizeof(pidstr), "%d", handler->monitor_pid);
1150 if (len < 0 || (size_t)len >= sizeof(pidstr))
1151 return;
1152
1153 for (int i = 0; ops->hierarchies[i]; i++) {
1154 int ret;
1155 char *chop;
1156 char pivot_cgroup[] = PIVOT_CGROUP;
1157 struct hierarchy *h = ops->hierarchies[i];
1158
1159 if (!h->monitor_full_path)
1160 continue;
1161
1162 if (conf && conf->cgroup_meta.dir)
1163 pivot_path = must_make_path(h->mountpoint,
1164 h->container_base_path,
1165 conf->cgroup_meta.dir,
1166 PIVOT_CGROUP,
1167 "cgroup.procs", NULL);
1168 else
1169 pivot_path = must_make_path(h->mountpoint,
1170 h->container_base_path,
1171 PIVOT_CGROUP,
1172 "cgroup.procs", NULL);
1173
1174 chop = strrchr(pivot_path, '/');
1175 if (chop)
1176 *chop = '\0';
1177
1178 /*
1179 * Make sure not to pass in the ro string literal PIVOT_CGROUP
1180 * here.
1181 */
1182 if (!cg_legacy_handle_cpuset_hierarchy(h, pivot_cgroup)) {
1183 WARN("Failed to handle legacy cpuset controller");
1184 goto next;
1185 }
1186
1187 ret = mkdir_p(pivot_path, 0755);
1188 if (ret < 0 && errno != EEXIST) {
1189 SYSWARN("Failed to create cgroup \"%s\"\n", pivot_path);
1190 goto next;
1191 }
1192
1193 if (chop)
1194 *chop = '/';
1195
1196 /* Move ourselves into the pivot cgroup to delete our own
1197 * cgroup.
1198 */
1199 ret = lxc_write_to_file(pivot_path, pidstr, len, false, 0666);
1200 if (ret != 0) {
1201 SYSWARN("Failed to move monitor %s to \"%s\"\n", pidstr, pivot_path);
1202 goto next;
1203 }
1204
1205 ret = recursive_destroy(h->monitor_full_path);
1206 if (ret < 0)
1207 WARN("Failed to destroy \"%s\"", h->monitor_full_path);
1208
1209 next:
1210 free(pivot_path);
1211 }
1212 }
1213
1214 static bool cg_unified_create_cgroup(struct hierarchy *h, char *cgname)
1215 {
1216 size_t i, parts_len;
1217 char **it;
1218 size_t full_len = 0;
1219 char *add_controllers = NULL, *cgroup = NULL;
1220 char **parts = NULL;
1221 bool bret = false;
1222
1223 if (h->version != CGROUP2_SUPER_MAGIC)
1224 return true;
1225
1226 if (!h->controllers)
1227 return true;
1228
1229 /* For now we simply enable all controllers that we have detected by
1230 * creating a string like "+memory +pids +cpu +io".
1231 * TODO: In the near future we might want to support "-<controller>"
1232 * etc. but whether supporting semantics like this makes sense will need
1233 * some thinking.
1234 */
1235 for (it = h->controllers; it && *it; it++) {
1236 full_len += strlen(*it) + 2;
1237 add_controllers = must_realloc(add_controllers, full_len + 1);
1238
1239 if (h->controllers[0] == *it)
1240 add_controllers[0] = '\0';
1241
1242 (void)strlcat(add_controllers, "+", full_len + 1);
1243 (void)strlcat(add_controllers, *it, full_len + 1);
1244
1245 if ((it + 1) && *(it + 1))
1246 (void)strlcat(add_controllers, " ", full_len + 1);
1247 }
1248
1249 parts = lxc_string_split(cgname, '/');
1250 if (!parts)
1251 goto on_error;
1252
1253 parts_len = lxc_array_len((void **)parts);
1254 if (parts_len > 0)
1255 parts_len--;
1256
1257 cgroup = must_make_path(h->mountpoint, h->container_base_path, NULL);
1258 for (i = 0; i < parts_len; i++) {
1259 int ret;
1260 char *target;
1261
1262 cgroup = must_append_path(cgroup, parts[i], NULL);
1263 target = must_make_path(cgroup, "cgroup.subtree_control", NULL);
1264 ret = lxc_write_to_file(target, add_controllers, full_len, false, 0666);
1265 free(target);
1266 if (ret < 0) {
1267 SYSERROR("Could not enable \"%s\" controllers in the "
1268 "unified cgroup \"%s\"", add_controllers, cgroup);
1269 goto on_error;
1270 }
1271 }
1272
1273 bret = true;
1274
1275 on_error:
1276 lxc_free_array((void **)parts, free);
1277 free(add_controllers);
1278 free(cgroup);
1279 return bret;
1280 }
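/* In effect, if the detected unified-hierarchy controllers are "cpu", "memory"
 * and "pids", every intermediate directory leading up to the new cgroup gets
 *
 *   lxc_write_to_file(".../cgroup.subtree_control", "+cpu +memory +pids", ...);
 *
 * so the controllers are delegated down to the container's cgroup
 * (illustrative values; the actual string depends on what was detected).
 */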
1281
1282 static int mkdir_eexist_on_last(const char *dir, mode_t mode)
1283 {
1284 const char *tmp = dir;
1285 const char *orig = dir;
1286 size_t orig_len;
1287
1288 orig_len = strlen(dir);
1289 do {
1290 int ret;
1291 size_t cur_len;
1292 char *makeme;
1293
1294 dir = tmp + strspn(tmp, "/");
1295 tmp = dir + strcspn(dir, "/");
1296
1297 errno = ENOMEM;
1298 cur_len = dir - orig;
1299 makeme = strndup(orig, cur_len);
1300 if (!makeme)
1301 return -1;
1302
1303 ret = mkdir(makeme, mode);
1304 if (ret < 0) {
1305 if ((errno != EEXIST) || (orig_len == cur_len)) {
1306 SYSERROR("Failed to create directory \"%s\"", makeme);
1307 free(makeme);
1308 return -1;
1309 }
1310 }
1311 free(makeme);
1312
1313 } while (tmp != dir);
1314
1315 return 0;
1316 }
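/* Behaviour sketch: this walks the path component by component, calling
 * mkdir() for each prefix and ignoring EEXIST everywhere except for the full
 * path, so pre-existing parent directories are fine while an already existing
 * final directory is reported as an error.
 */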
1317
1318 static bool monitor_create_path_for_hierarchy(struct hierarchy *h, char *cgname)
1319 {
1320 int ret;
1321
1322 if (!cg_legacy_handle_cpuset_hierarchy(h, cgname)) {
1323 ERROR("Failed to handle legacy cpuset controller");
1324 return false;
1325 }
1326
1327 h->monitor_full_path = must_make_path(h->mountpoint, h->container_base_path, cgname, NULL);
1328 ret = mkdir_eexist_on_last(h->monitor_full_path, 0755);
1329 if (ret < 0) {
1330 ERROR("Failed to create cgroup \"%s\"", h->monitor_full_path);
1331 return false;
1332 }
1333
1334 return cg_unified_create_cgroup(h, cgname);
1335 }
1336
1337 static bool container_create_path_for_hierarchy(struct hierarchy *h, char *cgname)
1338 {
1339 int ret;
1340
1341 if (!cg_legacy_handle_cpuset_hierarchy(h, cgname)) {
1342 ERROR("Failed to handle legacy cpuset controller");
1343 return false;
1344 }
1345
1346 h->container_full_path = must_make_path(h->mountpoint, h->container_base_path, cgname, NULL);
1347 ret = mkdir_eexist_on_last(h->container_full_path, 0755);
1348 if (ret < 0) {
1349 ERROR("Failed to create cgroup \"%s\"", h->container_full_path);
1350 return false;
1351 }
1352
1353 return cg_unified_create_cgroup(h, cgname);
1354 }
1355
1356 static void remove_path_for_hierarchy(struct hierarchy *h, char *cgname, bool monitor)
1357 {
1358 int ret;
1359 char *full_path;
1360
1361 if (monitor)
1362 full_path = h->monitor_full_path;
1363 else
1364 full_path = h->container_full_path;
1365
1366 ret = rmdir(full_path);
1367 if (ret < 0)
1368 SYSERROR("Failed to rmdir(\"%s\") from failed creation attempt", full_path);
1369
1370 free(full_path);
1371
1372 if (monitor)
1373 h->monitor_full_path = NULL;
1374 else
1375 h->container_full_path = NULL;
1376 }
1377
1378 __cgfsng_ops static inline bool cgfsng_monitor_create(struct cgroup_ops *ops,
1379 struct lxc_handler *handler)
1380 {
1381 char *monitor_cgroup, *offset, *tmp;
1382 int i, idx = 0;
1383 size_t len;
1384 bool bret = false;
1385 struct lxc_conf *conf = handler->conf;
1386
1387 if (!conf)
1388 return bret;
1389
1390 if (conf->cgroup_meta.dir)
1391 tmp = lxc_string_join("/",
1392 (const char *[]){conf->cgroup_meta.dir,
1393 ops->monitor_pattern,
1394 handler->name, NULL},
1395 false);
1396 else
1397 tmp = must_make_path(ops->monitor_pattern, handler->name, NULL);
1398 if (!tmp)
1399 return bret;
1400
1401 len = strlen(tmp) + 5; /* leave room for -NNN\0 */
1402 monitor_cgroup = must_realloc(tmp, len);
1403 offset = monitor_cgroup + len - 5;
1404 *offset = 0;
1405
1406 do {
1407 if (idx) {
1408 int ret = snprintf(offset, 5, "-%d", idx);
1409 if (ret < 0 || (size_t)ret >= 5)
1410 goto on_error;
1411 }
1412
1413 for (i = 0; ops->hierarchies[i]; i++) {
1414 if (!monitor_create_path_for_hierarchy(ops->hierarchies[i], monitor_cgroup)) {
1415 ERROR("Failed to create cgroup \"%s\"", ops->hierarchies[i]->monitor_full_path);
1416 free(ops->hierarchies[i]->monitor_full_path);
1417 ops->hierarchies[i]->monitor_full_path = NULL;
1418
1419 for (int j = 0; j < i; j++)
1420 remove_path_for_hierarchy(ops->hierarchies[j], monitor_cgroup, true);
1421
1422 idx++;
1423 break;
1424 }
1425 }
1426 } while (ops->hierarchies[i] && idx > 0 && idx < 1000);
1427
1428 if (idx < 1000) {
1429 bret = true;
1430 INFO("The monitor process uses \"%s\" as cgroup", monitor_cgroup);
1431 }
1432
1433 on_error:
1434 free(monitor_cgroup);
1435
1436 return bret;
1437 }
1438
1439 /* Try to create the same cgroup in all hierarchies. Start with cgroup_pattern;
1440 * next cgroup_pattern-1, -2, ..., -999.
1441 */
1442 __cgfsng_ops static inline bool cgfsng_payload_create(struct cgroup_ops *ops,
1443 struct lxc_handler *handler)
1444 {
1445 int i;
1446 size_t len;
1447 char *container_cgroup, *offset, *tmp;
1448 int idx = 0;
1449 struct lxc_conf *conf = handler->conf;
1450
1451 if (ops->container_cgroup) {
1452 WARN("cgfsng_create called a second time: %s", ops->container_cgroup);
1453 return false;
1454 }
1455
1456 if (!conf)
1457 return false;
1458
1459 if (conf->cgroup_meta.dir)
1460 tmp = lxc_string_join("/", (const char *[]){conf->cgroup_meta.dir, handler->name, NULL}, false);
1461 else
1462 tmp = lxc_string_replace("%n", handler->name, ops->cgroup_pattern);
1463 if (!tmp) {
1464 ERROR("Failed expanding cgroup name pattern");
1465 return false;
1466 }
1467
1468 len = strlen(tmp) + 5; /* leave room for -NNN\0 */
1469 container_cgroup = must_alloc(len);
1470 (void)strlcpy(container_cgroup, tmp, len);
1471 free(tmp);
1472 offset = container_cgroup + len - 5;
1473
1474 again:
1475 if (idx == 1000) {
1476 ERROR("Too many conflicting cgroup names");
1477 goto out_free;
1478 }
1479
1480 if (idx) {
1481 int ret;
1482
1483 ret = snprintf(offset, 5, "-%d", idx);
1484 if (ret < 0 || (size_t)ret >= 5) {
1485 FILE *f = fopen("/dev/null", "w");
1486 if (f) {
1487 fprintf(f, "Workaround for GCC7 bug: "
1488 "https://gcc.gnu.org/bugzilla/"
1489 "show_bug.cgi?id=78969");
1490 fclose(f);
1491 }
1492 }
1493 }
1494
1495 for (i = 0; ops->hierarchies[i]; i++) {
1496 if (!container_create_path_for_hierarchy(ops->hierarchies[i], container_cgroup)) {
1497 ERROR("Failed to create cgroup \"%s\"", ops->hierarchies[i]->container_full_path);
1498 free(ops->hierarchies[i]->container_full_path);
1499 ops->hierarchies[i]->container_full_path = NULL;
1500 for (int j = 0; j < i; j++)
1501 remove_path_for_hierarchy(ops->hierarchies[j], container_cgroup, false);
1502 idx++;
1503 goto again;
1504 }
1505 }
1506
1507 ops->container_cgroup = container_cgroup;
1508 INFO("The container uses \"%s\" as cgroup", container_cgroup);
1509
1510 return true;
1511
1512 out_free:
1513 free(container_cgroup);
1514
1515 return false;
1516 }
1517
1518 __cgfsng_ops static bool __do_cgroup_enter(struct cgroup_ops *ops, pid_t pid,
1519 bool monitor)
1520 {
1521 int len;
1522 char pidstr[INTTYPE_TO_STRLEN(pid_t)];
1523
1524 len = snprintf(pidstr, sizeof(pidstr), "%d", pid);
1525 if (len < 0 || (size_t)len >= sizeof(pidstr))
1526 return false;
1527
1528 for (int i = 0; ops->hierarchies[i]; i++) {
1529 int ret;
1530 char *path;
1531
1532 if (monitor)
1533 path = must_make_path(ops->hierarchies[i]->monitor_full_path,
1534 "cgroup.procs", NULL);
1535 else
1536 path = must_make_path(ops->hierarchies[i]->container_full_path,
1537 "cgroup.procs", NULL);
1538 ret = lxc_write_to_file(path, pidstr, len, false, 0666);
1539 if (ret != 0) {
1540 SYSERROR("Failed to enter cgroup \"%s\"", path);
1541 free(path);
1542 return false;
1543 }
1544 free(path);
1545 }
1546
1547 return true;
1548 }
1549
1550 __cgfsng_ops static bool cgfsng_monitor_enter(struct cgroup_ops *ops, pid_t pid)
1551 {
1552 return __do_cgroup_enter(ops, pid, true);
1553 }
1554
1555 static bool cgfsng_payload_enter(struct cgroup_ops *ops, pid_t pid)
1556 {
1557 return __do_cgroup_enter(ops, pid, false);
1558 }
1559
1560 static int chowmod(char *path, uid_t chown_uid, gid_t chown_gid,
1561 mode_t chmod_mode)
1562 {
1563 int ret;
1564
1565 ret = chown(path, chown_uid, chown_gid);
1566 if (ret < 0) {
1567 SYSWARN("Failed to chown(%s, %d, %d)", path, (int)chown_uid, (int)chown_gid);
1568 return -1;
1569 }
1570
1571 ret = chmod(path, chmod_mode);
1572 if (ret < 0) {
1573 SYSWARN("Failed to chmod(%s, %d)", path, (int)chmod_mode);
1574 return -1;
1575 }
1576
1577 return 0;
1578 }
1579
1580 /* chgrp the container cgroups to container group. We leave
1581 * the container owner as cgroup owner. So we must make the
1582 * directories 775 so that the container can create sub-cgroups.
1583 *
1584 * Also chown the tasks and cgroup.procs files. Those may not
1585 * exist depending on kernel version.
1586 */
1587 static int chown_cgroup_wrapper(void *data)
1588 {
1589 int i, ret;
1590 uid_t destuid;
1591 struct generic_userns_exec_data *arg = data;
1592 uid_t nsuid = (arg->conf->root_nsuid_map != NULL) ? 0 : arg->conf->init_uid;
1593 gid_t nsgid = (arg->conf->root_nsgid_map != NULL) ? 0 : arg->conf->init_gid;
1594
1595 ret = setresgid(nsgid, nsgid, nsgid);
1596 if (ret < 0) {
1597 SYSERROR("Failed to setresgid(%d, %d, %d)",
1598 (int)nsgid, (int)nsgid, (int)nsgid);
1599 return -1;
1600 }
1601
1602 ret = setresuid(nsuid, nsuid, nsuid);
1603 if (ret < 0) {
1604 SYSERROR("Failed to setresuid(%d, %d, %d)",
1605 (int)nsuid, (int)nsuid, (int)nsuid);
1606 return -1;
1607 }
1608
1609 ret = setgroups(0, NULL);
1610 if (ret < 0 && errno != EPERM) {
1611 SYSERROR("Failed to setgroups(0, NULL)");
1612 return -1;
1613 }
1614
1615 destuid = get_ns_uid(arg->origuid);
1616 if (destuid == LXC_INVALID_UID)
1617 destuid = 0;
1618
1619 for (i = 0; arg->hierarchies[i]; i++) {
1620 char *fullpath;
1621 char *path = arg->hierarchies[i]->container_full_path;
1622
1623 ret = chowmod(path, destuid, nsgid, 0775);
1624 if (ret < 0)
1625 return -1;
1626
1627 /* Failures to chown() these are inconvenient but not
1628 * detrimental. We leave these owned by the container launcher,
1629 * so that container root can write to the files in order to
1630 * attach. We chmod() them 664 so that container systemd can
1631 * write to the files (which systemd in wily insists on doing).
1632 */
1633
1634 if (arg->hierarchies[i]->version == CGROUP_SUPER_MAGIC) {
1635 fullpath = must_make_path(path, "tasks", NULL);
1636 (void)chowmod(fullpath, destuid, nsgid, 0664);
1637 free(fullpath);
1638 }
1639
1640 fullpath = must_make_path(path, "cgroup.procs", NULL);
1641 (void)chowmod(fullpath, destuid, nsgid, 0664);
1642 free(fullpath);
1643
1644 if (arg->hierarchies[i]->version != CGROUP2_SUPER_MAGIC)
1645 continue;
1646
1647 fullpath = must_make_path(path, "cgroup.subtree_control", NULL);
1648 (void)chowmod(fullpath, destuid, nsgid, 0664);
1649 free(fullpath);
1650
1651 fullpath = must_make_path(path, "cgroup.threads", NULL);
1652 (void)chowmod(fullpath, destuid, nsgid, 0664);
1653 free(fullpath);
1654 }
1655
1656 return 0;
1657 }
1658
1659 __cgfsng_ops static bool cgfsng_chown(struct cgroup_ops *ops,
1660 struct lxc_conf *conf)
1661 {
1662 struct generic_userns_exec_data wrap;
1663
1664 if (lxc_list_empty(&conf->id_map))
1665 return true;
1666
1667 wrap.origuid = geteuid();
1668 wrap.path = NULL;
1669 wrap.hierarchies = ops->hierarchies;
1670 wrap.conf = conf;
1671
1672 if (userns_exec_1(conf, chown_cgroup_wrapper, &wrap,
1673 "chown_cgroup_wrapper") < 0) {
1674 ERROR("Error requesting cgroup chown in new user namespace");
1675 return false;
1676 }
1677
1678 return true;
1679 }
1680
1681 /* cgroup-full:* is done, no need to create subdirs */
1682 static bool cg_mount_needs_subdirs(int type)
1683 {
1684 if (type >= LXC_AUTO_CGROUP_FULL_RO)
1685 return false;
1686
1687 return true;
1688 }
1689
1690 /* After $rootfs/sys/fs/cgroup/controller/the/cg/path has been created,
1691 * remount the controller read-only if needed and bind-mount the cgroupfs onto
1692 * controller/the/cg/path.
1693 */
1694 static int cg_legacy_mount_controllers(int type, struct hierarchy *h,
1695 char *controllerpath, char *cgpath,
1696 const char *container_cgroup)
1697 {
1698 int ret, remount_flags;
1699 char *sourcepath;
1700 int flags = MS_BIND;
1701
1702 if (type == LXC_AUTO_CGROUP_RO || type == LXC_AUTO_CGROUP_MIXED) {
1703 ret = mount(controllerpath, controllerpath, "cgroup", MS_BIND, NULL);
1704 if (ret < 0) {
1705 SYSERROR("Failed to bind mount \"%s\" onto \"%s\"",
1706 controllerpath, controllerpath);
1707 return -1;
1708 }
1709
1710 remount_flags = add_required_remount_flags(controllerpath,
1711 controllerpath,
1712 flags | MS_REMOUNT);
1713 ret = mount(controllerpath, controllerpath, "cgroup",
1714 remount_flags | MS_REMOUNT | MS_BIND | MS_RDONLY,
1715 NULL);
1716 if (ret < 0) {
1717 SYSERROR("Failed to remount \"%s\" ro", controllerpath);
1718 return -1;
1719 }
1720
1721 INFO("Remounted %s read-only", controllerpath);
1722 }
1723
1724 sourcepath = must_make_path(h->mountpoint, h->container_base_path,
1725 container_cgroup, NULL);
1726 if (type == LXC_AUTO_CGROUP_RO)
1727 flags |= MS_RDONLY;
1728
1729 ret = mount(sourcepath, cgpath, "cgroup", flags, NULL);
1730 if (ret < 0) {
1731 SYSERROR("Failed to mount \"%s\" onto \"%s\"", h->controllers[0], cgpath);
1732 free(sourcepath);
1733 return -1;
1734 }
1735 INFO("Mounted \"%s\" onto \"%s\"", h->controllers[0], cgpath);
1736
1737 if (flags & MS_RDONLY) {
1738 remount_flags = add_required_remount_flags(sourcepath, cgpath,
1739 flags | MS_REMOUNT);
1740 ret = mount(sourcepath, cgpath, "cgroup", remount_flags, NULL);
1741 if (ret < 0) {
1742 SYSERROR("Failed to remount \"%s\" ro", cgpath);
1743 free(sourcepath);
1744 return -1;
1745 }
1746 INFO("Remounted %s read-only", cgpath);
1747 }
1748
1749 free(sourcepath);
1750 INFO("Completed second stage cgroup automounts for \"%s\"", cgpath);
1751 return 0;
1752 }
1753
1754 /* __cg_mount_direct
1755 *
1756 * Mount cgroup hierarchies directly without using bind-mounts. The main
1757 * use cases are mounting cgroup hierarchies in cgroup namespaces and mounting
1758 * cgroups for the LXC_AUTO_CGROUP_FULL option.
1759 */
1760 static int __cg_mount_direct(int type, struct hierarchy *h,
1761 const char *controllerpath)
1762 {
1763 int ret;
1764 char *controllers = NULL;
1765 char *fstype = "cgroup2";
1766 unsigned long flags = 0;
1767
1768 flags |= MS_NOSUID;
1769 flags |= MS_NOEXEC;
1770 flags |= MS_NODEV;
1771 flags |= MS_RELATIME;
1772
1773 if (type == LXC_AUTO_CGROUP_RO || type == LXC_AUTO_CGROUP_FULL_RO)
1774 flags |= MS_RDONLY;
1775
1776 if (h->version != CGROUP2_SUPER_MAGIC) {
1777 controllers = lxc_string_join(",", (const char **)h->controllers, false);
1778 if (!controllers)
1779 return -ENOMEM;
1780 fstype = "cgroup";
1781 }
1782
1783 ret = mount("cgroup", controllerpath, fstype, flags, controllers);
1784 free(controllers);
1785 if (ret < 0) {
1786 SYSERROR("Failed to mount \"%s\" with cgroup filesystem type %s", controllerpath, fstype);
1787 return -1;
1788 }
1789
1790 DEBUG("Mounted \"%s\" with cgroup filesystem type %s", controllerpath, fstype);
1791 return 0;
1792 }
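/* The resulting calls look roughly like (illustrative arguments):
 *
 *   mount("cgroup", path, "cgroup2", flags, NULL);          // unified
 *   mount("cgroup", path, "cgroup", flags, "cpu,cpuacct");  // legacy
 *
 * with MS_RDONLY added to flags for the LXC_AUTO_CGROUP_RO and
 * LXC_AUTO_CGROUP_FULL_RO types.
 */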
1793
1794 static inline int cg_mount_in_cgroup_namespace(int type, struct hierarchy *h,
1795 const char *controllerpath)
1796 {
1797 return __cg_mount_direct(type, h, controllerpath);
1798 }
1799
1800 static inline int cg_mount_cgroup_full(int type, struct hierarchy *h,
1801 const char *controllerpath)
1802 {
1803 if (type < LXC_AUTO_CGROUP_FULL_RO || type > LXC_AUTO_CGROUP_FULL_MIXED)
1804 return 0;
1805
1806 return __cg_mount_direct(type, h, controllerpath);
1807 }
1808
1809 __cgfsng_ops static bool cgfsng_mount(struct cgroup_ops *ops,
1810 struct lxc_handler *handler,
1811 const char *root, int type)
1812 {
1813 int i, ret;
1814 char *tmpfspath = NULL;
1815 bool has_cgns = false, retval = false, wants_force_mount = false;
1816
1817 if ((type & LXC_AUTO_CGROUP_MASK) == 0)
1818 return true;
1819
1820 if (type & LXC_AUTO_CGROUP_FORCE) {
1821 type &= ~LXC_AUTO_CGROUP_FORCE;
1822 wants_force_mount = true;
1823 }
1824
1825 if (!wants_force_mount){
1826 if (!lxc_list_empty(&handler->conf->keepcaps))
1827 wants_force_mount = !in_caplist(CAP_SYS_ADMIN, &handler->conf->keepcaps);
1828 else
1829 wants_force_mount = in_caplist(CAP_SYS_ADMIN, &handler->conf->caps);
1830 }
1831
1832 has_cgns = cgns_supported();
1833 if (has_cgns && !wants_force_mount)
1834 return true;
1835
1836 if (type == LXC_AUTO_CGROUP_NOSPEC)
1837 type = LXC_AUTO_CGROUP_MIXED;
1838 else if (type == LXC_AUTO_CGROUP_FULL_NOSPEC)
1839 type = LXC_AUTO_CGROUP_FULL_MIXED;
1840
1841 /* Mount tmpfs */
1842 tmpfspath = must_make_path(root, "/sys/fs/cgroup", NULL);
1843 ret = safe_mount(NULL, tmpfspath, "tmpfs",
1844 MS_NOSUID | MS_NODEV | MS_NOEXEC | MS_RELATIME,
1845 "size=10240k,mode=755", root);
1846 if (ret < 0)
1847 goto on_error;
1848
1849 for (i = 0; ops->hierarchies[i]; i++) {
1850 char *controllerpath, *path2;
1851 struct hierarchy *h = ops->hierarchies[i];
1852 char *controller = strrchr(h->mountpoint, '/');
1853
1854 if (!controller)
1855 continue;
1856 controller++;
1857
1858 controllerpath = must_make_path(tmpfspath, controller, NULL);
1859 if (dir_exists(controllerpath)) {
1860 free(controllerpath);
1861 continue;
1862 }
1863
1864 ret = mkdir(controllerpath, 0755);
1865 if (ret < 0) {
1866 SYSERROR("Error creating cgroup path: %s", controllerpath);
1867 free(controllerpath);
1868 goto on_error;
1869 }
1870
1871 if (has_cgns && wants_force_mount) {
1872 /* If cgroup namespaces are supported but the container
1873 * will not have CAP_SYS_ADMIN after it has started we
1874 * need to mount the cgroups manually.
1875 */
1876 ret = cg_mount_in_cgroup_namespace(type, h, controllerpath);
1877 free(controllerpath);
1878 if (ret < 0)
1879 goto on_error;
1880
1881 continue;
1882 }
1883
1884 ret = cg_mount_cgroup_full(type, h, controllerpath);
1885 if (ret < 0) {
1886 free(controllerpath);
1887 goto on_error;
1888 }
1889
1890 if (!cg_mount_needs_subdirs(type)) {
1891 free(controllerpath);
1892 continue;
1893 }
1894
1895 path2 = must_make_path(controllerpath, h->container_base_path,
1896 ops->container_cgroup, NULL);
1897 ret = mkdir_p(path2, 0755);
1898 if (ret < 0) {
1899 free(controllerpath);
1900 free(path2);
1901 goto on_error;
1902 }
1903
1904 ret = cg_legacy_mount_controllers(type, h, controllerpath,
1905 path2, ops->container_cgroup);
1906 free(controllerpath);
1907 free(path2);
1908 if (ret < 0)
1909 goto on_error;
1910 }
1911 retval = true;
1912
1913 on_error:
1914 free(tmpfspath);
1915 return retval;
1916 }
1917
1918 static int recursive_count_nrtasks(char *dirname)
1919 {
1920 struct dirent *direntp;
1921 DIR *dir;
1922 int count = 0, ret;
1923 char *path;
1924
1925 dir = opendir(dirname);
1926 if (!dir)
1927 return 0;
1928
1929 while ((direntp = readdir(dir))) {
1930 struct stat mystat;
1931
1932 if (!strcmp(direntp->d_name, ".") ||
1933 !strcmp(direntp->d_name, ".."))
1934 continue;
1935
1936 path = must_make_path(dirname, direntp->d_name, NULL);
1937
1938 if (lstat(path, &mystat))
1939 goto next;
1940
1941 if (!S_ISDIR(mystat.st_mode))
1942 goto next;
1943
1944 count += recursive_count_nrtasks(path);
1945 next:
1946 free(path);
1947 }
1948
1949 path = must_make_path(dirname, "cgroup.procs", NULL);
1950 ret = lxc_count_file_lines(path);
1951 if (ret != -1)
1952 count += ret;
1953 free(path);
1954
1955 (void)closedir(dir);
1956
1957 return count;
1958 }
1959
1960 __cgfsng_ops static int cgfsng_nrtasks(struct cgroup_ops *ops)
1961 {
1962 int count;
1963 char *path;
1964
1965 if (!ops->container_cgroup || !ops->hierarchies)
1966 return -1;
1967
1968 path = must_make_path(ops->hierarchies[0]->container_full_path, NULL);
1969 count = recursive_count_nrtasks(path);
1970 free(path);
1971 return count;
1972 }
1973
1974 /* Only root needs to escape to the cgroup of its init. */
1975 __cgfsng_ops static bool cgfsng_escape(const struct cgroup_ops *ops,
1976 struct lxc_conf *conf)
1977 {
1978 int i;
1979
1980 if (conf->cgroup_meta.relative || geteuid())
1981 return true;
1982
1983 for (i = 0; ops->hierarchies[i]; i++) {
1984 int ret;
1985 char *fullpath;
1986
1987 fullpath = must_make_path(ops->hierarchies[i]->mountpoint,
1988 ops->hierarchies[i]->container_base_path,
1989 "cgroup.procs", NULL);
1990 ret = lxc_write_to_file(fullpath, "0", 2, false, 0666);
1991 if (ret != 0) {
1992 SYSERROR("Failed to escape to cgroup \"%s\"", fullpath);
1993 free(fullpath);
1994 return false;
1995 }
1996 free(fullpath);
1997 }
1998
1999 return true;
2000 }
2001
2002 __cgfsng_ops static int cgfsng_num_hierarchies(struct cgroup_ops *ops)
2003 {
2004 int i;
2005
2006 for (i = 0; ops->hierarchies[i]; i++)
2007 ;
2008
2009 return i;
2010 }
2011
2012 __cgfsng_ops static bool cgfsng_get_hierarchies(struct cgroup_ops *ops, int n, char ***out)
2013 {
2014 int i;
2015
2016 /* sanity check n */
2017 for (i = 0; i < n; i++)
2018 if (!ops->hierarchies[i])
2019 return false;
2020
2021 *out = ops->hierarchies[i]->controllers;
2022
2023 return true;
2024 }
2025
2026 #define THAWED "THAWED"
2027 #define THAWED_LEN (strlen(THAWED))
2028
2029 /* TODO: If the unified cgroup hierarchy grows a freezer controller this needs
2030 * to be adapted.
2031 */
2032 __cgfsng_ops static bool cgfsng_unfreeze(struct cgroup_ops *ops)
2033 {
2034 int ret;
2035 char *fullpath;
2036 struct hierarchy *h;
2037
2038 h = get_hierarchy(ops, "freezer");
2039 if (!h)
2040 return false;
2041
2042 fullpath = must_make_path(h->container_full_path, "freezer.state", NULL);
2043 ret = lxc_write_to_file(fullpath, THAWED, THAWED_LEN, false, 0666);
2044 free(fullpath);
2045 if (ret < 0)
2046 return false;
2047
2048 return true;
2049 }
2050
2051 __cgfsng_ops static const char *cgfsng_get_cgroup(struct cgroup_ops *ops,
2052 const char *controller)
2053 {
2054 struct hierarchy *h;
2055
2056 h = get_hierarchy(ops, controller);
2057 if (!h) {
2058 WARN("Failed to find hierarchy for controller \"%s\"",
2059 controller ? controller : "(null)");
2060 return NULL;
2061 }
2062
2063 return h->container_full_path ? h->container_full_path + strlen(h->mountpoint) : NULL;
2064 }
2065
2066 /* Given a cgroup path returned from lxc_cmd_get_cgroup_path, build a full path,
2067 * which must be freed by the caller.
2068 */
2069 static inline char *build_full_cgpath_from_monitorpath(struct hierarchy *h,
2070 const char *inpath,
2071 const char *filename)
2072 {
2073 return must_make_path(h->mountpoint, inpath, filename, NULL);
2074 }
2075
2076 /* Technically, we're always at a delegation boundary here (this is especially
2077 * true when cgroup namespaces are available). The reasoning is that in order
2078 * for us to have been able to start a container in the first place, the root
2079 * cgroup must have been a leaf node. Now, either the container's init system
2080 * has populated the cgroup and kept it as a leaf node, or it has created
2081 * subtrees. In the former case we simply attach to the leaf node we created
2082 * when we started the container; in the latter case we create our own cgroup
2083 * for the attaching process.
2084 */
2085 static int __cg_unified_attach(const struct hierarchy *h, const char *name,
2086 const char *lxcpath, const char *pidstr,
2087 size_t pidstr_len, const char *controller)
2088 {
2089 int ret;
2090 size_t len;
2091 int fret = -1, idx = 0;
2092 char *base_path = NULL, *container_cgroup = NULL, *full_path = NULL;
2093
2094 container_cgroup = lxc_cmd_get_cgroup_path(name, lxcpath, controller);
2095 /* not running */
2096 if (!container_cgroup)
2097 return 0;
2098
2099 base_path = must_make_path(h->mountpoint, container_cgroup, NULL);
2100 full_path = must_make_path(base_path, "cgroup.procs", NULL);
2101 /* cgroup is populated */
2102 ret = lxc_write_to_file(full_path, pidstr, pidstr_len, false, 0666);
2103 if (ret < 0 && errno != EBUSY)
2104 goto on_error;
2105
2106 if (ret == 0)
2107 goto on_success;
2108
2109 free(full_path);
2110
2111 len = strlen(base_path) + STRLITERALLEN("/lxc-1000") +
2112 STRLITERALLEN("/cgroup-procs");
2113 full_path = must_alloc(len + 1);
2114 do {
2115 if (idx)
2116 ret = snprintf(full_path, len + 1, "%s/lxc-%d",
2117 base_path, idx);
2118 else
2119 ret = snprintf(full_path, len + 1, "%s/lxc", base_path);
2120 if (ret < 0 || (size_t)ret >= len + 1)
2121 goto on_error;
2122
2123 ret = mkdir_p(full_path, 0755);
2124 if (ret < 0 && errno != EEXIST)
2125 goto on_error;
2126
2127 (void)strlcat(full_path, "/cgroup.procs", len + 1);
2128 ret = lxc_write_to_file(full_path, pidstr, pidstr_len, false, 0666);
2129 if (ret == 0)
2130 goto on_success;
2131
2132 /* this is a non-leaf node */
2133 if (errno != EBUSY)
2134 goto on_error;
2135
2136 idx++;
2137 } while (idx < 1000);
2138
2139 on_success:
2140 if (idx < 1000)
2141 fret = 0;
2142
2143 on_error:
2144 free(base_path);
2145 free(container_cgroup);
2146 free(full_path);
2147
2148 return fret;
2149 }
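/*
 * Illustrative example, not part of the original file: for a container whose
 * unified cgroup is reported as "/lxc/c1" and a mountpoint of "/sys/fs/cgroup"
 * (both values assumed for illustration), the function above tries, in order:
 *
 *   /sys/fs/cgroup/lxc/c1/cgroup.procs
 *   /sys/fs/cgroup/lxc/c1/lxc/cgroup.procs
 *   /sys/fs/cgroup/lxc/c1/lxc-1/cgroup.procs
 *   ...
 *   /sys/fs/cgroup/lxc/c1/lxc-999/cgroup.procs
 *
 * A write failing with EBUSY is treated as "this cgroup is a non-leaf node
 * that cannot hold processes", so the next candidate directory is created and
 * tried until one accepts the pid.
 */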
2150
2151 __cgfsng_ops static bool cgfsng_attach(struct cgroup_ops *ops, const char *name,
2152 const char *lxcpath, pid_t pid)
2153 {
2154 int i, len, ret;
2155 char pidstr[INTTYPE_TO_STRLEN(pid_t)];
2156
2157 len = snprintf(pidstr, sizeof(pidstr), "%d", pid);
2158 if (len < 0 || (size_t)len >= sizeof(pidstr))
2159 return false;
2160
2161 for (i = 0; ops->hierarchies[i]; i++) {
2162 char *path;
2163 char *fullpath = NULL;
2164 struct hierarchy *h = ops->hierarchies[i];
2165
2166 if (h->version == CGROUP2_SUPER_MAGIC) {
2167 ret = __cg_unified_attach(h, name, lxcpath, pidstr, len,
2168 h->controllers[0]);
2169 if (ret < 0)
2170 return false;
2171
2172 continue;
2173 }
2174
2175 path = lxc_cmd_get_cgroup_path(name, lxcpath, h->controllers[0]);
2176 /* not running */
2177 if (!path)
2178 continue;
2179
2180 fullpath = build_full_cgpath_from_monitorpath(h, path, "cgroup.procs");
2181 free(path);
2182 ret = lxc_write_to_file(fullpath, pidstr, len, false, 0666);
2183 if (ret < 0) {
2184 SYSERROR("Failed to attach %d to %s", (int)pid, fullpath);
2185 free(fullpath);
2186 return false;
2187 }
2188 free(fullpath);
2189 }
2190
2191 return true;
2192 }
2193
2194 /* Called externally (e.g. from 'lxc-cgroup') to query cgroup limits. Here we
2195 * don't have a cgroup_data set up, so we ask the running container through the
2196 * commands API for the cgroup path.
2197 */
2198 __cgfsng_ops static int cgfsng_get(struct cgroup_ops *ops, const char *filename,
2199 char *value, size_t len, const char *name,
2200 const char *lxcpath)
2201 {
2202 int ret = -1;
2203 size_t controller_len;
2204 char *controller, *p, *path;
2205 struct hierarchy *h;
2206
2207 controller_len = strlen(filename);
2208 controller = alloca(controller_len + 1);
2209 (void)strlcpy(controller, filename, controller_len + 1);
2210
2211 p = strchr(controller, '.');
2212 if (p)
2213 *p = '\0';
2214
2215 path = lxc_cmd_get_cgroup_path(name, lxcpath, controller);
2216 /* not running */
2217 if (!path)
2218 return -1;
2219
2220 h = get_hierarchy(ops, controller);
2221 if (h) {
2222 char *fullpath;
2223
2224 fullpath = build_full_cgpath_from_monitorpath(h, path, filename);
2225 ret = lxc_read_from_file(fullpath, value, len);
2226 free(fullpath);
2227 }
2228 free(path);
2229
2230 return ret;
2231 }
2232
2233 /* Called externally (e.g. from 'lxc-cgroup') to set new cgroup limits. Here we
2234 * don't have a cgroup_data set up, so we ask the running container through the
2235 * commands API for the cgroup path.
2236 */
2237 __cgfsng_ops static int cgfsng_set(struct cgroup_ops *ops,
2238 const char *filename, const char *value,
2239 const char *name, const char *lxcpath)
2240 {
2241 int ret = -1;
2242 size_t controller_len;
2243 char *controller, *p, *path;
2244 struct hierarchy *h;
2245
2246 controller_len = strlen(filename);
2247 controller = alloca(controller_len + 1);
2248 (void)strlcpy(controller, filename, controller_len + 1);
2249
2250 p = strchr(controller, '.');
2251 if (p)
2252 *p = '\0';
2253
2254 path = lxc_cmd_get_cgroup_path(name, lxcpath, controller);
2255 /* not running */
2256 if (!path)
2257 return -1;
2258
2259 h = get_hierarchy(ops, controller);
2260 if (h) {
2261 char *fullpath;
2262
2263 fullpath = build_full_cgpath_from_monitorpath(h, path, filename);
2264 ret = lxc_write_to_file(fullpath, value, strlen(value), false, 0666);
2265 free(fullpath);
2266 }
2267 free(path);
2268
2269 return ret;
2270 }
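/*
 * Illustrative usage, not part of the original file: cgfsng_get() and
 * cgfsng_set() above back the external query/update paths, e.g. something
 * along the lines of (container name and values assumed):
 *
 *   lxc-cgroup -n c1 memory.limit_in_bytes             # read a limit
 *   lxc-cgroup -n c1 memory.limit_in_bytes 268435456   # set a limit
 *
 * The controller name ("memory") is derived by truncating the filename at the
 * first '.', the running container is asked for its cgroup path over the
 * commands API, and the file is then read from or written to under the
 * corresponding hierarchy's mountpoint.
 */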
2271
2272 /* Take a devices cgroup line of the form
2273 *     /dev/foo rwx
2274 * and convert it to a valid
2275 *     type major:minor mode
2276 * line. Return <0 on error. @dest is a preallocated buffer long enough to
2277 * hold the output.
2278 */
2279 static int convert_devpath(const char *invalue, char *dest)
2280 {
2281 int n_parts;
2282 char *p, *path, type;
2283 unsigned long minor, major;
2284 struct stat sb;
2285 int ret = -EINVAL;
2286 char *mode = NULL;
2287
2288 path = must_copy_string(invalue);
2289
2290 /* Read path followed by mode. Ignore any trailing text.
2291 * A ' # comment' would be legal. Technically other text is not
2292 * legal, we could check for that if we cared to.
2293 */
2294 for (n_parts = 1, p = path; *p && n_parts < 3; p++) {
2295 if (*p != ' ')
2296 continue;
2297 *p = '\0';
2298
2299 if (n_parts != 1)
2300 break;
2301 p++;
2302 n_parts++;
2303
2304 while (*p == ' ')
2305 p++;
2306
2307 mode = p;
2308
2309 if (*p == '\0')
2310 goto out;
2311 }
2312
2313 if (n_parts == 1)
2314 goto out;
2315
2316 ret = stat(path, &sb);
2317 if (ret < 0)
2318 goto out;
2319
2320 mode_t m = sb.st_mode & S_IFMT;
2321 switch (m) {
2322 case S_IFBLK:
2323 type = 'b';
2324 break;
2325 case S_IFCHR:
2326 type = 'c';
2327 break;
2328 default:
2329 ERROR("Unsupported device type %i for \"%s\"", m, path);
2330 ret = -EINVAL;
2331 goto out;
2332 }
2333
2334 major = MAJOR(sb.st_rdev);
2335 minor = MINOR(sb.st_rdev);
2336 ret = snprintf(dest, 50, "%c %lu:%lu %s", type, major, minor, mode);
2337 if (ret < 0 || ret >= 50) {
2338 ERROR("Error on configuration value \"%c %lu:%lu %s\" (max 50 "
2339 "chars)", type, major, minor, mode);
2340 ret = -ENAMETOOLONG;
2341 goto out;
2342 }
2343 ret = 0;
2344
2345 out:
2346 free(path);
2347 return ret;
2348 }
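/*
 * Illustrative example, not part of the original file (device numbers
 * assumed): a configuration entry such as
 *
 *   lxc.cgroup.devices.allow = /dev/null rwm
 *
 * reaches convert_devpath() as "/dev/null rwm". On a typical system where
 * /dev/null is the character device 1:3, the converted line is
 *
 *   c 1:3 rwm
 *
 * which is the format the legacy devices controller expects in
 * devices.allow/devices.deny.
 */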
2349
2350 /* Called from setup_limits - here we have the container's cgroup_data because
2351 * we created the cgroups.
2352 */
2353 static int cg_legacy_set_data(struct cgroup_ops *ops, const char *filename,
2354 const char *value)
2355 {
2356 size_t len;
2357 char *fullpath, *p;
2358 /* "b|c <2^64-1>:<2^64-1> r|w|m" = 47 chars max */
2359 char converted_value[50];
2360 struct hierarchy *h;
2361 int ret = 0;
2362 char *controller = NULL;
2363
2364 len = strlen(filename);
2365 controller = alloca(len + 1);
2366 (void)strlcpy(controller, filename, len + 1);
2367
2368 p = strchr(controller, '.');
2369 if (p)
2370 *p = '\0';
2371
2372 if (strcmp("devices.allow", filename) == 0 && value[0] == '/') {
2373 ret = convert_devpath(value, converted_value);
2374 if (ret < 0)
2375 return ret;
2376 value = converted_value;
2377 }
2378
2379 h = get_hierarchy(ops, controller);
2380 if (!h) {
2381 ERROR("Failed to set up limits for the \"%s\" controller. "
2382 "The controller seems to be unused by the \"cgfsng\" cgroup "
2383 "driver or not enabled on the cgroup hierarchy",
2384 controller);
2385 errno = ENOENT;
2386 return -ENOENT;
2387 }
2388
2389 fullpath = must_make_path(h->container_full_path, filename, NULL);
2390 ret = lxc_write_to_file(fullpath, value, strlen(value), false, 0666);
2391 free(fullpath);
2392 return ret;
2393 }
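/*
 * Illustrative example, not part of the original file (values assumed): a
 * call like cg_legacy_set_data(ops, "memory.limit_in_bytes", "536870912")
 * derives the controller name "memory" by truncating at the first '.', looks
 * up that hierarchy, and writes the value to
 * <container_full_path>/memory.limit_in_bytes.
 */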
2394
2395 static bool __cg_legacy_setup_limits(struct cgroup_ops *ops,
2396 struct lxc_list *cgroup_settings,
2397 bool do_devices)
2398 {
2399 struct lxc_list *iterator, *next, *sorted_cgroup_settings;
2400 struct lxc_cgroup *cg;
2401 bool ret = false;
2402
2403 if (lxc_list_empty(cgroup_settings))
2404 return true;
2405
2406 sorted_cgroup_settings = sort_cgroup_settings(cgroup_settings);
2407 if (!sorted_cgroup_settings)
2408 return false;
2409
2410 lxc_list_for_each(iterator, sorted_cgroup_settings) {
2411 cg = iterator->elem;
2412
2413 if (do_devices == !strncmp("devices", cg->subsystem, 7)) {
2414 if (cg_legacy_set_data(ops, cg->subsystem, cg->value)) {
2415 if (do_devices && (errno == EACCES || errno == EPERM)) {
2416 WARN("Failed to set \"%s\" to \"%s\"",
2417 cg->subsystem, cg->value);
2418 continue;
2419 }
2420 WARN("Failed to set \"%s\" to \"%s\"",
2421 cg->subsystem, cg->value);
2422 goto out;
2423 }
2424 DEBUG("Set controller \"%s\" to \"%s\"",
2425 cg->subsystem, cg->value);
2426 }
2427 }
2428
2429 ret = true;
2430 INFO("Limits for the legacy cgroup hierarchies have been set up");
2431 out:
2432 lxc_list_for_each_safe(iterator, sorted_cgroup_settings, next) {
2433 lxc_list_del(iterator);
2434 free(iterator);
2435 }
2436 free(sorted_cgroup_settings);
2437 return ret;
2438 }
2439
2440 static bool __cg_unified_setup_limits(struct cgroup_ops *ops,
2441 struct lxc_list *cgroup_settings)
2442 {
2443 struct lxc_list *iterator;
2444 struct hierarchy *h = ops->unified;
2445
2446 if (lxc_list_empty(cgroup_settings))
2447 return true;
2448
2449 if (!h)
2450 return false;
2451
2452 lxc_list_for_each(iterator, cgroup_settings) {
2453 int ret;
2454 char *fullpath;
2455 struct lxc_cgroup *cg = iterator->elem;
2456
2457 fullpath = must_make_path(h->container_full_path, cg->subsystem, NULL);
2458 ret = lxc_write_to_file(fullpath, cg->value, strlen(cg->value), false, 0666);
2459 free(fullpath);
2460 if (ret < 0) {
2461 SYSERROR("Failed to set \"%s\" to \"%s\"",
2462 cg->subsystem, cg->value);
2463 return false;
2464 }
2465 TRACE("Set \"%s\" to \"%s\"", cg->subsystem, cg->value);
2466 }
2467
2468 INFO("Limits for the unified cgroup hierarchy have been set up");
2469 return true;
2470 }
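/*
 * Illustrative example, not part of the original file (values assumed): an
 * entry from lxc.cgroup2.*, e.g.
 *
 *   lxc.cgroup2.memory.max = 536870912
 *
 * arrives here with cg->subsystem == "memory.max" and is written verbatim to
 * <container_full_path>/memory.max in the unified hierarchy.
 */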
2471
2472 __cgfsng_ops static bool cgfsng_setup_limits(struct cgroup_ops *ops,
2473 struct lxc_conf *conf,
2474 bool do_devices)
2475 {
2476 bool bret;
2477
2478 bret = __cg_legacy_setup_limits(ops, &conf->cgroup, do_devices);
2479 if (!bret)
2480 return false;
2481
2482 return __cg_unified_setup_limits(ops, &conf->cgroup2);
2483 }
2484
2485 static bool cgroup_use_wants_controllers(const struct cgroup_ops *ops,
2486 char **controllers)
2487 {
2488 char **cur_ctrl, **cur_use;
2489
2490 if (!ops->cgroup_use)
2491 return true;
2492
2493 for (cur_ctrl = controllers; cur_ctrl && *cur_ctrl; cur_ctrl++) {
2494 bool found = false;
2495
2496 for (cur_use = ops->cgroup_use; cur_use && *cur_use; cur_use++) {
2497 if (strcmp(*cur_use, *cur_ctrl) != 0)
2498 continue;
2499
2500 found = true;
2501 break;
2502 }
2503
2504 if (found)
2505 continue;
2506
2507 return false;
2508 }
2509
2510 return true;
2511 }
2512
2513 /* At startup, cg_hybrid_init() finds all the info we need about cgroup
2514 * mountpoints and current cgroups, and stores it in @ops.
2515 */
2516 static bool cg_hybrid_init(struct cgroup_ops *ops, bool relative)
2517 {
2518 int ret;
2519 char *basecginfo;
2520 FILE *f;
2521 size_t len = 0;
2522 char *line = NULL;
2523 char **klist = NULL, **nlist = NULL;
2524
2525 /* Root spawned containers escape the current cgroup, so use init's
2526 * cgroups as our base in that case.
2527 */
2528 if (!relative && (geteuid() == 0))
2529 basecginfo = read_file("/proc/1/cgroup");
2530 else
2531 basecginfo = read_file("/proc/self/cgroup");
2532 if (!basecginfo)
2533 return false;
2534
2535 ret = get_existing_subsystems(&klist, &nlist);
2536 if (ret < 0) {
2537 ERROR("Failed to retrieve available legacy cgroup controllers");
2538 free(basecginfo);
2539 return false;
2540 }
2541
2542 f = fopen("/proc/self/mountinfo", "r");
2543 if (!f) {
2544 ERROR("Failed to open \"/proc/self/mountinfo\"");
2545 free(basecginfo);
2546 return false;
2547 }
2548
2549 lxc_cgfsng_print_basecg_debuginfo(basecginfo, klist, nlist);
2550
2551 while (getline(&line, &len, f) != -1) {
2552 int type;
2553 bool writeable;
2554 struct hierarchy *new;
2555 char *base_cgroup = NULL, *mountpoint = NULL;
2556 char **controller_list = NULL;
2557
2558 type = get_cgroup_version(line);
2559 if (type == 0)
2560 continue;
2561
2562 if (type == CGROUP2_SUPER_MAGIC && ops->unified)
2563 continue;
2564
2565 if (ops->cgroup_layout == CGROUP_LAYOUT_UNKNOWN) {
2566 if (type == CGROUP2_SUPER_MAGIC)
2567 ops->cgroup_layout = CGROUP_LAYOUT_UNIFIED;
2568 else if (type == CGROUP_SUPER_MAGIC)
2569 ops->cgroup_layout = CGROUP_LAYOUT_LEGACY;
2570 } else if (ops->cgroup_layout == CGROUP_LAYOUT_UNIFIED) {
2571 if (type == CGROUP_SUPER_MAGIC)
2572 ops->cgroup_layout = CGROUP_LAYOUT_HYBRID;
2573 } else if (ops->cgroup_layout == CGROUP_LAYOUT_LEGACY) {
2574 if (type == CGROUP2_SUPER_MAGIC)
2575 ops->cgroup_layout = CGROUP_LAYOUT_HYBRID;
2576 }
2577
2578 controller_list = cg_hybrid_get_controllers(klist, nlist, line, type);
2579 if (!controller_list && type == CGROUP_SUPER_MAGIC)
2580 continue;
2581
2582 if (type == CGROUP_SUPER_MAGIC)
2583 if (controller_list_is_dup(ops->hierarchies, controller_list))
2584 goto next;
2585
2586 mountpoint = cg_hybrid_get_mountpoint(line);
2587 if (!mountpoint) {
2588 ERROR("Failed parsing mountpoint from \"%s\"", line);
2589 goto next;
2590 }
2591
2592 if (type == CGROUP_SUPER_MAGIC)
2593 base_cgroup = cg_hybrid_get_current_cgroup(basecginfo, controller_list[0], CGROUP_SUPER_MAGIC);
2594 else
2595 base_cgroup = cg_hybrid_get_current_cgroup(basecginfo, NULL, CGROUP2_SUPER_MAGIC);
2596 if (!base_cgroup) {
2597 ERROR("Failed to find current cgroup");
2598 goto next;
2599 }
2600
2601 trim(base_cgroup);
2602 prune_init_scope(base_cgroup);
2603 if (type == CGROUP2_SUPER_MAGIC)
2604 writeable = test_writeable_v2(mountpoint, base_cgroup);
2605 else
2606 writeable = test_writeable_v1(mountpoint, base_cgroup);
2607 if (!writeable)
2608 goto next;
2609
2610 if (type == CGROUP2_SUPER_MAGIC) {
2611 char *cgv2_ctrl_path;
2612
2613 cgv2_ctrl_path = must_make_path(mountpoint, base_cgroup,
2614 "cgroup.controllers",
2615 NULL);
2616
2617 controller_list = cg_unified_get_controllers(cgv2_ctrl_path);
2618 free(cgv2_ctrl_path);
2619 if (!controller_list) {
2620 controller_list = cg_unified_make_empty_controller();
2621 TRACE("No controllers are enabled for "
2622 "delegation in the unified hierarchy");
2623 }
2624 }
2625
2626 /* Exclude all controllers that cgroup use does not want. */
2627 if (!cgroup_use_wants_controllers(ops, controller_list))
2628 goto next;
2629
2630 new = add_hierarchy(&ops->hierarchies, controller_list, mountpoint, base_cgroup, type);
2631 if (type == CGROUP2_SUPER_MAGIC && !ops->unified)
2632 ops->unified = new;
2633
2634 continue;
2635
2636 next:
2637 free_string_list(controller_list);
2638 free(mountpoint);
2639 free(base_cgroup);
2640 }
2641
2642 free_string_list(klist);
2643 free_string_list(nlist);
2644
2645 free(basecginfo);
2646
2647 fclose(f);
2648 free(line);
2649
2650 TRACE("Writable cgroup hierarchies:");
2651 lxc_cgfsng_print_hierarchies(ops);
2652
2653 /* verify that all controllers in lxc.cgroup.use and all crucial
2654 * controllers are accounted for
2655 */
2656 if (!all_controllers_found(ops))
2657 return false;
2658
2659 return true;
2660 }
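/*
 * Illustrative example, not part of the original file (all values assumed): a
 * legacy /proc/self/mountinfo entry such as
 *
 *   34 25 0:29 / /sys/fs/cgroup/memory rw,nosuid,nodev,noexec,relatime shared:15 - cgroup cgroup rw,memory
 *
 * combined with a /proc/{1,self}/cgroup line such as
 *
 *   9:memory:/user.slice
 *
 * yields a hierarchy with controller list {"memory"}, mountpoint
 * "/sys/fs/cgroup/memory" and base cgroup "/user.slice", provided the base
 * cgroup is writable by us and "memory" is wanted by lxc.cgroup.use.
 */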
2661
2662 static int cg_is_pure_unified(void)
2663 {
2664
2665 int ret;
2666 struct statfs fs;
2667
2668 ret = statfs("/sys/fs/cgroup", &fs);
2669 if (ret < 0)
2670 return -ENOMEDIUM;
2671
2672 if (is_fs_type(&fs, CGROUP2_SUPER_MAGIC))
2673 return CGROUP2_SUPER_MAGIC;
2674
2675 return 0;
2676 }
2677
2678 /* Get current cgroup from /proc/self/cgroup for the cgroupfs v2 hierarchy. */
2679 static char *cg_unified_get_current_cgroup(bool relative)
2680 {
2681 char *basecginfo, *base_cgroup;
2682 char *copy = NULL;
2683
2684 if (!relative && (geteuid() == 0))
2685 basecginfo = read_file("/proc/1/cgroup");
2686 else
2687 basecginfo = read_file("/proc/self/cgroup");
2688 if (!basecginfo)
2689 return NULL;
2690
2691 base_cgroup = strstr(basecginfo, "0::/");
2692 if (!base_cgroup)
2693 goto cleanup_on_err;
2694
2695 base_cgroup = base_cgroup + 3;
2696 copy = copy_to_eol(base_cgroup);
2697 if (!copy)
2698 goto cleanup_on_err;
2699
2700 cleanup_on_err:
2701 free(basecginfo);
2702 if (copy)
2703 trim(copy);
2704
2705 return copy;
2706 }
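/*
 * Illustrative example, not part of the original file (path assumed): on a
 * cgroup v2 system /proc/self/cgroup contains a single entry such as
 *
 *   0::/user.slice/user-1000.slice/session-2.scope
 *
 * strstr() locates "0::/" and the "+ 3" above skips "0::", so the returned
 * copy is "/user.slice/user-1000.slice/session-2.scope".
 */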
2707
2708 static int cg_unified_init(struct cgroup_ops *ops, bool relative)
2709 {
2710 int ret;
2711 char *mountpoint, *subtree_path;
2712 char **delegatable;
2713 char *base_cgroup = NULL;
2714
2715 ret = cg_is_pure_unified();
2716 if (ret == -ENOMEDIUM)
2717 return -ENOMEDIUM;
2718
2719 if (ret != CGROUP2_SUPER_MAGIC)
2720 return 0;
2721
2722 base_cgroup = cg_unified_get_current_cgroup(relative);
2723 if (!base_cgroup)
2724 return -EINVAL;
2725 prune_init_scope(base_cgroup);
2726
2727 /* We assume that we have already been given controllers to delegate
2728 * further down the hierarchy. If not it is up to the user to delegate
2729 * them to us.
2730 */
2731 mountpoint = must_copy_string("/sys/fs/cgroup");
2732 subtree_path = must_make_path(mountpoint, base_cgroup,
2733 "cgroup.subtree_control", NULL);
2734 delegatable = cg_unified_get_controllers(subtree_path);
2735 free(subtree_path);
2736 if (!delegatable)
2737 delegatable = cg_unified_make_empty_controller();
2738 if (!delegatable[0])
2739 TRACE("No controllers are enabled for delegation");
2740
2741 /* TODO: If the user requested specific controllers via lxc.cgroup.use
2742 * we should verify that here. The reason I'm not doing it right now is
2743 * that I'm not convinced lxc.cgroup.use is the future, since it is a
2744 * global property. I'd much rather have an option that lets you request
2745 * controllers per container.
2746 */
2747
2748 add_hierarchy(&ops->hierarchies, delegatable, mountpoint, base_cgroup, CGROUP2_SUPER_MAGIC);
2749
2750 ops->cgroup_layout = CGROUP_LAYOUT_UNIFIED;
2751 return CGROUP2_SUPER_MAGIC;
2752 }
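/*
 * Illustrative note, not part of the original file (controller names and
 * paths assumed): cgroup.subtree_control lists the controllers delegated to
 * this level as a space-separated string, e.g.
 *
 *   cpu io memory pids
 *
 * An administrator (or a manager such as systemd) delegates controllers by
 * writing "+<controller>" tokens into the parent's file, roughly:
 *
 *   echo "+memory +pids" > /sys/fs/cgroup/<parent>/cgroup.subtree_control
 *
 * If nothing has been delegated, the file is empty and the hierarchy is
 * registered above with an empty controller list.
 */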
2753
2754 static bool cg_init(struct cgroup_ops *ops, struct lxc_conf *conf)
2755 {
2756 int ret;
2757 const char *tmp;
2758 bool relative = conf->cgroup_meta.relative;
2759
2760 tmp = lxc_global_config_value("lxc.cgroup.use");
2761 if (tmp) {
2762 char *chop, *cur, *pin;
2763
2764 pin = must_copy_string(tmp);
2765 chop = pin;
2766
2767 lxc_iterate_parts(cur, chop, ",") {
2768 must_append_string(&ops->cgroup_use, cur);
2769 }
2770
2771 free(pin);
2772 }
2773
2774 ret = cg_unified_init(ops, relative);
2775 if (ret < 0)
2776 return false;
2777
2778 if (ret == CGROUP2_SUPER_MAGIC)
2779 return true;
2780
2781 return cg_hybrid_init(ops, relative);
2782 }
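/*
 * Illustrative example, not part of the original file (controller names
 * assumed): a system-wide setting such as
 *
 *   lxc.cgroup.use = freezer,memory
 *
 * is split on ',' by lxc_iterate_parts() above into {"freezer", "memory"};
 * cg_hybrid_init() then skips any discovered hierarchy containing a
 * controller that is not in that list (see cgroup_use_wants_controllers()).
 */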
2783
2784 __cgfsng_ops static bool cgfsng_data_init(struct cgroup_ops *ops)
2785 {
2786 const char *cgroup_pattern;
2787
2788 /* copy system-wide cgroup information */
2789 cgroup_pattern = lxc_global_config_value("lxc.cgroup.pattern");
2790 if (!cgroup_pattern) {
2791 /* lxc.cgroup.pattern is only NULL on error. */
2792 ERROR("Failed to retrieve cgroup pattern");
2793 return false;
2794 }
2795 ops->cgroup_pattern = must_copy_string(cgroup_pattern);
2796 ops->monitor_pattern = MONITOR_CGROUP;
2797
2798 return true;
2799 }
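/*
 * Illustrative note, not part of the original file (pattern value assumed):
 * lxc.cgroup.pattern is a template for the container's cgroup name in which
 * "%n" is replaced by the container name, so a pattern such as "lxc/%n"
 * would place a container named "c1" under "lxc/c1" in each hierarchy. The
 * actual default pattern comes from the build/system configuration.
 */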
2800
2801 struct cgroup_ops *cgfsng_ops_init(struct lxc_conf *conf)
2802 {
2803 struct cgroup_ops *cgfsng_ops;
2804
2805 cgfsng_ops = malloc(sizeof(struct cgroup_ops));
2806 if (!cgfsng_ops)
2807 return NULL;
2808
2809 memset(cgfsng_ops, 0, sizeof(struct cgroup_ops));
2810 cgfsng_ops->cgroup_layout = CGROUP_LAYOUT_UNKNOWN;
2811
2812 if (!cg_init(cgfsng_ops, conf)) {
2813 free(cgfsng_ops);
2814 return NULL;
2815 }
2816
2817 cgfsng_ops->data_init = cgfsng_data_init;
2818 cgfsng_ops->payload_destroy = cgfsng_payload_destroy;
2819 cgfsng_ops->monitor_destroy = cgfsng_monitor_destroy;
2820 cgfsng_ops->monitor_create = cgfsng_monitor_create;
2821 cgfsng_ops->monitor_enter = cgfsng_monitor_enter;
2822 cgfsng_ops->payload_create = cgfsng_payload_create;
2823 cgfsng_ops->payload_enter = cgfsng_payload_enter;
2824 cgfsng_ops->escape = cgfsng_escape;
2825 cgfsng_ops->num_hierarchies = cgfsng_num_hierarchies;
2826 cgfsng_ops->get_hierarchies = cgfsng_get_hierarchies;
2827 cgfsng_ops->get_cgroup = cgfsng_get_cgroup;
2828 cgfsng_ops->get = cgfsng_get;
2829 cgfsng_ops->set = cgfsng_set;
2830 cgfsng_ops->unfreeze = cgfsng_unfreeze;
2831 cgfsng_ops->setup_limits = cgfsng_setup_limits;
2832 cgfsng_ops->driver = "cgfsng";
2833 cgfsng_ops->version = "1.0.0";
2834 cgfsng_ops->attach = cgfsng_attach;
2835 cgfsng_ops->chown = cgfsng_chown;
2836 cgfsng_ops->mount = cgfsng_mount;
2837 cgfsng_ops->nrtasks = cgfsng_nrtasks;
2838
2839 return cgfsng_ops;
2840 }
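/*
 * Illustrative sketch, not part of the original file: a caller typically
 * obtains the ops table from cgfsng_ops_init() and drives it through the
 * function pointers filled in above. The container name, lxcpath and the
 * lack of error handling/tear-down below are assumptions for illustration
 * only.
 */
#if 0
static void cgfsng_ops_usage_sketch(struct lxc_conf *conf, pid_t pid)
{
	struct cgroup_ops *ops;

	ops = cgfsng_ops_init(conf);
	if (!ops)
		return;

	/* Copy system-wide cgroup settings, then attach a process. */
	if (ops->data_init(ops))
		(void)ops->attach(ops, "c1", "/var/lib/lxc", pid);
}
#endif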