/***
  This file is part of systemd.

  Copyright 2013 Lennart Poettering

  systemd is free software; you can redistribute it and/or modify it
  under the terms of the GNU Lesser General Public License as published by
  the Free Software Foundation; either version 2.1 of the License, or
  (at your option) any later version.

  systemd is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public License
  along with systemd; If not, see <http://www.gnu.org/licenses/>.
***/
#include "alloc-util.h"
#include "cgroup-util.h"
#include "parse-util.h"
#include "path-util.h"
#include "process-util.h"
#include "string-table.h"
#include "string-util.h"
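
/* Period used when translating CPUQuotaPerSecUSec= into the legacy "cpu"
 * controller's cfs_period_us/cfs_quota_us pair: systemd always programs a
 * 100ms CFS period and scales the configured per-second quota down to it. */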
#define CGROUP_CPU_QUOTA_PERIOD_USEC ((usec_t) 100 * USEC_PER_MSEC)
void cgroup_context_init(CGroupContext *c) {
        assert(c);

        /* Initialize everything to the kernel defaults, assuming the
         * structure is preinitialized to 0 */

        c->cpu_shares = CGROUP_CPU_SHARES_INVALID;
        c->startup_cpu_shares = CGROUP_CPU_SHARES_INVALID;
        c->cpu_quota_per_sec_usec = USEC_INFINITY;

        c->memory_limit = (uint64_t) -1;

        c->blockio_weight = CGROUP_BLKIO_WEIGHT_INVALID;
        c->startup_blockio_weight = CGROUP_BLKIO_WEIGHT_INVALID;

        c->tasks_max = (uint64_t) -1;
}
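
/* The following three helpers unlink one entry from the corresponding
 * per-context list and release the memory it owns. */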
void cgroup_context_free_device_allow(CGroupContext *c, CGroupDeviceAllow *a) {
        assert(c);
        assert(a);

        LIST_REMOVE(device_allow, c->device_allow, a);
        free(a->path);
        free(a);
}
void cgroup_context_free_blockio_device_weight(CGroupContext *c, CGroupBlockIODeviceWeight *w) {
        assert(c);
        assert(w);

        LIST_REMOVE(device_weights, c->blockio_device_weights, w);
        free(w->path);
        free(w);
}
void cgroup_context_free_blockio_device_bandwidth(CGroupContext *c, CGroupBlockIODeviceBandwidth *b) {
        assert(c);
        assert(b);

        LIST_REMOVE(device_bandwidths, c->blockio_device_bandwidths, b);
        free(b->path);
        free(b);
}
void cgroup_context_done(CGroupContext *c) {
        assert(c);

        while (c->blockio_device_weights)
                cgroup_context_free_blockio_device_weight(c, c->blockio_device_weights);

        while (c->blockio_device_bandwidths)
                cgroup_context_free_blockio_device_bandwidth(c, c->blockio_device_bandwidths);

        while (c->device_allow)
                cgroup_context_free_device_allow(c, c->device_allow);
}
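
/* Dump all cgroup settings of a context as "Key=value" lines to f, each
 * line prepended with the given prefix; useful for debugging output. */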
void cgroup_context_dump(CGroupContext *c, FILE* f, const char *prefix) {
        CGroupBlockIODeviceBandwidth *b;
        CGroupBlockIODeviceWeight *w;
        CGroupDeviceAllow *a;
        char u[FORMAT_TIMESPAN_MAX];

        assert(c);
        assert(f);

        prefix = strempty(prefix);

        fprintf(f,
                "%sCPUAccounting=%s\n"
                "%sBlockIOAccounting=%s\n"
                "%sMemoryAccounting=%s\n"
                "%sTasksAccounting=%s\n"
                "%sCPUShares=%" PRIu64 "\n"
                "%sStartupCPUShares=%" PRIu64 "\n"
                "%sCPUQuotaPerSecSec=%s\n"
                "%sBlockIOWeight=%" PRIu64 "\n"
                "%sStartupBlockIOWeight=%" PRIu64 "\n"
                "%sMemoryLimit=%" PRIu64 "\n"
                "%sTasksMax=%" PRIu64 "\n"
                "%sDevicePolicy=%s\n"
                "%sDelegate=%s\n",
                prefix, yes_no(c->cpu_accounting),
                prefix, yes_no(c->blockio_accounting),
                prefix, yes_no(c->memory_accounting),
                prefix, yes_no(c->tasks_accounting),
                prefix, c->cpu_shares,
                prefix, c->startup_cpu_shares,
                prefix, format_timespan(u, sizeof(u), c->cpu_quota_per_sec_usec, 1),
                prefix, c->blockio_weight,
                prefix, c->startup_blockio_weight,
                prefix, c->memory_limit,
                prefix, c->tasks_max,
                prefix, cgroup_device_policy_to_string(c->device_policy),
                prefix, yes_no(c->delegate));

        LIST_FOREACH(device_allow, a, c->device_allow)
                fprintf(f,
                        "%sDeviceAllow=%s %s%s%s\n",
                        prefix,
                        a->path,
                        a->r ? "r" : "", a->w ? "w" : "", a->m ? "m" : "");

        LIST_FOREACH(device_weights, w, c->blockio_device_weights)
                fprintf(f,
                        "%sBlockIODeviceWeight=%s %" PRIu64 "\n",
                        prefix,
                        w->path,
                        w->weight);

        LIST_FOREACH(device_bandwidths, b, c->blockio_device_bandwidths) {
                char buf[FORMAT_BYTES_MAX];

                fprintf(f,
                        "%s%s=%s %s\n",
                        prefix,
                        b->read ? "BlockIOReadBandwidth" : "BlockIOWriteBandwidth",
                        b->path,
                        format_bytes(buf, sizeof(buf), b->bandwidth));
        }
}
static int lookup_blkio_device(const char *p, dev_t *dev) {
        struct stat st;
        int r;

        assert(p);
        assert(dev);

        r = stat(p, &st);
        if (r < 0)
                return log_warning_errno(errno, "Couldn't stat device %s: %m", p);

        if (S_ISBLK(st.st_mode))
                *dev = st.st_rdev;
        else if (major(st.st_dev) != 0) {
                /* If this is not a device node then find the block
                 * device this file is stored on */
                *dev = st.st_dev;

                /* If this is a partition, try to get the originating
                 * block device */
                block_get_whole_disk(*dev, dev);
        } else {
                log_warning("%s is not a block device and file system block device cannot be determined or is not local.", p);
                return -ENODEV;
        }

        return 0;
}
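
/* Format a "type major:minor access" rule for the given device node and
 * write it to the cgroup's devices.allow attribute. */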
static int whitelist_device(const char *path, const char *node, const char *acc) {
        char buf[2+DECIMAL_STR_MAX(dev_t)*2+2+4];
        struct stat st;
        int r;

        assert(path);
        assert(acc);

        if (stat(node, &st) < 0) {
                log_warning("Couldn't stat device %s", node);
                return -errno;
        }

        if (!S_ISCHR(st.st_mode) && !S_ISBLK(st.st_mode)) {
                log_warning("%s is not a device.", node);
                return -ENODEV;
        }

        sprintf(buf,
                "%c %u:%u %s",
                S_ISCHR(st.st_mode) ? 'c' : 'b',
                major(st.st_rdev), minor(st.st_rdev),
                acc);

        r = cg_set_attribute("devices", path, "devices.allow", buf);
        if (r < 0)
                log_full_errno(IN_SET(r, -ENOENT, -EROFS, -EINVAL, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                               "Failed to set devices.allow on %s: %m", path);

        return r;
}
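
/* Like whitelist_device(), but whitelists an entire device major. The
 * major number is resolved by matching the given name (an fnmatch()
 * pattern) against /proc/devices. */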
static int whitelist_major(const char *path, const char *name, char type, const char *acc) {
        _cleanup_fclose_ FILE *f = NULL;
        char line[LINE_MAX];
        bool good = false;
        int r;

        assert(path);
        assert(acc);
        assert(type == 'b' || type == 'c');

        f = fopen("/proc/devices", "re");
        if (!f)
                return log_warning_errno(errno, "Cannot open /proc/devices to resolve %s (%c): %m", name, type);

        FOREACH_LINE(line, f, goto fail) {
                char buf[2+DECIMAL_STR_MAX(unsigned)+3+4], *p, *w;
                unsigned maj;

                truncate_nl(line);

                if (type == 'c' && streq(line, "Character devices:")) {
                        good = true;
                        continue;
                }

                if (type == 'b' && streq(line, "Block devices:")) {
                        good = true;
                        continue;
                }

                if (isempty(line)) {
                        good = false;
                        continue;
                }

                if (!good)
                        continue;

                p = strstrip(line);

                w = strpbrk(p, WHITESPACE);
                if (!w)
                        continue;
                *w = 0;

                r = safe_atou(p, &maj);
                if (r < 0)
                        continue;
                if (maj <= 0)
                        continue;

                w++;
                w += strspn(w, WHITESPACE);

                if (fnmatch(name, w, 0) != 0)
                        continue;

                sprintf(buf,
                        "%c %u:* %s",
                        type,
                        maj,
                        acc);

                r = cg_set_attribute("devices", path, "devices.allow", buf);
                if (r < 0)
                        log_full_errno(IN_SET(r, -ENOENT, -EROFS, -EINVAL, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                                       "Failed to set devices.allow on %s: %m", path);
        }

        return 0;

fail:
        log_warning_errno(errno, "Failed to read /proc/devices: %m");
        return -errno;
}
void cgroup_context_apply(CGroupContext *c, CGroupMask mask, const char *path, ManagerState state) {
        bool is_root;
        int r;

        assert(c);
        assert(path);

        if (mask == 0)
                return;

        /* Some cgroup attributes are not supported on the root cgroup,
         * hence silently ignore */
        is_root = isempty(path) || path_equal(path, "/");
        if (is_root)
                /* Make sure we don't try to display messages with an empty path. */
                path = "/";

        /* We generally ignore errors caused by read-only mounted
         * cgroup trees (assuming we are running in a container then),
         * and missing cgroups, i.e. EROFS and ENOENT. */

        if ((mask & CGROUP_MASK_CPU) && !is_root) {
                char buf[MAX(DECIMAL_STR_MAX(uint64_t), DECIMAL_STR_MAX(usec_t)) + 1];

                sprintf(buf, "%" PRIu64 "\n",
                        IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING) && c->startup_cpu_shares != CGROUP_CPU_SHARES_INVALID ? c->startup_cpu_shares :
                        c->cpu_shares != CGROUP_CPU_SHARES_INVALID ? c->cpu_shares : CGROUP_CPU_SHARES_DEFAULT);
                r = cg_set_attribute("cpu", path, "cpu.shares", buf);
                if (r < 0)
                        log_full_errno(IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                                       "Failed to set cpu.shares on %s: %m", path);

                sprintf(buf, USEC_FMT "\n", CGROUP_CPU_QUOTA_PERIOD_USEC);
                r = cg_set_attribute("cpu", path, "cpu.cfs_period_us", buf);
                if (r < 0)
                        log_full_errno(IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                                       "Failed to set cpu.cfs_period_us on %s: %m", path);

                if (c->cpu_quota_per_sec_usec != USEC_INFINITY) {
                        sprintf(buf, USEC_FMT "\n", c->cpu_quota_per_sec_usec * CGROUP_CPU_QUOTA_PERIOD_USEC / USEC_PER_SEC);
                        r = cg_set_attribute("cpu", path, "cpu.cfs_quota_us", buf);
                } else
                        r = cg_set_attribute("cpu", path, "cpu.cfs_quota_us", "-1");
                if (r < 0)
                        log_full_errno(IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                                       "Failed to set cpu.cfs_quota_us on %s: %m", path);
        }

        if (mask & CGROUP_MASK_BLKIO) {
                char buf[MAX(DECIMAL_STR_MAX(uint64_t)+1,
                             DECIMAL_STR_MAX(dev_t)*2+2+DECIMAL_STR_MAX(uint64_t)+1)];
                CGroupBlockIODeviceWeight *w;
                CGroupBlockIODeviceBandwidth *b;

                if (!is_root) {
                        sprintf(buf, "%" PRIu64 "\n",
                                IN_SET(state, MANAGER_STARTING, MANAGER_INITIALIZING) && c->startup_blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID ? c->startup_blockio_weight :
                                c->blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID ? c->blockio_weight : CGROUP_BLKIO_WEIGHT_DEFAULT);
                        r = cg_set_attribute("blkio", path, "blkio.weight", buf);
                        if (r < 0)
                                log_full_errno(IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                                               "Failed to set blkio.weight on %s: %m", path);

                        /* FIXME: no way to reset this list */
                        LIST_FOREACH(device_weights, w, c->blockio_device_weights) {
                                dev_t dev;

                                r = lookup_blkio_device(w->path, &dev);
                                if (r < 0)
                                        continue;

                                sprintf(buf, "%u:%u %" PRIu64 "\n", major(dev), minor(dev), w->weight);
                                r = cg_set_attribute("blkio", path, "blkio.weight_device", buf);
                                if (r < 0)
                                        log_full_errno(IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                                                       "Failed to set blkio.weight_device on %s: %m", path);
                        }
                }

                /* FIXME: no way to reset this list */
                LIST_FOREACH(device_bandwidths, b, c->blockio_device_bandwidths) {
                        const char *a;
                        dev_t dev;

                        r = lookup_blkio_device(b->path, &dev);
                        if (r < 0)
                                continue;

                        a = b->read ? "blkio.throttle.read_bps_device" : "blkio.throttle.write_bps_device";

                        sprintf(buf, "%u:%u %" PRIu64 "\n", major(dev), minor(dev), b->bandwidth);
                        r = cg_set_attribute("blkio", path, a, buf);
                        if (r < 0)
                                log_full_errno(IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                                               "Failed to set %s on %s: %m", a, path);
                }
        }

        if ((mask & CGROUP_MASK_MEMORY) && !is_root) {
                if (c->memory_limit != (uint64_t) -1) {
                        char buf[DECIMAL_STR_MAX(uint64_t) + 1];

                        sprintf(buf, "%" PRIu64 "\n", c->memory_limit);

                        if (cg_unified() <= 0)
                                r = cg_set_attribute("memory", path, "memory.limit_in_bytes", buf);
                        else
                                r = cg_set_attribute("memory", path, "memory.max", buf);

                } else {
                        if (cg_unified() <= 0)
                                r = cg_set_attribute("memory", path, "memory.limit_in_bytes", "-1");
                        else
                                r = cg_set_attribute("memory", path, "memory.max", "max");
                }

                if (r < 0)
                        log_full_errno(IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                                       "Failed to set memory.limit_in_bytes/memory.max on %s: %m", path);
        }

        if ((mask & CGROUP_MASK_DEVICES) && !is_root) {
                CGroupDeviceAllow *a;

                /* Changing the devices list of a populated cgroup
                 * might result in EINVAL, hence ignore EINVAL
                 * here. */

                if (c->device_allow || c->device_policy != CGROUP_AUTO)
                        r = cg_set_attribute("devices", path, "devices.deny", "a");
                else
                        r = cg_set_attribute("devices", path, "devices.allow", "a");
                if (r < 0)
                        log_full_errno(IN_SET(r, -ENOENT, -EROFS, -EINVAL, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                                       "Failed to reset devices.list on %s: %m", path);

                if (c->device_policy == CGROUP_CLOSED ||
                    (c->device_policy == CGROUP_AUTO && c->device_allow)) {
                        static const char auto_devices[] =
                                "/dev/null\0" "rwm\0"
                                "/dev/zero\0" "rwm\0"
                                "/dev/full\0" "rwm\0"
                                "/dev/random\0" "rwm\0"
                                "/dev/urandom\0" "rwm\0"
                                "/dev/tty\0" "rwm\0"
                                "/dev/pts/ptmx\0" "rw\0"; /* /dev/pts/ptmx may not be duplicated, but accessed */

                        const char *x, *y;

                        NULSTR_FOREACH_PAIR(x, y, auto_devices)
                                whitelist_device(path, x, y);

                        whitelist_major(path, "pts", 'c', "rw");
                        whitelist_major(path, "kdbus", 'c', "rw");
                        whitelist_major(path, "kdbus/*", 'c', "rw");
                }

                LIST_FOREACH(device_allow, a, c->device_allow) {
                        char acc[4];
                        unsigned k = 0;

                        if (a->r)
                                acc[k++] = 'r';
                        if (a->w)
                                acc[k++] = 'w';
                        if (a->m)
                                acc[k++] = 'm';

                        if (k == 0)
                                continue;

                        acc[k++] = 0;

                        if (startswith(a->path, "/dev/"))
                                whitelist_device(path, a->path, acc);
                        else if (startswith(a->path, "block-"))
                                whitelist_major(path, a->path + 6, 'b', acc);
                        else if (startswith(a->path, "char-"))
                                whitelist_major(path, a->path + 5, 'c', acc);
                        else
                                log_debug("Ignoring device %s while writing cgroup attribute.", a->path);
                }
        }

        if ((mask & CGROUP_MASK_PIDS) && !is_root) {
                if (c->tasks_max != (uint64_t) -1) {
                        char buf[DECIMAL_STR_MAX(uint64_t) + 2];

                        sprintf(buf, "%" PRIu64 "\n", c->tasks_max);
                        r = cg_set_attribute("pids", path, "pids.max", buf);
                } else
                        r = cg_set_attribute("pids", path, "pids.max", "max");

                if (r < 0)
                        log_full_errno(IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
                                       "Failed to set pids.max on %s: %m", path);
        }
}
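
/* A controller is required as soon as any of its settings deviates from
 * the kernel default, or the matching accounting option is enabled. */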
CGroupMask cgroup_context_get_mask(CGroupContext *c) {
        CGroupMask mask = 0;

        /* Figure out which controllers we need */

        if (c->cpu_accounting ||
            c->cpu_shares != CGROUP_CPU_SHARES_INVALID ||
            c->startup_cpu_shares != CGROUP_CPU_SHARES_INVALID ||
            c->cpu_quota_per_sec_usec != USEC_INFINITY)
                mask |= CGROUP_MASK_CPUACCT | CGROUP_MASK_CPU;

        if (c->blockio_accounting ||
            c->blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID ||
            c->startup_blockio_weight != CGROUP_BLKIO_WEIGHT_INVALID ||
            c->blockio_device_weights ||
            c->blockio_device_bandwidths)
                mask |= CGROUP_MASK_BLKIO;

        if (c->memory_accounting ||
            c->memory_limit != (uint64_t) -1)
                mask |= CGROUP_MASK_MEMORY;

        if (c->device_allow ||
            c->device_policy != CGROUP_AUTO)
                mask |= CGROUP_MASK_DEVICES;

        if (c->tasks_accounting ||
            c->tasks_max != (uint64_t) -1)
                mask |= CGROUP_MASK_PIDS;

        return mask;
}
CGroupMask unit_get_own_mask(Unit *u) {
        CGroupContext *c;

        /* Returns the mask of controllers the unit needs for itself */

        c = unit_get_cgroup_context(u);
        if (!c)
                return 0;

        /* If delegation is turned on, then turn on all cgroups,
         * unless we are on the legacy hierarchy and the process we
         * fork into it is known to drop privileges, and hence
         * shouldn't get access to the controllers.
         *
         * Note that on the unified hierarchy it is safe to delegate
         * controllers to unprivileged services. */

        if (c->delegate) {
                ExecContext *e;

                e = unit_get_exec_context(u);
                if (!e ||
                    exec_context_maintains_privileges(e) ||
                    cg_unified() > 0)
                        return _CGROUP_MASK_ALL;
        }

        return cgroup_context_get_mask(c);
}
CGroupMask unit_get_members_mask(Unit *u) {
        assert(u);

        /* Returns the mask of controllers all of the unit's children
         * require, merged */

        if (u->cgroup_members_mask_valid)
                return u->cgroup_members_mask;

        u->cgroup_members_mask = 0;

        if (u->type == UNIT_SLICE) {
                Unit *member;
                Iterator i;

                SET_FOREACH(member, u->dependencies[UNIT_BEFORE], i) {

                        if (member == u)
                                continue;

                        if (UNIT_DEREF(member->slice) != u)
                                continue;

                        u->cgroup_members_mask |=
                                unit_get_own_mask(member) |
                                unit_get_members_mask(member);
                }
        }

        u->cgroup_members_mask_valid = true;
        return u->cgroup_members_mask;
}
CGroupMask unit_get_siblings_mask(Unit *u) {
        assert(u);

        /* Returns the mask of controllers all of the unit's siblings
         * require, i.e. the members mask of the unit's parent slice
         * if there is one. */

        if (UNIT_ISSET(u->slice))
                return unit_get_members_mask(UNIT_DEREF(u->slice));

        return unit_get_own_mask(u) | unit_get_members_mask(u);
}
CGroupMask unit_get_subtree_mask(Unit *u) {

        /* Returns the mask of this subtree, meaning of the group
         * itself and its children. */

        return unit_get_own_mask(u) | unit_get_members_mask(u);
}
CGroupMask unit_get_target_mask(Unit *u) {
        CGroupMask mask;

        /* This returns the cgroup mask of all controllers to enable
         * for a specific cgroup, i.e. everything it needs itself,
         * plus all that its children need, plus all that its siblings
         * need. This is primarily useful on the legacy cgroup
         * hierarchy, where we need to duplicate each cgroup in each
         * hierarchy that shall be enabled for it. */

        mask = unit_get_own_mask(u) | unit_get_members_mask(u) | unit_get_siblings_mask(u);
        mask &= u->manager->cgroup_supported;

        return mask;
}
CGroupMask unit_get_enable_mask(Unit *u) {
        CGroupMask mask;

        /* This returns the cgroup mask of all controllers to enable
         * for the children of a specific cgroup. This is primarily
         * useful for the unified cgroup hierarchy, where each cgroup
         * controls which controllers are enabled for its children. */

        mask = unit_get_members_mask(u);
        mask &= u->manager->cgroup_supported;

        return mask;
}
/* Recurse from a unit up through its containing slices, propagating
 * mask bits upward. A unit is also member of itself. */
void unit_update_cgroup_members_masks(Unit *u) {
        CGroupMask m;
        bool more;

        assert(u);

        /* Calculate subtree mask */
        m = unit_get_subtree_mask(u);

        /* See if anything changed from the previous invocation. If
         * not, we're done. */
        if (u->cgroup_subtree_mask_valid && m == u->cgroup_subtree_mask)
                return;

        more =
                u->cgroup_subtree_mask_valid &&
                ((m & ~u->cgroup_subtree_mask) != 0) &&
                ((~m & u->cgroup_subtree_mask) == 0);

        u->cgroup_subtree_mask = m;
        u->cgroup_subtree_mask_valid = true;

        if (UNIT_ISSET(u->slice)) {
                Unit *s = UNIT_DEREF(u->slice);

                if (more)
                        /* There's more set now than before. We
                         * propagate the new mask to the parent's mask
                         * (not caring if it actually was valid or
                         * not) */

                        s->cgroup_members_mask |= m;

                else
                        /* There's less set now than before (or we
                         * don't know), we need to recalculate
                         * everything, so let's invalidate the
                         * parent's members mask */

                        s->cgroup_members_mask_valid = false;

                /* And now make sure that this change also hits our
                 * grandparents */
                unit_update_cgroup_members_masks(s);
        }
}
static const char *migrate_callback(CGroupMask mask, void *userdata) {
        Unit *u = userdata;

        assert(mask != 0);
        assert(u);

        while (u) {
                if (u->cgroup_path &&
                    u->cgroup_realized &&
                    (u->cgroup_realized_mask & mask) == mask)
                        return u->cgroup_path;

                u = UNIT_DEREF(u->slice);
        }

        return NULL;
}
char *unit_default_cgroup_path(Unit *u) {
        _cleanup_free_ char *escaped = NULL, *slice = NULL;
        int r;

        assert(u);

        if (unit_has_name(u, SPECIAL_ROOT_SLICE))
                return strdup(u->manager->cgroup_root);

        if (UNIT_ISSET(u->slice) && !unit_has_name(UNIT_DEREF(u->slice), SPECIAL_ROOT_SLICE)) {
                r = cg_slice_to_path(UNIT_DEREF(u->slice)->id, &slice);
                if (r < 0)
                        return NULL;
        }

        escaped = cg_escape(u->id);
        if (!escaped)
                return NULL;

        if (slice)
                return strjoin(u->manager->cgroup_root, "/", slice, "/", escaped, NULL);
        else
                return strjoin(u->manager->cgroup_root, "/", escaped, NULL);
}
int unit_set_cgroup_path(Unit *u, const char *path) {
        _cleanup_free_ char *p = NULL;
        int r;

        assert(u);

        if (path) {
                p = strdup(path);
                if (!p)
                        return -ENOMEM;
        }

        if (streq_ptr(u->cgroup_path, p))
                return 0;

        if (p) {
                r = hashmap_put(u->manager->cgroup_unit, p, u);
                if (r < 0)
                        return r;
        }

        unit_release_cgroup(u);

        u->cgroup_path = p;
        p = NULL;

        return 0;
}
int unit_watch_cgroup(Unit *u) {
        _cleanup_free_ char *populated = NULL;
        int r;

        assert(u);

        /* Watches the "cgroup.populated" attribute of this unit's cgroup. */

        if (!u->cgroup_path)
                return 0;

        if (u->cgroup_inotify_wd >= 0)
                return 0;

        /* Only applies to the unified hierarchy */
        r = cg_unified();
        if (r < 0)
                return log_unit_error_errno(u, r, "Failed to detect whether the unified hierarchy is used: %m");
        if (r == 0)
                return 0;

        /* Don't watch the root slice, it's pointless. */
        if (unit_has_name(u, SPECIAL_ROOT_SLICE))
                return 0;

        r = hashmap_ensure_allocated(&u->manager->cgroup_inotify_wd_unit, &trivial_hash_ops);
        if (r < 0)
                return log_oom();

        r = cg_get_path(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, "cgroup.populated", &populated);
        if (r < 0)
                return log_oom();

        u->cgroup_inotify_wd = inotify_add_watch(u->manager->cgroup_inotify_fd, populated, IN_MODIFY);
        if (u->cgroup_inotify_wd < 0) {

                if (errno == ENOENT) /* If the directory is already
                                      * gone we don't need to track
                                      * it, so this is not an error */
                        return 0;

                return log_unit_error_errno(u, errno, "Failed to add inotify watch descriptor for control group %s: %m", u->cgroup_path);
        }

        r = hashmap_put(u->manager->cgroup_inotify_wd_unit, INT_TO_PTR(u->cgroup_inotify_wd), u);
        if (r < 0)
                return log_unit_error_errno(u, r, "Failed to add inotify watch descriptor to hash map: %m");

        return 0;
}
static int unit_create_cgroup(
                Unit *u,
                CGroupMask target_mask,
                CGroupMask enable_mask) {

        CGroupContext *c;
        int r;

        assert(u);

        c = unit_get_cgroup_context(u);
        if (!c)
                return 0;

        if (!u->cgroup_path) {
                _cleanup_free_ char *path = NULL;

                path = unit_default_cgroup_path(u);
                if (!path)
                        return log_oom();

                r = unit_set_cgroup_path(u, path);
                if (r == -EEXIST)
                        return log_unit_error_errno(u, r, "Control group %s exists already.", path);
                if (r < 0)
                        return log_unit_error_errno(u, r, "Failed to set unit's control group path to %s: %m", path);
        }

        /* First, create our own group */
        r = cg_create_everywhere(u->manager->cgroup_supported, target_mask, u->cgroup_path);
        if (r < 0)
                return log_unit_error_errno(u, r, "Failed to create cgroup %s: %m", u->cgroup_path);

        /* Start watching it */
        (void) unit_watch_cgroup(u);

        /* Enable all controllers we need */
        r = cg_enable_everywhere(u->manager->cgroup_supported, enable_mask, u->cgroup_path);
        if (r < 0)
                log_unit_warning_errno(u, r, "Failed to enable controllers on cgroup %s, ignoring: %m", u->cgroup_path);

        /* Keep track that this is now realized */
        u->cgroup_realized = true;
        u->cgroup_realized_mask = target_mask;

        if (u->type != UNIT_SLICE && !c->delegate) {

                /* Then, possibly move things over, but not if
                 * subgroups may contain processes, which is the case
                 * for slice and delegation units. */
                r = cg_migrate_everywhere(u->manager->cgroup_supported, u->cgroup_path, u->cgroup_path, migrate_callback, u);
                if (r < 0)
                        log_unit_warning_errno(u, r, "Failed to migrate cgroup to %s, ignoring: %m", u->cgroup_path);
        }

        return 0;
}
int unit_attach_pids_to_cgroup(Unit *u) {
        int r;

        assert(u);

        r = unit_realize_cgroup(u);
        if (r < 0)
                return r;

        r = cg_attach_many_everywhere(u->manager->cgroup_supported, u->cgroup_path, u->pids, migrate_callback, u);
        if (r < 0)
                return r;

        return 0;
}
static bool unit_has_mask_realized(Unit *u, CGroupMask target_mask) {
        assert(u);

        return u->cgroup_realized && u->cgroup_realized_mask == target_mask;
}
/* Check if necessary controllers and attributes for a unit are in place.
 *
 * If not, create paths, move processes over, and set attributes.
 *
 * Returns 0 on success and < 0 on failure. */
static int unit_realize_cgroup_now(Unit *u, ManagerState state) {
        CGroupMask target_mask, enable_mask;
        int r;

        assert(u);

        if (u->in_cgroup_queue) {
                LIST_REMOVE(cgroup_queue, u->manager->cgroup_queue, u);
                u->in_cgroup_queue = false;
        }

        target_mask = unit_get_target_mask(u);
        if (unit_has_mask_realized(u, target_mask))
                return 0;

        /* First, realize parents */
        if (UNIT_ISSET(u->slice)) {
                r = unit_realize_cgroup_now(UNIT_DEREF(u->slice), state);
                if (r < 0)
                        return r;
        }

        /* And then do the real work */
        enable_mask = unit_get_enable_mask(u);
        r = unit_create_cgroup(u, target_mask, enable_mask);
        if (r < 0)
                return r;

        /* Finally, apply the necessary attributes. */
        cgroup_context_apply(unit_get_cgroup_context(u), target_mask, u->cgroup_path, state);

        return 0;
}
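
/* Mark a unit for asynchronous cgroup realization in the next manager
 * iteration, unless it is queued already. */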
static void unit_add_to_cgroup_queue(Unit *u) {

        if (u->in_cgroup_queue)
                return;

        LIST_PREPEND(cgroup_queue, u->manager->cgroup_queue, u);
        u->in_cgroup_queue = true;
}
unsigned manager_dispatch_cgroup_queue(Manager *m) {
        ManagerState state;
        unsigned n = 0;
        Unit *i;
        int r;

        assert(m);

        state = manager_state(m);

        while ((i = m->cgroup_queue)) {
                assert(i->in_cgroup_queue);

                r = unit_realize_cgroup_now(i, state);
                if (r < 0)
                        log_warning_errno(r, "Failed to realize cgroups for queued unit %s, ignoring: %m", i->id);

                n++;
        }

        return n;
}
static void unit_queue_siblings(Unit *u) {
        Unit *slice;

        /* This adds the siblings of the specified unit and the
         * siblings of all parent units to the cgroup queue. (But
         * neither the specified unit itself nor the parents.) */

        while ((slice = UNIT_DEREF(u->slice))) {
                Iterator i;
                Unit *m;

                SET_FOREACH(m, slice->dependencies[UNIT_BEFORE], i) {
                        if (m == u)
                                continue;

                        /* Skip units that have a dependency on the slice
                         * but aren't actually in it. */
                        if (UNIT_DEREF(m->slice) != slice)
                                continue;

                        /* No point in doing cgroup application for units
                         * without active processes. */
                        if (UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(m)))
                                continue;

                        /* If the unit doesn't need any new controllers
                         * and has current ones realized, it doesn't need
                         * any changes. */
                        if (unit_has_mask_realized(m, unit_get_target_mask(m)))
                                continue;

                        unit_add_to_cgroup_queue(m);
                }

                u = slice;
        }
}
int unit_realize_cgroup(Unit *u) {
        assert(u);

        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return 0;

        /* So, here's the deal: when realizing the cgroups for this
         * unit, we need to first create all parents, but there's more
         * actually: for the weight-based controllers we also need to
         * make sure that all our siblings (i.e. units that are in the
         * same slice as we are) have cgroups, too. Otherwise, things
         * would become very uneven as each of their processes would
         * get as much resources as all our group together. This call
         * will synchronously create the parent cgroups, but will
         * defer work on the siblings to the next event loop
         * iteration. */

        /* Add all sibling slices to the cgroup queue. */
        unit_queue_siblings(u);

        /* And realize this one now (and apply the values) */
        return unit_realize_cgroup_now(u, manager_state(u->manager));
}
void unit_release_cgroup(Unit *u) {
        assert(u);

        /* Forgets all cgroup details for this cgroup */

        if (u->cgroup_path) {
                (void) hashmap_remove(u->manager->cgroup_unit, u->cgroup_path);
                u->cgroup_path = mfree(u->cgroup_path);
        }

        if (u->cgroup_inotify_wd >= 0) {
                if (inotify_rm_watch(u->manager->cgroup_inotify_fd, u->cgroup_inotify_wd) < 0)
                        log_unit_debug_errno(u, errno, "Failed to remove cgroup inotify watch %i for %s, ignoring", u->cgroup_inotify_wd, u->id);

                (void) hashmap_remove(u->manager->cgroup_inotify_wd_unit, INT_TO_PTR(u->cgroup_inotify_wd));
                u->cgroup_inotify_wd = -1;
        }
}
void unit_prune_cgroup(Unit *u) {
        int r;
        bool is_root_slice;

        assert(u);

        /* Removes the cgroup, if empty and possible, and stops watching it. */

        if (!u->cgroup_path)
                return;

        is_root_slice = unit_has_name(u, SPECIAL_ROOT_SLICE);

        r = cg_trim_everywhere(u->manager->cgroup_supported, u->cgroup_path, !is_root_slice);
        if (r < 0) {
                log_debug_errno(r, "Failed to destroy cgroup %s, ignoring: %m", u->cgroup_path);
                return;
        }

        if (is_root_slice)
                return;

        unit_release_cgroup(u);

        u->cgroup_realized = false;
        u->cgroup_realized_mask = 0;
}
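
/* Scan the unit's cgroup for a candidate main PID, i.e. a process that
 * is our own immediate child; fails if more than one daemonized process
 * is found, since then the main process is ambiguous. */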
int unit_search_main_pid(Unit *u, pid_t *ret) {
        _cleanup_fclose_ FILE *f = NULL;
        pid_t pid = 0, npid, mypid;
        int r;

        assert(u);
        assert(ret);

        if (!u->cgroup_path)
                return -ENXIO;

        r = cg_enumerate_processes(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, &f);
        if (r < 0)
                return r;

        mypid = getpid();
        while (cg_read_pid(f, &npid) > 0)  {
                pid_t ppid;

                if (npid == pid)
                        continue;

                /* Ignore processes that aren't our kids */
                if (get_process_ppid(npid, &ppid) >= 0 && ppid != mypid)
                        continue;

                if (pid != 0)
                        /* Dang, there's more than one daemonized PID
                         * in this group, so we don't know what process
                         * is the main process. */
                        return -ENODATA;

                pid = npid;
        }

        *ret = pid;
        return 0;
}
static int unit_watch_pids_in_path(Unit *u, const char *path) {
        _cleanup_closedir_ DIR *d = NULL;
        _cleanup_fclose_ FILE *f = NULL;
        int ret = 0, r;

        assert(u);
        assert(path);

        r = cg_enumerate_processes(SYSTEMD_CGROUP_CONTROLLER, path, &f);
        if (r < 0)
                ret = r;
        else {
                pid_t pid;

                while ((r = cg_read_pid(f, &pid)) > 0) {
                        r = unit_watch_pid(u, pid);
                        if (r < 0 && ret >= 0)
                                ret = r;
                }

                if (r < 0 && ret >= 0)
                        ret = r;
        }

        r = cg_enumerate_subgroups(SYSTEMD_CGROUP_CONTROLLER, path, &d);
        if (r < 0) {
                if (ret >= 0)
                        ret = r;
        } else {
                char *fn;

                while ((r = cg_read_subgroup(d, &fn)) > 0) {
                        _cleanup_free_ char *p = NULL;

                        p = strjoin(path, "/", fn, NULL);
                        free(fn);

                        if (!p)
                                return -ENOMEM;

                        r = unit_watch_pids_in_path(u, p);
                        if (r < 0 && ret >= 0)
                                ret = r;
                }

                if (r < 0 && ret >= 0)
                        ret = r;
        }

        return ret;
}
int unit_watch_all_pids(Unit *u) {
        assert(u);

        /* Adds all PIDs from our cgroup to the set of PIDs we
         * watch. This is a fallback logic for cases where we do not
         * get reliable cgroup empty notifications: we try to use
         * SIGCHLD as replacement. */

        if (!u->cgroup_path)
                return -ENXIO;

        if (cg_unified() > 0) /* On unified we can use proper notifications */
                return 0;

        return unit_watch_pids_in_path(u, u->cgroup_path);
}
int unit_notify_cgroup_empty(Unit *u) {
        int r;

        assert(u);

        if (!u->cgroup_path)
                return 0;

        r = cg_is_empty_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path);
        if (r <= 0)
                return r;

        unit_add_to_gc_queue(u);

        if (UNIT_VTABLE(u)->notify_cgroup_empty)
                UNIT_VTABLE(u)->notify_cgroup_empty(u);

        return 0;
}
static int on_cgroup_inotify_event(sd_event_source *s, int fd, uint32_t revents, void *userdata) {
        Manager *m = userdata;

        assert(s);
        assert(fd >= 0);
        assert(m);

        for (;;) {
                union inotify_event_buffer buffer;
                struct inotify_event *e;
                ssize_t l;

                l = read(fd, &buffer, sizeof(buffer));
                if (l < 0) {
                        if (errno == EINTR || errno == EAGAIN)
                                return 0;

                        return log_error_errno(errno, "Failed to read control group inotify events: %m");
                }

                FOREACH_INOTIFY_EVENT(e, buffer, l) {
                        Unit *u;

                        if (e->wd < 0)
                                /* Queue overflow has no watch descriptor */
                                continue;

                        if (e->mask & IN_IGNORED)
                                /* The watch was just removed */
                                continue;

                        u = hashmap_get(m->cgroup_inotify_wd_unit, INT_TO_PTR(e->wd));
                        if (!u) /* Note that inotify might deliver
                                 * events for a watch even after it
                                 * was removed, because it was queued
                                 * before the removal. Let's ignore
                                 * this here safely. */
                                continue;

                        (void) unit_notify_cgroup_empty(u);
                }
        }
}
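
/* Set up PID 1's cgroup environment: determine where we run, create and
 * pin the root hierarchy, install empty-cgroup notifications, and figure
 * out which controllers the kernel supports. */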
int manager_setup_cgroup(Manager *m) {
        _cleanup_free_ char *path = NULL;
        CGroupController c;
        int r, unified;
        char *e;

        assert(m);

        /* 1. Determine hierarchy */
        m->cgroup_root = mfree(m->cgroup_root);
        r = cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, 0, &m->cgroup_root);
        if (r < 0)
                return log_error_errno(r, "Cannot determine cgroup we are running in: %m");

        /* Chop off the init scope, if we are already located in it */
        e = endswith(m->cgroup_root, "/" SPECIAL_INIT_SCOPE);

        /* LEGACY: Also chop off the system slice if we are in
         * it. This is to support live upgrades from older systemd
         * versions where PID 1 was moved there. Also see
         * cg_get_root_path(). */
        if (!e && m->running_as == MANAGER_SYSTEM) {
                e = endswith(m->cgroup_root, "/" SPECIAL_SYSTEM_SLICE);
                if (!e)
                        e = endswith(m->cgroup_root, "/system"); /* even more legacy */
        }
        if (e)
                *e = 0;

        /* And make sure to store away the root value without trailing
         * slash, even for the root dir, so that we can easily prepend
         * it everywhere. */
        while ((e = endswith(m->cgroup_root, "/")))
                *e = 0;

        /* 2. Show data */
        r = cg_get_path(SYSTEMD_CGROUP_CONTROLLER, m->cgroup_root, NULL, &path);
        if (r < 0)
                return log_error_errno(r, "Cannot find cgroup mount point: %m");

        unified = cg_unified();
        if (unified < 0)
                return log_error_errno(unified, "Couldn't determine if we are running in the unified hierarchy: %m");
        if (unified > 0)
                log_debug("Unified cgroup hierarchy is located at %s.", path);
        else
                log_debug("Using cgroup controller " SYSTEMD_CGROUP_CONTROLLER ". File system hierarchy is at %s.", path);

        if (!m->test_run) {
                const char *scope_path;

                /* 3. Install agent */
                if (unified) {

                        /* In the unified hierarchy we can get
                         * cgroup empty notifications via inotify. */

                        m->cgroup_inotify_event_source = sd_event_source_unref(m->cgroup_inotify_event_source);
                        safe_close(m->cgroup_inotify_fd);

                        m->cgroup_inotify_fd = inotify_init1(IN_NONBLOCK|IN_CLOEXEC);
                        if (m->cgroup_inotify_fd < 0)
                                return log_error_errno(errno, "Failed to create control group inotify object: %m");

                        r = sd_event_add_io(m->event, &m->cgroup_inotify_event_source, m->cgroup_inotify_fd, EPOLLIN, on_cgroup_inotify_event, m);
                        if (r < 0)
                                return log_error_errno(r, "Failed to watch control group inotify object: %m");

                        r = sd_event_source_set_priority(m->cgroup_inotify_event_source, SD_EVENT_PRIORITY_IDLE - 5);
                        if (r < 0)
                                return log_error_errno(r, "Failed to set priority of inotify event source: %m");

                        (void) sd_event_source_set_description(m->cgroup_inotify_event_source, "cgroup-inotify");

                } else if (m->running_as == MANAGER_SYSTEM) {

                        /* On the legacy hierarchy we only get
                         * notifications via cgroup agents. (Which
                         * isn't really reliable, since it does not
                         * generate events when control groups with
                         * children run empty.) */

                        r = cg_install_release_agent(SYSTEMD_CGROUP_CONTROLLER, SYSTEMD_CGROUP_AGENT_PATH);
                        if (r < 0)
                                log_warning_errno(r, "Failed to install release agent, ignoring: %m");
                        else if (r > 0)
                                log_debug("Installed release agent.");
                        else
                                log_debug("Release agent already installed.");
                }

                /* 4. Make sure we are in the special "init.scope" unit in the root slice. */
                scope_path = strjoina(m->cgroup_root, "/" SPECIAL_INIT_SCOPE);
                r = cg_create_and_attach(SYSTEMD_CGROUP_CONTROLLER, scope_path, 0);
                if (r < 0)
                        return log_error_errno(r, "Failed to create %s control group: %m", scope_path);

                /* also, move all other userspace processes remaining
                 * in the root cgroup into that scope. */
                r = cg_migrate(SYSTEMD_CGROUP_CONTROLLER, m->cgroup_root, SYSTEMD_CGROUP_CONTROLLER, scope_path, false);
                if (r < 0)
                        log_warning_errno(r, "Couldn't move remaining userspace processes, ignoring: %m");

                /* 5. And pin it, so that it cannot be unmounted */
                safe_close(m->pin_cgroupfs_fd);

                m->pin_cgroupfs_fd = open(path, O_RDONLY|O_CLOEXEC|O_DIRECTORY|O_NOCTTY|O_NONBLOCK);
                if (m->pin_cgroupfs_fd < 0)
                        return log_error_errno(errno, "Failed to open pin file: %m");

                /* 6. Always enable hierarchical support if it exists... */
                if (!unified)
                        (void) cg_set_attribute("memory", "/", "memory.use_hierarchy", "1");
        }

        /* 7. Figure out which controllers are supported */
        r = cg_mask_supported(&m->cgroup_supported);
        if (r < 0)
                return log_error_errno(r, "Failed to determine supported controllers: %m");

        for (c = 0; c < _CGROUP_CONTROLLER_MAX; c++)
                log_debug("Controller '%s' supported: %s", cgroup_controller_to_string(c), yes_no(m->cgroup_supported & CGROUP_CONTROLLER_TO_MASK(c)));

        return 0;
}
void manager_shutdown_cgroup(Manager *m, bool delete) {
        assert(m);

        /* We can't really delete the group, since we are in it. But
         * let's trim it. */
        if (delete && m->cgroup_root)
                (void) cg_trim(SYSTEMD_CGROUP_CONTROLLER, m->cgroup_root, false);

        m->cgroup_inotify_wd_unit = hashmap_free(m->cgroup_inotify_wd_unit);

        m->cgroup_inotify_event_source = sd_event_source_unref(m->cgroup_inotify_event_source);
        m->cgroup_inotify_fd = safe_close(m->cgroup_inotify_fd);

        m->pin_cgroupfs_fd = safe_close(m->pin_cgroupfs_fd);

        m->cgroup_root = mfree(m->cgroup_root);
}
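
/* Map a cgroup path to the innermost unit realized in it, walking the
 * path upwards until a match is found, falling back to the root slice. */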
Unit* manager_get_unit_by_cgroup(Manager *m, const char *cgroup) {
        char *p;
        Unit *u;

        assert(m);
        assert(cgroup);

        u = hashmap_get(m->cgroup_unit, cgroup);
        if (u)
                return u;

        p = strdupa(cgroup);
        for (;;) {
                char *e;

                e = strrchr(p, '/');
                if (!e || e == p)
                        return hashmap_get(m->cgroup_unit, SPECIAL_ROOT_SLICE);

                *e = 0;

                u = hashmap_get(m->cgroup_unit, p);
                if (u)
                        return u;
        }
}
*manager_get_unit_by_pid_cgroup(Manager
*m
, pid_t pid
) {
1418 _cleanup_free_
char *cgroup
= NULL
;
1426 r
= cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER
, pid
, &cgroup
);
1430 return manager_get_unit_by_cgroup(m
, cgroup
);
Unit *manager_get_unit_by_pid(Manager *m, pid_t pid) {
        Unit *u;

        assert(m);

        if (pid <= 0)
                return NULL;

        if (pid == 1)
                return hashmap_get(m->units, SPECIAL_INIT_SCOPE);

        u = hashmap_get(m->watch_pids1, PID_TO_PTR(pid));
        if (u)
                return u;

        u = hashmap_get(m->watch_pids2, PID_TO_PTR(pid));
        if (u)
                return u;

        return manager_get_unit_by_pid_cgroup(m, pid);
}
int manager_notify_cgroup_empty(Manager *m, const char *cgroup) {
        Unit *u;

        assert(m);
        assert(cgroup);

        u = manager_get_unit_by_cgroup(m, cgroup);
        if (!u)
                return 0;

        return unit_notify_cgroup_empty(u);
}
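
/* The getters below read a single accounting attribute of the unit's
 * cgroup, returning -ENODATA if the cgroup is gone or the relevant
 * controller was never realized for it. */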
int unit_get_memory_current(Unit *u, uint64_t *ret) {
        _cleanup_free_ char *v = NULL;
        int r;

        assert(u);
        assert(ret);

        if (!u->cgroup_path)
                return -ENODATA;

        if ((u->cgroup_realized_mask & CGROUP_MASK_MEMORY) == 0)
                return -ENODATA;

        if (cg_unified() <= 0)
                r = cg_get_attribute("memory", u->cgroup_path, "memory.usage_in_bytes", &v);
        else
                r = cg_get_attribute("memory", u->cgroup_path, "memory.current", &v);
        if (r == -ENOENT)
                return -ENODATA;
        if (r < 0)
                return r;

        return safe_atou64(v, ret);
}
int unit_get_tasks_current(Unit *u, uint64_t *ret) {
        _cleanup_free_ char *v = NULL;
        int r;

        assert(u);
        assert(ret);

        if (!u->cgroup_path)
                return -ENODATA;

        if ((u->cgroup_realized_mask & CGROUP_MASK_PIDS) == 0)
                return -ENODATA;

        r = cg_get_attribute("pids", u->cgroup_path, "pids.current", &v);
        if (r == -ENOENT)
                return -ENODATA;
        if (r < 0)
                return r;

        return safe_atou64(v, ret);
}
static int unit_get_cpu_usage_raw(Unit *u, nsec_t *ret) {
        _cleanup_free_ char *v = NULL;
        uint64_t ns;
        int r;

        assert(u);
        assert(ret);

        if (!u->cgroup_path)
                return -ENODATA;

        if ((u->cgroup_realized_mask & CGROUP_MASK_CPUACCT) == 0)
                return -ENODATA;

        r = cg_get_attribute("cpuacct", u->cgroup_path, "cpuacct.usage", &v);
        if (r == -ENOENT)
                return -ENODATA;
        if (r < 0)
                return r;

        r = safe_atou64(v, &ns);
        if (r < 0)
                return r;

        *ret = ns;
        return 0;
}
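
/* Like unit_get_cpu_usage_raw(), but relative to the base value recorded
 * by the last unit_reset_cpu_usage() call. */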
int unit_get_cpu_usage(Unit *u, nsec_t *ret) {
        nsec_t ns;
        int r;

        r = unit_get_cpu_usage_raw(u, &ns);
        if (r < 0)
                return r;

        if (ns > u->cpuacct_usage_base)
                ns -= u->cpuacct_usage_base;
        else
                ns = 0;

        *ret = ns;
        return 0;
}
int unit_reset_cpu_usage(Unit *u) {
        nsec_t ns;
        int r;

        assert(u);

        r = unit_get_cpu_usage_raw(u, &ns);
        if (r < 0) {
                u->cpuacct_usage_base = 0;
                return r;
        }

        u->cpuacct_usage_base = ns;
        return 0;
}
bool unit_cgroup_delegate(Unit *u) {
        CGroupContext *c;

        c = unit_get_cgroup_context(u);
        if (!c)
                return false;

        return c->delegate;
}
void unit_invalidate_cgroup(Unit *u, CGroupMask m) {
        assert(u);

        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return;

        if (m == 0)
                return;

        if ((u->cgroup_realized_mask & m) == 0)
                return;

        u->cgroup_realized_mask &= ~m;
        unit_add_to_cgroup_queue(u);
}
void manager_invalidate_startup_units(Manager *m) {
        Iterator i;
        Unit *u;

        assert(m);

        SET_FOREACH(u, m->startup_units, i)
                unit_invalidate_cgroup(u, CGROUP_MASK_CPU|CGROUP_MASK_BLKIO);
}
static const char* const cgroup_device_policy_table[_CGROUP_DEVICE_POLICY_MAX] = {
        [CGROUP_AUTO] = "auto",
        [CGROUP_CLOSED] = "closed",
        [CGROUP_STRICT] = "strict",
};

DEFINE_STRING_TABLE_LOOKUP(cgroup_device_policy, CGroupDevicePolicy);