]> git.proxmox.com Git - systemd.git/blob - src/core/unit.c
Update upstream source from tag 'upstream/248'
[systemd.git] / src / core / unit.c
1 /* SPDX-License-Identifier: LGPL-2.1-or-later */
2
3 #include <errno.h>
4 #include <stdlib.h>
5 #include <sys/prctl.h>
6 #include <unistd.h>
7
8 #include "sd-id128.h"
9 #include "sd-messages.h"
10
11 #include "all-units.h"
12 #include "alloc-util.h"
13 #include "bpf-firewall.h"
14 #include "bus-common-errors.h"
15 #include "bus-util.h"
16 #include "cgroup-setup.h"
17 #include "cgroup-util.h"
18 #include "core-varlink.h"
19 #include "dbus-unit.h"
20 #include "dbus.h"
21 #include "dropin.h"
22 #include "escape.h"
23 #include "execute.h"
24 #include "fd-util.h"
25 #include "fileio-label.h"
26 #include "fileio.h"
27 #include "format-util.h"
28 #include "id128-util.h"
29 #include "install.h"
30 #include "io-util.h"
31 #include "label.h"
32 #include "load-dropin.h"
33 #include "load-fragment.h"
34 #include "log.h"
35 #include "macro.h"
36 #include "missing_audit.h"
37 #include "mkdir.h"
38 #include "path-util.h"
39 #include "process-util.h"
40 #include "rm-rf.h"
41 #include "set.h"
42 #include "signal-util.h"
43 #include "sparse-endian.h"
44 #include "special.h"
45 #include "specifier.h"
46 #include "stat-util.h"
47 #include "stdio-util.h"
48 #include "string-table.h"
49 #include "string-util.h"
50 #include "strv.h"
51 #include "terminal-util.h"
52 #include "tmpfile-util.h"
53 #include "umask-util.h"
54 #include "unit-name.h"
55 #include "unit.h"
56 #include "user-util.h"
57 #include "virt.h"
58
59 /* Thresholds for logging at INFO level about resource consumption */
60 #define MENTIONWORTHY_CPU_NSEC (1 * NSEC_PER_SEC)
61 #define MENTIONWORTHY_IO_BYTES (1024 * 1024ULL)
62 #define MENTIONWORTHY_IP_BYTES (0ULL)
63
/* Thresholds for logging at NOTICE level about resource consumption */
65 #define NOTICEWORTHY_CPU_NSEC (10*60 * NSEC_PER_SEC) /* 10 minutes */
66 #define NOTICEWORTHY_IO_BYTES (10 * 1024 * 1024ULL) /* 10 MB */
67 #define NOTICEWORTHY_IP_BYTES (128 * 1024 * 1024ULL) /* 128 MB */
68
69 const UnitVTable * const unit_vtable[_UNIT_TYPE_MAX] = {
70 [UNIT_SERVICE] = &service_vtable,
71 [UNIT_SOCKET] = &socket_vtable,
72 [UNIT_TARGET] = &target_vtable,
73 [UNIT_DEVICE] = &device_vtable,
74 [UNIT_MOUNT] = &mount_vtable,
75 [UNIT_AUTOMOUNT] = &automount_vtable,
76 [UNIT_SWAP] = &swap_vtable,
77 [UNIT_TIMER] = &timer_vtable,
78 [UNIT_PATH] = &path_vtable,
79 [UNIT_SLICE] = &slice_vtable,
80 [UNIT_SCOPE] = &scope_vtable,
81 };
82
83 static void maybe_warn_about_dependency(Unit *u, const char *other, UnitDependency dependency);
84
/* Allocates and minimally initializes a Unit object of 'size' bytes (type-specific
 * structs embed Unit as their first member, hence size >= sizeof(Unit)). The unit
 * carries no name yet — see unit_add_name(). Returns NULL on allocation failure. */
Unit* unit_new(Manager *m, size_t size) {
        Unit *u;

        assert(m);
        assert(size >= sizeof(Unit));

        u = malloc0(size);
        if (!u)
                return NULL;

        /* Seed "unset" fields with their invalid/infinity sentinels and copy in
         * manager-wide defaults. */
        u->manager = m;
        u->type = _UNIT_TYPE_INVALID;
        u->default_dependencies = true;
        u->unit_file_state = _UNIT_FILE_STATE_INVALID;
        u->unit_file_preset = -1;
        u->on_failure_job_mode = JOB_REPLACE;
        u->cgroup_control_inotify_wd = -1;
        u->cgroup_memory_inotify_wd = -1;
        u->job_timeout = USEC_INFINITY;
        u->job_running_timeout = USEC_INFINITY;
        u->ref_uid = UID_INVALID;
        u->ref_gid = GID_INVALID;
        u->cpu_usage_last = NSEC_INFINITY;
        u->cgroup_invalidated_mask |= CGROUP_MASK_BPF_FIREWALL;
        u->failure_action_exit_status = u->success_action_exit_status = -1;

        /* All BPF map fds start out closed. */
        u->ip_accounting_ingress_map_fd = -1;
        u->ip_accounting_egress_map_fd = -1;
        u->ipv4_allow_map_fd = -1;
        u->ipv6_allow_map_fd = -1;
        u->ipv4_deny_map_fd = -1;
        u->ipv6_deny_map_fd = -1;

        u->last_section_private = -1;

        u->start_ratelimit = (RateLimit) { m->default_start_limit_interval, m->default_start_limit_burst };
        u->auto_stop_ratelimit = (RateLimit) { 10 * USEC_PER_SEC, 16 };

        /* No IO accounting samples collected yet. */
        for (CGroupIOAccountingMetric i = 0; i < _CGROUP_IO_ACCOUNTING_METRIC_MAX; i++)
                u->io_accounting_last[i] = UINT64_MAX;

        return u;
}
128
129 int unit_new_for_name(Manager *m, size_t size, const char *name, Unit **ret) {
130 _cleanup_(unit_freep) Unit *u = NULL;
131 int r;
132
133 u = unit_new(m, size);
134 if (!u)
135 return -ENOMEM;
136
137 r = unit_add_name(u, name);
138 if (r < 0)
139 return r;
140
141 *ret = TAKE_PTR(u);
142
143 return r;
144 }
145
146 bool unit_has_name(const Unit *u, const char *name) {
147 assert(u);
148 assert(name);
149
150 return streq_ptr(name, u->id) ||
151 set_contains(u->aliases, name);
152 }
153
/* One-time initialization of a freshly named unit: seeds its cgroup, exec and
 * kill contexts (where the unit type has them) with manager-wide defaults,
 * then hands over to the type-specific init() hook. */
static void unit_init(Unit *u) {
        CGroupContext *cc;
        ExecContext *ec;
        KillContext *kc;

        assert(u);
        assert(u->manager);
        assert(u->type >= 0);

        cc = unit_get_cgroup_context(u);
        if (cc) {
                cgroup_context_init(cc);

                /* Copy in the manager defaults into the cgroup
                 * context, _before_ the rest of the settings have
                 * been initialized */

                cc->cpu_accounting = u->manager->default_cpu_accounting;
                cc->io_accounting = u->manager->default_io_accounting;
                cc->blockio_accounting = u->manager->default_blockio_accounting;
                cc->memory_accounting = u->manager->default_memory_accounting;
                cc->tasks_accounting = u->manager->default_tasks_accounting;
                cc->ip_accounting = u->manager->default_ip_accounting;

                /* Slice units are excluded from the default TasksMax= here. */
                if (u->type != UNIT_SLICE)
                        cc->tasks_max = u->manager->default_tasks_max;
        }

        ec = unit_get_exec_context(u);
        if (ec) {
                exec_context_init(ec);

                if (MANAGER_IS_SYSTEM(u->manager))
                        ec->keyring_mode = EXEC_KEYRING_SHARED;
                else {
                        ec->keyring_mode = EXEC_KEYRING_INHERIT;

                        /* User manager might have its umask redefined by PAM or UMask=. In this
                         * case let the units it manages inherit this value by default. They can
                         * still tune this value through their own unit file */
                        (void) get_process_umask(getpid_cached(), &ec->umask);
                }
        }

        kc = unit_get_kill_context(u);
        if (kc)
                kill_context_init(kc);

        if (UNIT_VTABLE(u)->init)
                UNIT_VTABLE(u)->init(u);
}
205
/* Registers 'donated_name' as an alias of 'u'. On success the string's
 * ownership moves into u->aliases; on failure it stays with the caller. */
static int unit_add_alias(Unit *u, char *donated_name) {
        int r;

        /* Make sure that u->aliases is allocated. We may leave u->aliases
         * empty if we fail later, but this is not a problem. */
        r = set_ensure_put(&u->aliases, &string_hash_ops, donated_name);
        if (r < 0)
                return r;
        assert(r > 0); /* r == 0 would mean the name was already present — callers rule that out */

        return 0;
}
218
/* Adds 'text' as a name of 'u'. A template name ("foo@.service") is first
 * instantiated with u->instance. The first name a unit receives becomes its
 * primary id and fixes its type and instance; later names become aliases.
 * Returns 0 on success (including when the name is already present), or a
 * negative errno on validation or allocation failure. */
int unit_add_name(Unit *u, const char *text) {
        _cleanup_free_ char *name = NULL, *instance = NULL;
        UnitType t;
        int r;

        assert(u);
        assert(text);

        if (unit_name_is_valid(text, UNIT_NAME_TEMPLATE)) {
                if (!u->instance)
                        return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                                    "instance is not set when adding name '%s': %m", text);

                r = unit_name_replace_instance(text, u->instance, &name);
                if (r < 0)
                        return log_unit_debug_errno(u, r,
                                                    "failed to build instance name from '%s': %m", text);
        } else {
                name = strdup(text);
                if (!name)
                        return -ENOMEM;
        }

        /* Already known under this name? Then we are done. */
        if (unit_has_name(u, name))
                return 0;

        if (hashmap_contains(u->manager->units, name))
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EEXIST),
                                            "unit already exist when adding name '%s': %m", name);

        if (!unit_name_is_valid(name, UNIT_NAME_PLAIN|UNIT_NAME_INSTANCE))
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "name '%s' is invalid: %m", name);

        t = unit_name_to_type(name);
        if (t < 0)
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "failed to derive unit type from name '%s': %m", name);

        /* A name may not change the unit's already established type. */
        if (u->type != _UNIT_TYPE_INVALID && t != u->type)
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "unit type is illegal: u->type(%d) and t(%d) for name '%s': %m",
                                            u->type, t, name);

        r = unit_name_to_instance(name, &instance);
        if (r < 0)
                return log_unit_debug_errno(u, r, "failed to extract instance from name '%s': %m", name);

        if (instance && !unit_type_may_template(t))
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL), "templates are not allowed for name '%s': %m", name);

        /* Ensure that this unit either has no instance, or that the instance matches. */
        if (u->type != _UNIT_TYPE_INVALID && !streq_ptr(u->instance, instance))
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "cannot add name %s, the instances don't match (\"%s\" != \"%s\").",
                                            name, instance, u->instance);

        if (u->id && !unit_type_may_alias(t))
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(EEXIST),
                                            "cannot add name %s, aliases are not allowed for %s units.",
                                            name, unit_type_to_string(t));

        if (hashmap_size(u->manager->units) >= MANAGER_MAX_NAMES)
                return log_unit_warning_errno(u, SYNTHETIC_ERRNO(E2BIG), "cannot add name, manager has too many units: %m");

        /* Add name to the global hashmap first, because that's easier to undo */
        r = hashmap_put(u->manager->units, name, u);
        if (r < 0)
                return log_unit_debug_errno(u, r, "add unit to hashmap failed for name '%s': %m", text);

        if (u->id) {
                r = unit_add_alias(u, name); /* unit_add_alias() takes ownership of the name on success */
                if (r < 0) {
                        hashmap_remove(u->manager->units, name);
                        return r;
                }
                TAKE_PTR(name); /* ownership moved into u->aliases above */

        } else {
                /* A new name, we don't need the set yet. */
                assert(u->type == _UNIT_TYPE_INVALID);
                assert(!u->instance);

                u->type = t;
                u->id = TAKE_PTR(name);
                u->instance = TAKE_PTR(instance);

                LIST_PREPEND(units_by_type, u->manager->units_by_type[t], u);
                unit_init(u);
        }

        unit_add_to_dbus_queue(u);
        return 0;
}
313
/* Makes 'name' (which must already be one of the unit's aliases, possibly a
 * template resolved against u->instance) the unit's primary id, swapping the
 * old id into the alias set. Returns -ENOENT if 'name' is not an alias. */
int unit_choose_id(Unit *u, const char *name) {
        _cleanup_free_ char *t = NULL;
        char *s;
        int r;

        assert(u);
        assert(name);

        if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
                if (!u->instance)
                        return -EINVAL;

                r = unit_name_replace_instance(name, u->instance, &t);
                if (r < 0)
                        return r;

                name = t;
        }

        if (streq_ptr(u->id, name))
                return 0; /* Nothing to do. */

        /* Selects one of the aliases of this unit as the id */
        s = set_get(u->aliases, (char*) name);
        if (!s)
                return -ENOENT;

        if (u->id) {
                /* Swap: the old id goes into the alias set, the chosen alias comes out. */
                r = set_remove_and_put(u->aliases, name, u->id);
                if (r < 0)
                        return r;
        } else
                assert_se(set_remove(u->aliases, name)); /* see set_get() above… */

        u->id = s; /* Old u->id is now stored in the set, and s is not stored anywhere */
        unit_add_to_dbus_queue(u);

        return 0;
}
353
354 int unit_set_description(Unit *u, const char *description) {
355 int r;
356
357 assert(u);
358
359 r = free_and_strdup(&u->description, empty_to_null(description));
360 if (r < 0)
361 return r;
362 if (r > 0)
363 unit_add_to_dbus_queue(u);
364
365 return 0;
366 }
367
bool unit_may_gc(Unit *u) {
        UnitActiveState state;
        int r;

        assert(u);

        /* Checks whether the unit is ready to be unloaded for garbage collection.
         * Returns true when the unit may be collected, and false if there's some
         * reason to keep it loaded.
         *
         * References from other units are *not* checked here. Instead, this is done
         * in unit_gc_sweep(), but using markers to properly collect dependency loops.
         */

        /* Pending or installed jobs always pin the unit. */
        if (u->job)
                return false;

        if (u->nop_job)
                return false;

        state = unit_active_state(u);

        /* If the unit is inactive and failed and no job is queued for it, then release its runtime resources */
        if (UNIT_IS_INACTIVE_OR_FAILED(state) &&
            UNIT_VTABLE(u)->release_resources)
                UNIT_VTABLE(u)->release_resources(u);

        if (u->perpetual)
                return false;

        /* Bus clients still tracking this unit keep it pinned too. */
        if (sd_bus_track_count(u->bus_track) > 0)
                return false;

        /* But we keep the unit object around for longer when it is referenced or configured to not be gc'ed */
        switch (u->collect_mode) {

        case COLLECT_INACTIVE:
                if (state != UNIT_INACTIVE)
                        return false;

                break;

        case COLLECT_INACTIVE_OR_FAILED:
                if (!IN_SET(state, UNIT_INACTIVE, UNIT_FAILED))
                        return false;

                break;

        default:
                assert_not_reached("Unknown garbage collection mode");
        }

        if (u->cgroup_path) {
                /* If the unit has a cgroup, then check whether there's anything in it. If so, we should stay
                 * around. Units with active processes should never be collected. */

                r = cg_is_empty_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path);
                if (r < 0)
                        log_unit_debug_errno(u, r, "Failed to determine whether cgroup %s is empty: %m", u->cgroup_path);
                if (r <= 0) /* on error, err on the side of keeping the unit */
                        return false;
        }

        /* Finally let the unit type veto collection. */
        if (UNIT_VTABLE(u)->may_gc && !UNIT_VTABLE(u)->may_gc(u))
                return false;

        return true;
}
436
437 void unit_add_to_load_queue(Unit *u) {
438 assert(u);
439 assert(u->type != _UNIT_TYPE_INVALID);
440
441 if (u->load_state != UNIT_STUB || u->in_load_queue)
442 return;
443
444 LIST_PREPEND(load_queue, u->manager->load_queue, u);
445 u->in_load_queue = true;
446 }
447
448 void unit_add_to_cleanup_queue(Unit *u) {
449 assert(u);
450
451 if (u->in_cleanup_queue)
452 return;
453
454 LIST_PREPEND(cleanup_queue, u->manager->cleanup_queue, u);
455 u->in_cleanup_queue = true;
456 }
457
458 void unit_add_to_gc_queue(Unit *u) {
459 assert(u);
460
461 if (u->in_gc_queue || u->in_cleanup_queue)
462 return;
463
464 if (!unit_may_gc(u))
465 return;
466
467 LIST_PREPEND(gc_queue, u->manager->gc_unit_queue, u);
468 u->in_gc_queue = true;
469 }
470
471 void unit_add_to_dbus_queue(Unit *u) {
472 assert(u);
473 assert(u->type != _UNIT_TYPE_INVALID);
474
475 if (u->load_state == UNIT_STUB || u->in_dbus_queue)
476 return;
477
478 /* Shortcut things if nobody cares */
479 if (sd_bus_track_count(u->manager->subscribed) <= 0 &&
480 sd_bus_track_count(u->bus_track) <= 0 &&
481 set_isempty(u->manager->private_buses)) {
482 u->sent_dbus_new_signal = true;
483 return;
484 }
485
486 LIST_PREPEND(dbus_queue, u->manager->dbus_unit_queue, u);
487 u->in_dbus_queue = true;
488 }
489
490 void unit_submit_to_stop_when_unneeded_queue(Unit *u) {
491 assert(u);
492
493 if (u->in_stop_when_unneeded_queue)
494 return;
495
496 if (!u->stop_when_unneeded)
497 return;
498
499 if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u)))
500 return;
501
502 LIST_PREPEND(stop_when_unneeded_queue, u->manager->stop_when_unneeded_queue, u);
503 u->in_stop_when_unneeded_queue = true;
504 }
505
506 static void bidi_set_free(Unit *u, Hashmap *h) {
507 Unit *other;
508 void *v;
509
510 assert(u);
511
512 /* Frees the hashmap and makes sure we are dropped from the inverse pointers */
513
514 HASHMAP_FOREACH_KEY(v, other, h) {
515 for (UnitDependency d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
516 hashmap_remove(other->dependencies[d], u);
517
518 unit_add_to_gc_queue(other);
519 }
520
521 hashmap_free(h);
522 }
523
/* Deletes the on-disk fragment and drop-in files of a transient unit, plus
 * the per-unit drop-in directory when it lives under the transient lookup
 * path. All removals are best-effort. */
static void unit_remove_transient(Unit *u) {
        char **i;

        assert(u);

        if (!u->transient)
                return;

        if (u->fragment_path)
                (void) unlink(u->fragment_path);

        STRV_FOREACH(i, u->dropin_paths) {
                _cleanup_free_ char *p = NULL, *pp = NULL;

                p = dirname_malloc(*i); /* Get the drop-in directory from the drop-in file */
                if (!p)
                        continue;

                pp = dirname_malloc(p); /* Get the config directory from the drop-in directory */
                if (!pp)
                        continue;

                /* Only drop transient drop-ins */
                if (!path_equal(u->manager->lookup_paths.transient, pp))
                        continue;

                (void) unlink(*i);
                (void) rmdir(p);
        }
}
554
/* Tears down u->requires_mounts_for and drops u from the manager's reverse
 * index (units_requiring_mounts_for), which maps every path prefix to the set
 * of units needing it. Empty sets are removed together with their owned key. */
static void unit_free_requires_mounts_for(Unit *u) {
        assert(u);

        for (;;) {
                _cleanup_free_ char *path;

                path = hashmap_steal_first_key(u->requires_mounts_for);
                if (!path)
                        break;
                else {
                        char s[strlen(path) + 1];

                        /* Walk every prefix of the path ("/", "/a", "/a/b", …). */
                        PATH_FOREACH_PREFIX_MORE(s, path) {
                                char *y;
                                Set *x;

                                x = hashmap_get2(u->manager->units_requiring_mounts_for, s, (void**) &y);
                                if (!x)
                                        continue;

                                (void) set_remove(x, u);

                                /* Last unit interested in this prefix: drop the
                                 * whole entry, freeing the stored key too. */
                                if (set_isempty(x)) {
                                        (void) hashmap_remove(u->manager->units_requiring_mounts_for, y);
                                        free(y);
                                        set_free(x);
                                }
                        }
                }
        }

        u->requires_mounts_for = hashmap_free(u->requires_mounts_for);
}
588
589 static void unit_done(Unit *u) {
590 ExecContext *ec;
591 CGroupContext *cc;
592
593 assert(u);
594
595 if (u->type < 0)
596 return;
597
598 if (UNIT_VTABLE(u)->done)
599 UNIT_VTABLE(u)->done(u);
600
601 ec = unit_get_exec_context(u);
602 if (ec)
603 exec_context_done(ec);
604
605 cc = unit_get_cgroup_context(u);
606 if (cc)
607 cgroup_context_done(cc);
608 }
609
/* Frees a unit and everything hanging off it: names, jobs, dependencies,
 * queue memberships, cgroup and BPF state. NULL-safe; always returns NULL.
 * The teardown order matters — e.g. names are dropped from the manager's
 * lookup table before dependencies are unlinked, and queue removal happens
 * before the fds/BPF objects are closed. */
Unit* unit_free(Unit *u) {
        char *t;

        if (!u)
                return NULL;

        u->transient_file = safe_fclose(u->transient_file);

        /* During a daemon reload, keep transient unit files on disk. */
        if (!MANAGER_IS_RELOADING(u->manager))
                unit_remove_transient(u);

        bus_unit_send_removed_signal(u);

        unit_done(u);

        unit_dequeue_rewatch_pids(u);

        sd_bus_slot_unref(u->match_bus_slot);
        sd_bus_track_unref(u->bus_track);
        u->deserialized_refs = strv_free(u->deserialized_refs);
        u->pending_freezer_message = sd_bus_message_unref(u->pending_freezer_message);

        unit_free_requires_mounts_for(u);

        /* Drop every name of this unit from the manager's lookup table. */
        SET_FOREACH(t, u->aliases)
                hashmap_remove_value(u->manager->units, t, u);
        if (u->id)
                hashmap_remove_value(u->manager->units, u->id, u);

        if (!sd_id128_is_null(u->invocation_id))
                hashmap_remove_value(u->manager->units_by_invocation_id, &u->invocation_id, u);

        if (u->job) {
                Job *j = u->job;
                job_uninstall(j);
                job_free(j);
        }

        if (u->nop_job) {
                Job *j = u->nop_job;
                job_uninstall(j);
                job_free(j);
        }

        /* Unlink both directions of every dependency edge. */
        for (UnitDependency d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
                bidi_set_free(u, u->dependencies[d]);

        /* A unit is being dropped from the tree, make sure our family is realized properly. Do this after we
         * detach the unit from slice tree in order to eliminate its effect on controller masks. */
        if (UNIT_ISSET(u->slice))
                unit_add_family_to_cgroup_realize_queue(UNIT_DEREF(u->slice));

        if (u->on_console)
                manager_unref_console(u->manager);

        unit_release_cgroup(u);

        if (!MANAGER_IS_RELOADING(u->manager))
                unit_unlink_state_files(u);

        unit_unref_uid_gid(u, false);

        (void) manager_update_failed_units(u->manager, u, false);
        set_remove(u->manager->startup_units, u);

        unit_unwatch_all_pids(u);

        unit_ref_unset(&u->slice);
        while (u->refs_by_target)
                unit_ref_unset(u->refs_by_target);

        /* Remove the unit from every manager queue it may sit in. */
        if (u->type != _UNIT_TYPE_INVALID)
                LIST_REMOVE(units_by_type, u->manager->units_by_type[u->type], u);

        if (u->in_load_queue)
                LIST_REMOVE(load_queue, u->manager->load_queue, u);

        if (u->in_dbus_queue)
                LIST_REMOVE(dbus_queue, u->manager->dbus_unit_queue, u);

        if (u->in_gc_queue)
                LIST_REMOVE(gc_queue, u->manager->gc_unit_queue, u);

        if (u->in_cgroup_realize_queue)
                LIST_REMOVE(cgroup_realize_queue, u->manager->cgroup_realize_queue, u);

        if (u->in_cgroup_empty_queue)
                LIST_REMOVE(cgroup_empty_queue, u->manager->cgroup_empty_queue, u);

        if (u->in_cleanup_queue)
                LIST_REMOVE(cleanup_queue, u->manager->cleanup_queue, u);

        if (u->in_target_deps_queue)
                LIST_REMOVE(target_deps_queue, u->manager->target_deps_queue, u);

        if (u->in_stop_when_unneeded_queue)
                LIST_REMOVE(stop_when_unneeded_queue, u->manager->stop_when_unneeded_queue, u);

        /* Close BPF map fds and release BPF programs. */
        safe_close(u->ip_accounting_ingress_map_fd);
        safe_close(u->ip_accounting_egress_map_fd);

        safe_close(u->ipv4_allow_map_fd);
        safe_close(u->ipv6_allow_map_fd);
        safe_close(u->ipv4_deny_map_fd);
        safe_close(u->ipv6_deny_map_fd);

        bpf_program_unref(u->ip_bpf_ingress);
        bpf_program_unref(u->ip_bpf_ingress_installed);
        bpf_program_unref(u->ip_bpf_egress);
        bpf_program_unref(u->ip_bpf_egress_installed);

        set_free(u->ip_bpf_custom_ingress);
        set_free(u->ip_bpf_custom_egress);
        set_free(u->ip_bpf_custom_ingress_installed);
        set_free(u->ip_bpf_custom_egress_installed);

        bpf_program_unref(u->bpf_device_control_installed);

        condition_free_list(u->conditions);
        condition_free_list(u->asserts);

        free(u->description);
        strv_free(u->documentation);
        free(u->fragment_path);
        free(u->source_path);
        strv_free(u->dropin_paths);
        free(u->instance);

        free(u->job_timeout_reboot_arg);
        free(u->reboot_arg);

        set_free_free(u->aliases);
        free(u->id);

        return mfree(u);
}
746
/* Returns the freezer state as tracked by the manager (not queried from the kernel). */
FreezerState unit_freezer_state(Unit *u) {
        assert(u);

        return u->freezer_state;
}
752
/* Queries the kernel's view of the unit's freezer state by reading the
 * "frozen" key from the cgroup.events file of the unit's cgroup. Stores
 * FREEZER_RUNNING ("0"), FREEZER_FROZEN ("1"), or _FREEZER_STATE_INVALID
 * if the key is missing or unrecognized. Returns < 0 on read failure. */
int unit_freezer_state_kernel(Unit *u, FreezerState *ret) {
        char *values[1] = {};
        int r;

        assert(u);

        r = cg_get_keyed_attribute(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, "cgroup.events",
                                   STRV_MAKE("frozen"), values);
        if (r < 0)
                return r;

        r = _FREEZER_STATE_INVALID;

        if (values[0]) {
                if (streq(values[0], "0"))
                        r = FREEZER_RUNNING;
                else if (streq(values[0], "1"))
                        r = FREEZER_FROZEN;
        }

        /* values[0] was allocated by cg_get_keyed_attribute() and is ours to free. */
        free(values[0]);
        *ret = r;

        return 0;
}
778
779 UnitActiveState unit_active_state(Unit *u) {
780 assert(u);
781
782 if (u->load_state == UNIT_MERGED)
783 return unit_active_state(unit_follow_merge(u));
784
785 /* After a reload it might happen that a unit is not correctly
786 * loaded but still has a process around. That's why we won't
787 * shortcut failed loading to UNIT_INACTIVE_FAILED. */
788
789 return UNIT_VTABLE(u)->active_state(u);
790 }
791
/* Returns the type-specific substate string, as provided by the unit's vtable. */
const char* unit_sub_state_to_string(Unit *u) {
        assert(u);

        return UNIT_VTABLE(u)->sub_state_to_string(u);
}
797
798 static int hashmap_complete_move(Hashmap **s, Hashmap **other) {
799 assert(s);
800 assert(other);
801
802 if (!*other)
803 return 0;
804
805 if (*s)
806 return hashmap_move(*s, *other);
807 else
808 *s = TAKE_PTR(*other);
809
810 return 0;
811 }
812
/* Transfers all names of 'other' (its id plus its aliases) into u's alias
 * set, and repoints the manager's name lookup table entries at u. On failure
 * other's id is taken back out of u's set, leaving both units consistent. */
static int merge_names(Unit *u, Unit *other) {
        char *name;
        int r;

        assert(u);
        assert(other);

        r = unit_add_alias(u, other->id); /* takes ownership of other->id on success */
        if (r < 0)
                return r;

        r = set_move(u->aliases, other->aliases);
        if (r < 0) {
                /* Roll back: other->id must not stay in u's set if the merge failed. */
                set_remove(u->aliases, other->id);
                return r;
        }

        TAKE_PTR(other->id); /* now owned by u->aliases */
        other->aliases = set_free_free(other->aliases);

        /* Every transferred name now resolves to u. */
        SET_FOREACH(name, u->aliases)
                assert_se(hashmap_replace(u->manager->units, name, u) == 0);

        return 0;
}
838
839 static int reserve_dependencies(Unit *u, Unit *other, UnitDependency d) {
840 unsigned n_reserve;
841
842 assert(u);
843 assert(other);
844 assert(d < _UNIT_DEPENDENCY_MAX);
845
846 /*
847 * If u does not have this dependency set allocated, there is no need
848 * to reserve anything. In that case other's set will be transferred
849 * as a whole to u by complete_move().
850 */
851 if (!u->dependencies[d])
852 return 0;
853
854 /* merge_dependencies() will skip a u-on-u dependency */
855 n_reserve = hashmap_size(other->dependencies[d]) - !!hashmap_get(other->dependencies[d], u);
856
857 return hashmap_reserve(u->dependencies[d], n_reserve);
858 }
859
static void merge_dependencies(Unit *u, Unit *other, const char *other_id, UnitDependency d) {
        Unit *back;
        void *v;
        int r;

        /* Merges all dependencies of type 'd' of the unit 'other' into the deps of the unit 'u' */

        assert(u);
        assert(other);
        assert(d < _UNIT_DEPENDENCY_MAX);

        /* Fix backwards pointers. Let's iterate through all dependent units of the other unit. */
        HASHMAP_FOREACH_KEY(v, back, other->dependencies[d])

                /* Let's now iterate through the dependencies of that dependencies of the other units,
                 * looking for pointers back, and let's fix them up, to instead point to 'u'. */
                for (UnitDependency k = 0; k < _UNIT_DEPENDENCY_MAX; k++)
                        if (back == u) {
                                /* Do not add dependencies between u and itself. */
                                if (hashmap_remove(back->dependencies[k], other))
                                        maybe_warn_about_dependency(u, other_id, k);
                        } else {
                                UnitDependencyInfo di_u, di_other;

                                /* Let's drop this dependency between "back" and "other", and let's create it between
                                 * "back" and "u" instead. Let's merge the bit masks of the dependency we are moving,
                                 * and any such dependency which might already exist */

                                di_other.data = hashmap_get(back->dependencies[k], other);
                                if (!di_other.data)
                                        continue; /* dependency isn't set, let's try the next one */

                                di_u.data = hashmap_get(back->dependencies[k], u);

                                /* OR the origin/destination masks of any pre-existing back→u
                                 * edge into the one we are moving over from back→other. */
                                UnitDependencyInfo di_merged = {
                                        .origin_mask = di_u.origin_mask | di_other.origin_mask,
                                        .destination_mask = di_u.destination_mask | di_other.destination_mask,
                                };

                                /* Cannot fail: the entry for 'other' exists (checked above), so this
                                 * is a replace, not an insert needing allocation. */
                                r = hashmap_remove_and_replace(back->dependencies[k], other, u, di_merged.data);
                                if (r < 0)
                                        log_warning_errno(r, "Failed to remove/replace: back=%s other=%s u=%s: %m", back->id, other_id, u->id);
                                assert(r >= 0);

                                /* assert_se(hashmap_remove_and_replace(back->dependencies[k], other, u, di_merged.data) >= 0); */
                        }

        /* Also do not move dependencies on u to itself */
        back = hashmap_remove(other->dependencies[d], u);
        if (back)
                maybe_warn_about_dependency(u, other_id, d);

        /* The move cannot fail. The caller must have performed a reservation. */
        assert_se(hashmap_complete_move(&u->dependencies[d], &other->dependencies[d]) == 0);

        other->dependencies[d] = hashmap_free(other->dependencies[d]);
}
917
918 int unit_merge(Unit *u, Unit *other) {
919 const char *other_id = NULL;
920 int r;
921
922 assert(u);
923 assert(other);
924 assert(u->manager == other->manager);
925 assert(u->type != _UNIT_TYPE_INVALID);
926
927 other = unit_follow_merge(other);
928
929 if (other == u)
930 return 0;
931
932 if (u->type != other->type)
933 return -EINVAL;
934
935 if (!unit_type_may_alias(u->type)) /* Merging only applies to unit names that support aliases */
936 return -EEXIST;
937
938 if (!IN_SET(other->load_state, UNIT_STUB, UNIT_NOT_FOUND))
939 return -EEXIST;
940
941 if (!streq_ptr(u->instance, other->instance))
942 return -EINVAL;
943
944 if (other->job)
945 return -EEXIST;
946
947 if (other->nop_job)
948 return -EEXIST;
949
950 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
951 return -EEXIST;
952
953 if (other->id)
954 other_id = strdupa(other->id);
955
956 /* Make reservations to ensure merge_dependencies() won't fail */
957 for (UnitDependency d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
958 r = reserve_dependencies(u, other, d);
959 /*
960 * We don't rollback reservations if we fail. We don't have
961 * a way to undo reservations. A reservation is not a leak.
962 */
963 if (r < 0)
964 return r;
965 }
966
967 /* Merge names */
968 r = merge_names(u, other);
969 if (r < 0)
970 return r;
971
972 /* Redirect all references */
973 while (other->refs_by_target)
974 unit_ref_set(other->refs_by_target, other->refs_by_target->source, u);
975
976 /* Merge dependencies */
977 for (UnitDependency d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
978 merge_dependencies(u, other, other_id, d);
979
980 other->load_state = UNIT_MERGED;
981 other->merged_into = u;
982
983 /* If there is still some data attached to the other node, we
984 * don't need it anymore, and can free it. */
985 if (other->load_state != UNIT_STUB)
986 if (UNIT_VTABLE(other)->done)
987 UNIT_VTABLE(other)->done(other);
988
989 unit_add_to_dbus_queue(u);
990 unit_add_to_cleanup_queue(other);
991
992 return 0;
993 }
994
int unit_merge_by_name(Unit *u, const char *name) {
        _cleanup_free_ char *s = NULL;
        Unit *other;
        int r;

        /* Either add name to u, or if a unit with name already exists, merge it with u.
         * If name is a template, do the same for name@instance, where instance is u's instance. */

        assert(u);
        assert(name);

        if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
                if (!u->instance)
                        return -EINVAL;

                r = unit_name_replace_instance(name, u->instance, &s);
                if (r < 0)
                        return r;

                name = s;
        }

        /* If a unit of that name is already registered, fold it into u … */
        other = manager_get_unit(u->manager, name);
        if (other)
                return unit_merge(u, other);

        /* … otherwise simply register the name as an additional name of u. */
        return unit_add_name(u, name);
}
1023
1024 Unit* unit_follow_merge(Unit *u) {
1025 assert(u);
1026
1027 while (u->load_state == UNIT_MERGED)
1028 assert_se(u = u->merged_into);
1029
1030 return u;
1031 }
1032
/* Derives implicit unit dependencies from an exec context: mount requirements
 * for the directories the payload touches, plus orderings against special
 * units (remount-fs, tmpfiles-setup, udevd, journald) where the settings
 * call for them. Returns 0 on success, negative errno on failure. */
int unit_add_exec_dependencies(Unit *u, ExecContext *c) {
        int r;

        assert(u);
        assert(c);

        if (c->working_directory && !c->working_directory_missing_ok) {
                r = unit_require_mounts_for(u, c->working_directory, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        if (c->root_directory) {
                r = unit_require_mounts_for(u, c->root_directory, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        if (c->root_image) {
                r = unit_require_mounts_for(u, c->root_image, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        /* Require the mounts backing each configured *Directory= path. */
        for (ExecDirectoryType dt = 0; dt < _EXEC_DIRECTORY_TYPE_MAX; dt++) {
                if (!u->manager->prefix[dt])
                        continue;

                char **dp;
                STRV_FOREACH(dp, c->directories[dt].paths) {
                        _cleanup_free_ char *p;

                        p = path_join(u->manager->prefix[dt], *dp);
                        if (!p)
                                return -ENOMEM;

                        r = unit_require_mounts_for(u, p, UNIT_DEPENDENCY_FILE);
                        if (r < 0)
                                return r;
                }
        }

        /* Everything below only applies to the system instance. */
        if (!MANAGER_IS_SYSTEM(u->manager))
                return 0;

        /* For the following three directory types we need write access, and /var/ is possibly on the root
         * fs. Hence order after systemd-remount-fs.service, to ensure things are writable. */
        if (!strv_isempty(c->directories[EXEC_DIRECTORY_STATE].paths) ||
            !strv_isempty(c->directories[EXEC_DIRECTORY_CACHE].paths) ||
            !strv_isempty(c->directories[EXEC_DIRECTORY_LOGS].paths)) {
                r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_REMOUNT_FS_SERVICE, true, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        if (c->private_tmp) {
                const char *p;

                FOREACH_STRING(p, "/tmp", "/var/tmp") {
                        r = unit_require_mounts_for(u, p, UNIT_DEPENDENCY_FILE);
                        if (r < 0)
                                return r;
                }

                r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_TMPFILES_SETUP_SERVICE, true, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        if (c->root_image) {
                /* We need to wait for /dev/loopX to appear when doing RootImage=, hence let's add an
                 * implicit dependency on udev */

                r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_UDEVD_SERVICE, true, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        }

        if (!IN_SET(c->std_output,
                    EXEC_OUTPUT_JOURNAL, EXEC_OUTPUT_JOURNAL_AND_CONSOLE,
                    EXEC_OUTPUT_KMSG, EXEC_OUTPUT_KMSG_AND_CONSOLE) &&
            !IN_SET(c->std_error,
                    EXEC_OUTPUT_JOURNAL, EXEC_OUTPUT_JOURNAL_AND_CONSOLE,
                    EXEC_OUTPUT_KMSG, EXEC_OUTPUT_KMSG_AND_CONSOLE) &&
            !c->log_namespace)
                return 0;

        /* If syslog or kernel logging is requested (or log namespacing is), make sure our own logging daemon
         * is run first. */

        if (c->log_namespace) {
                _cleanup_free_ char *socket_unit = NULL, *varlink_socket_unit = NULL;

                /* A namespaced journald instance: depend on its two per-namespace sockets. */
                r = unit_name_build_from_type("systemd-journald", c->log_namespace, UNIT_SOCKET, &socket_unit);
                if (r < 0)
                        return r;

                r = unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_REQUIRES, socket_unit, true, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;

                r = unit_name_build_from_type("systemd-journald-varlink", c->log_namespace, UNIT_SOCKET, &varlink_socket_unit);
                if (r < 0)
                        return r;

                r = unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_REQUIRES, varlink_socket_unit, true, UNIT_DEPENDENCY_FILE);
                if (r < 0)
                        return r;
        } else
                r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_JOURNALD_SOCKET, true, UNIT_DEPENDENCY_FILE);
        if (r < 0)
                return r;

        return 0;
}
1148
1149 const char *unit_description(Unit *u) {
1150 assert(u);
1151
1152 if (u->description)
1153 return u->description;
1154
1155 return strna(u->id);
1156 }
1157
1158 const char *unit_status_string(Unit *u) {
1159 assert(u);
1160
1161 if (u->manager->status_unit_format == STATUS_UNIT_FORMAT_NAME && u->id)
1162 return u->id;
1163
1164 return unit_description(u);
1165 }
1166
/* Common implementation for multiple backends: load the unit's fragment file and then its
 * drop-in snippets. If fragment_required is true, a missing fragment is an error (-ENOENT);
 * otherwise the unit is marked loaded and configured from drop-ins alone. */
int unit_load_fragment_and_dropin(Unit *u, bool fragment_required) {
        int r;

        assert(u);

        /* Load a .{service,socket,...} file */
        r = unit_load_fragment(u);
        if (r < 0)
                return r;

        if (u->load_state == UNIT_STUB) {
                /* Still a stub means no fragment was found. Either that's fatal, or we continue
                 * with drop-ins only. */
                if (fragment_required)
                        return -ENOENT;

                u->load_state = UNIT_LOADED;
        }

        /* Load drop-in directory data. If u is an alias, we might be reloading the
         * target unit needlessly. But we cannot be sure which drops-ins have already
         * been loaded and which not, at least without doing complicated book-keeping,
         * so let's always reread all drop-ins. */
        r = unit_load_dropin(unit_follow_merge(u));
        if (r < 0)
                return r;

        if (u->source_path) {
                struct stat st;

                /* Remember the source file's mtime so later changes can be detected; a failed
                 * stat() simply clears the timestamp. */
                if (stat(u->source_path, &st) >= 0)
                        u->source_mtime = timespec_load(&st.st_mtim);
                else
                        u->source_mtime = 0;
        }

        return 0;
}
1204
/* Queue the unit for later processing of default target dependencies (see
 * unit_add_default_target_dependency()). Idempotent: a unit already queued is left alone. */
void unit_add_to_target_deps_queue(Unit *u) {
        Manager *m = u->manager;

        assert(u);

        if (u->in_target_deps_queue)
                return;

        LIST_PREPEND(target_deps_queue, m->target_deps_queue, u);
        u->in_target_deps_queue = true;
}
1216
1217 int unit_add_default_target_dependency(Unit *u, Unit *target) {
1218 assert(u);
1219 assert(target);
1220
1221 if (target->type != UNIT_TARGET)
1222 return 0;
1223
1224 /* Only add the dependency if both units are loaded, so that
1225 * that loop check below is reliable */
1226 if (u->load_state != UNIT_LOADED ||
1227 target->load_state != UNIT_LOADED)
1228 return 0;
1229
1230 /* If either side wants no automatic dependencies, then let's
1231 * skip this */
1232 if (!u->default_dependencies ||
1233 !target->default_dependencies)
1234 return 0;
1235
1236 /* Don't create loops */
1237 if (hashmap_get(target->dependencies[UNIT_BEFORE], u))
1238 return 0;
1239
1240 return unit_add_dependency(target, UNIT_AFTER, u, true, UNIT_DEPENDENCY_DEFAULT);
1241 }
1242
/* Hook the unit up to its parent slice: order it after the slice and require it, falling
 * back to the root slice for units without an explicit slice assignment. */
static int unit_add_slice_dependencies(Unit *u) {
        assert(u);

        if (!UNIT_HAS_CGROUP_CONTEXT(u))
                return 0;

        /* Slice units are implicitly ordered against their parent slices (as this relationship is encoded in the
           name), while all other units are ordered based on configuration (as in their case Slice= configures the
           relationship). */
        UnitDependencyMask mask = u->type == UNIT_SLICE ? UNIT_DEPENDENCY_IMPLICIT : UNIT_DEPENDENCY_FILE;

        if (UNIT_ISSET(u->slice))
                return unit_add_two_dependencies(u, UNIT_AFTER, UNIT_REQUIRES, UNIT_DEREF(u->slice), true, mask);

        /* The root slice is the top of the hierarchy and has no parent to depend on. */
        if (unit_has_name(u, SPECIAL_ROOT_SLICE))
                return 0;

        return unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_REQUIRES, SPECIAL_ROOT_SLICE, true, mask);
}
1262
/* For each path the unit requires to be mounted (u->requires_mounts_for), add After= (and,
 * for fragment-backed mounts, Requires=) dependencies on the .mount units of the path and of
 * each of its parent prefixes. */
static int unit_add_mount_dependencies(Unit *u) {
        UnitDependencyInfo di;
        const char *path;
        int r;

        assert(u);

        HASHMAP_FOREACH_KEY(di.data, path, u->requires_mounts_for) {
                /* VLA sized to hold any prefix of the path, including the path itself. */
                char prefix[strlen(path) + 1];

                PATH_FOREACH_PREFIX_MORE(prefix, path) {
                        _cleanup_free_ char *p = NULL;
                        Unit *m;

                        r = unit_name_from_path(prefix, ".mount", &p);
                        if (r < 0)
                                return r;

                        m = manager_get_unit(u->manager, p);
                        if (!m) {
                                /* Make sure to load the mount unit if
                                 * it exists. If so the dependencies
                                 * on this unit will be added later
                                 * during the loading of the mount
                                 * unit. */
                                (void) manager_load_unit_prepare(u->manager, p, NULL, NULL, &m);
                                continue;
                        }
                        if (m == u)
                                continue;

                        if (m->load_state != UNIT_LOADED)
                                continue;

                        r = unit_add_dependency(u, UNIT_AFTER, m, true, di.origin_mask);
                        if (r < 0)
                                return r;

                        /* Only mounts backed by a unit file get a hard Requires=, in addition to
                         * the ordering added above. */
                        if (m->fragment_path) {
                                r = unit_add_dependency(u, UNIT_REQUIRES, m, true, di.origin_mask);
                                if (r < 0)
                                        return r;
                        }
                }
        }

        return 0;
}
1311
1312 static int unit_add_oomd_dependencies(Unit *u) {
1313 CGroupContext *c;
1314 bool wants_oomd;
1315 int r;
1316
1317 assert(u);
1318
1319 if (!u->default_dependencies)
1320 return 0;
1321
1322 c = unit_get_cgroup_context(u);
1323 if (!c)
1324 return 0;
1325
1326 wants_oomd = (c->moom_swap == MANAGED_OOM_KILL || c->moom_mem_pressure == MANAGED_OOM_KILL);
1327 if (!wants_oomd)
1328 return 0;
1329
1330 r = unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_WANTS, "systemd-oomd.service", true, UNIT_DEPENDENCY_FILE);
1331 if (r < 0)
1332 return r;
1333
1334 return 0;
1335 }
1336
1337 static int unit_add_startup_units(Unit *u) {
1338 CGroupContext *c;
1339
1340 c = unit_get_cgroup_context(u);
1341 if (!c)
1342 return 0;
1343
1344 if (c->startup_cpu_shares == CGROUP_CPU_SHARES_INVALID &&
1345 c->startup_io_weight == CGROUP_WEIGHT_INVALID &&
1346 c->startup_blockio_weight == CGROUP_BLKIO_WEIGHT_INVALID)
1347 return 0;
1348
1349 return set_ensure_put(&u->manager->startup_units, NULL, u);
1350 }
1351
/* Load the unit's configuration via its type-specific backend and set up the implicit
 * dependencies (slice, mount, oomd, startup set) that loading entails. On failure the unit
 * is put into UNIT_NOT_FOUND, UNIT_BAD_SETTING or UNIT_ERROR state and a negative errno is
 * returned. */
int unit_load(Unit *u) {
        int r;

        assert(u);

        /* We are handling the load now, hence drop the unit from the pending load queue. */
        if (u->in_load_queue) {
                LIST_REMOVE(load_queue, u->manager->load_queue, u);
                u->in_load_queue = false;
        }

        if (u->type == _UNIT_TYPE_INVALID)
                return -EINVAL;

        /* Already loaded (or merged/masked/failed before)? Then there's nothing to do. */
        if (u->load_state != UNIT_STUB)
                return 0;

        if (u->transient_file) {
                /* Finalize transient file: if this is a transient unit file, as soon as we reach unit_load() the setup
                 * is complete, hence let's synchronize the unit file we just wrote to disk. */

                r = fflush_and_check(u->transient_file);
                if (r < 0)
                        goto fail;

                u->transient_file = safe_fclose(u->transient_file);
                u->fragment_mtime = now(CLOCK_REALTIME);
        }

        r = UNIT_VTABLE(u)->load(u);
        if (r < 0)
                goto fail;

        /* The backend must have moved the unit out of stub state by now. */
        assert(u->load_state != UNIT_STUB);

        if (u->load_state == UNIT_LOADED) {
                unit_add_to_target_deps_queue(u);

                r = unit_add_slice_dependencies(u);
                if (r < 0)
                        goto fail;

                r = unit_add_mount_dependencies(u);
                if (r < 0)
                        goto fail;

                r = unit_add_oomd_dependencies(u);
                if (r < 0)
                        goto fail;

                r = unit_add_startup_units(u);
                if (r < 0)
                        goto fail;

                /* OnFailureJobMode=isolate only makes sense with exactly one OnFailure= unit. */
                if (u->on_failure_job_mode == JOB_ISOLATE && hashmap_size(u->dependencies[UNIT_ON_FAILURE]) > 1) {
                        r = log_unit_error_errno(u, SYNTHETIC_ERRNO(ENOEXEC),
                                                 "More than one OnFailure= dependencies specified but OnFailureJobMode=isolate set. Refusing.");
                        goto fail;
                }

                if (u->job_running_timeout != USEC_INFINITY && u->job_running_timeout > u->job_timeout)
                        log_unit_warning(u, "JobRunningTimeoutSec= is greater than JobTimeoutSec=, it has no effect.");

                /* We finished loading, let's ensure our parents recalculate the members mask */
                unit_invalidate_cgroup_members_masks(u);
        }

        assert((u->load_state != UNIT_MERGED) == !u->merged_into);

        unit_add_to_dbus_queue(unit_follow_merge(u));
        unit_add_to_gc_queue(u);
        (void) manager_varlink_send_managed_oom_update(u);

        return 0;

fail:
        /* We convert ENOEXEC errors to the UNIT_BAD_SETTING load state here. Configuration parsing code
         * should hence return ENOEXEC to ensure units are placed in this state after loading. */

        u->load_state = u->load_state == UNIT_STUB ? UNIT_NOT_FOUND :
                                         r == -ENOEXEC ? UNIT_BAD_SETTING :
                                                         UNIT_ERROR;
        u->load_error = r;

        /* Record the timestamp on the cache, so that if the cache gets updated between now and the next time
         * an attempt is made to load this unit, we know we need to check again. */
        if (u->load_state == UNIT_NOT_FOUND)
                u->fragment_not_found_timestamp_hash = u->manager->unit_cache_timestamp_hash;

        unit_add_to_dbus_queue(u);
        unit_add_to_gc_queue(u);

        return log_unit_debug_errno(u, r, "Failed to load configuration: %m");
}
1445
/* printf-like log callback handed to condition_test_list() and friends: if a unit is passed
 * as userdata, logs with the unit's journal metadata attached, otherwise logs plainly. */
_printf_(7, 8)
static int log_unit_internal(void *userdata, int level, int error, const char *file, int line, const char *func, const char *format, ...) {
        Unit *u = userdata;
        va_list ap;
        int r;

        va_start(ap, format);
        if (u)
                /* Attach the manager's unit and invocation-id journal fields so the message can
                 * be correlated with this unit. */
                r = log_object_internalv(level, error, file, line, func,
                                         u->manager->unit_log_field,
                                         u->id,
                                         u->manager->invocation_log_field,
                                         u->invocation_id_string,
                                         format, ap);
        else
                r = log_internalv(level, error, file, line, func, format, ap);
        va_end(ap);

        return r;
}
1466
/* Evaluate the unit's ConditionXYZ= settings against the manager's effective environment,
 * record the result and timestamp on the unit, and return the result. An error determining
 * the environment yields CONDITION_ERROR (which evaluates as false here). */
static bool unit_test_condition(Unit *u) {
        _cleanup_strv_free_ char **env = NULL;
        int r;

        assert(u);

        dual_timestamp_get(&u->condition_timestamp);

        r = manager_get_effective_environment(u->manager, &env);
        if (r < 0) {
                log_unit_error_errno(u, r, "Failed to determine effective environment: %m");
                u->condition_result = CONDITION_ERROR;
        } else
                u->condition_result = condition_test_list(
                                u->conditions,
                                env,
                                condition_type_to_string,
                                log_unit_internal,
                                u);

        unit_add_to_dbus_queue(u);
        return u->condition_result;
}
1490
/* Evaluate the unit's AssertXYZ= settings, mirroring unit_test_condition(): record result
 * and timestamp on the unit and return the result, with CONDITION_ERROR on environment
 * lookup failure. */
static bool unit_test_assert(Unit *u) {
        _cleanup_strv_free_ char **env = NULL;
        int r;

        assert(u);

        dual_timestamp_get(&u->assert_timestamp);

        r = manager_get_effective_environment(u->manager, &env);
        if (r < 0) {
                log_unit_error_errno(u, r, "Failed to determine effective environment: %m");
                u->assert_result = CONDITION_ERROR;
        } else
                u->assert_result = condition_test_list(
                                u->asserts,
                                env,
                                assert_type_to_string,
                                log_unit_internal,
                                u);

        unit_add_to_dbus_queue(u);
        return u->assert_result;
}
1514
/* Print a console status message for the unit, substituting the unit's (possibly
 * color-highlighted) status string into the given message format. */
void unit_status_printf(Unit *u, StatusType status_type, const char *status, const char *unit_status_msg_format) {
        const char *d;

        d = unit_status_string(u);
        if (log_get_show_color())
                d = strjoina(ANSI_HIGHLIGHT, d, ANSI_NORMAL);

        /* The format string comes from our own message tables, not from user input, hence
         * suppressing the non-literal format warning is safe here. */
        DISABLE_WARNING_FORMAT_NONLITERAL;
        manager_status_printf(u->manager, status_type, status, unit_status_msg_format, d);
        REENABLE_WARNING;
}
1526
/* Check the unit's start rate limit: returns 0 (and clears the hit flag) if starting is
 * allowed, otherwise logs, marks the limit as hit, triggers the configured
 * StartLimitAction= and returns -ECANCELED. */
int unit_test_start_limit(Unit *u) {
        const char *reason;

        assert(u);

        if (ratelimit_below(&u->start_ratelimit)) {
                u->start_limit_hit = false;
                return 0;
        }

        log_unit_warning(u, "Start request repeated too quickly.");
        u->start_limit_hit = true;

        reason = strjoina("unit ", u->id, " failed");

        /* -1: no specific exit status to report for the emergency action. */
        emergency_action(u->manager, u->start_limit_action,
                         EMERGENCY_ACTION_IS_WATCHDOG|EMERGENCY_ACTION_WARN,
                         u->reboot_arg, -1, reason);

        return -ECANCELED;
}
1548
1549 bool unit_shall_confirm_spawn(Unit *u) {
1550 assert(u);
1551
1552 if (manager_is_confirm_spawn_disabled(u->manager))
1553 return false;
1554
1555 /* For some reasons units remaining in the same process group
1556 * as PID 1 fail to acquire the console even if it's not used
1557 * by any process. So skip the confirmation question for them. */
1558 return !unit_get_exec_context(u)->same_pgrp;
1559 }
1560
static bool unit_verify_deps(Unit *u) {
        Unit *other;
        void *v;

        assert(u);

        /* Checks whether all BindsTo= dependencies of this unit are fulfilled — if they are also combined with
         * After=. We do not check Requires= or Requisite= here as they only should have an effect on the job
         * processing, but do not have any effect afterwards. We don't check BindsTo= dependencies that are not used in
         * conjunction with After= as for them any such check would make things entirely racy. */

        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BINDS_TO]) {

                /* Skip BindsTo= deps without a matching After= ordering, per the rationale above. */
                if (!hashmap_contains(u->dependencies[UNIT_AFTER], other))
                        continue;

                if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(other))) {
                        log_unit_notice(u, "Bound to unit %s, but unit isn't active.", other->id);
                        return false;
                }
        }

        return true;
}
1585
1586 /* Errors that aren't really errors:
1587 * -EALREADY: Unit is already started.
1588 * -ECOMM: Condition failed
1589 * -EAGAIN: An operation is already in progress. Retry later.
1590 *
1591 * Errors that are real errors:
1592 * -EBADR: This unit type does not support starting.
1593 * -ECANCELED: Start limit hit, too many requests for now
1594 * -EPROTO: Assert failed
1595 * -EINVAL: Unit not loaded
1596 * -EOPNOTSUPP: Unit type not supported
1597 * -ENOLINK: The necessary dependencies are not fulfilled.
1598 * -ESTALE: This unit has been started before and can't be started a second time
1599 * -ENOENT: This is a triggering unit and unit to trigger is not loaded
1600 */
int unit_start(Unit *u) {
        UnitActiveState state;
        Unit *following;

        assert(u);

        /* If this is already started, then this will succeed. Note that this will even succeed if this unit
         * is not startable by the user. This is relied on to detect when we need to wait for units and when
         * waiting is finished. */
        state = unit_active_state(u);
        if (UNIT_IS_ACTIVE_OR_RELOADING(state))
                return -EALREADY;
        if (state == UNIT_MAINTENANCE)
                return -EAGAIN;

        /* Units that aren't loaded cannot be started */
        if (u->load_state != UNIT_LOADED)
                return -EINVAL;

        /* Refuse starting scope units more than once */
        if (UNIT_VTABLE(u)->once_only && dual_timestamp_is_set(&u->inactive_enter_timestamp))
                return -ESTALE;

        /* If the conditions failed, don't do anything at all. If we already are activating this call might
         * still be useful to speed up activation in case there is some hold-off time, but we don't want to
         * recheck the condition in that case. */
        if (state != UNIT_ACTIVATING &&
            !unit_test_condition(u))
                return log_unit_debug_errno(u, SYNTHETIC_ERRNO(ECOMM), "Starting requested but condition failed. Not starting unit.");

        /* If the asserts failed, fail the entire job */
        if (state != UNIT_ACTIVATING &&
            !unit_test_assert(u))
                return log_unit_notice_errno(u, SYNTHETIC_ERRNO(EPROTO), "Starting requested but asserts failed.");

        /* Units of types that aren't supported cannot be started. Note that we do this test only after the
         * condition checks, so that we rather return condition check errors (which are usually not
         * considered a true failure) than "not supported" errors (which are considered a failure).
         */
        if (!unit_type_supported(u->type))
                return -EOPNOTSUPP;

        /* Let's make sure that the deps really are in order before we start this. Normally the job engine
         * should have taken care of this already, but let's check this here again. After all, our
         * dependencies might not be in effect anymore, due to a reload or due to a failed condition. */
        if (!unit_verify_deps(u))
                return -ENOLINK;

        /* Forward to the main object, if we aren't it. */
        following = unit_following(u);
        if (following) {
                log_unit_debug(u, "Redirecting start request from %s to %s.", u->id, following->id);
                return unit_start(following);
        }

        /* If it is stopped, but we cannot start it, then fail */
        if (!UNIT_VTABLE(u)->start)
                return -EBADR;

        /* We don't suppress calls to ->start() here when we are already starting, to allow this request to
         * be used as a "hurry up" call, for example when the unit is in some "auto restart" state where it
         * waits for a holdoff timer to elapse before it will start again. */

        unit_add_to_dbus_queue(u);
        /* Thaw the unit first, so a frozen unit can actually process the start request. */
        unit_cgroup_freezer_action(u, FREEZER_THAW);

        return UNIT_VTABLE(u)->start(u);
}
1669
1670 bool unit_can_start(Unit *u) {
1671 assert(u);
1672
1673 if (u->load_state != UNIT_LOADED)
1674 return false;
1675
1676 if (!unit_type_supported(u->type))
1677 return false;
1678
1679 /* Scope units may be started only once */
1680 if (UNIT_VTABLE(u)->once_only && dual_timestamp_is_set(&u->inactive_exit_timestamp))
1681 return false;
1682
1683 return !!UNIT_VTABLE(u)->start;
1684 }
1685
1686 bool unit_can_isolate(Unit *u) {
1687 assert(u);
1688
1689 return unit_can_start(u) &&
1690 u->allow_isolate;
1691 }
1692
1693 /* Errors:
1694 * -EBADR: This unit type does not support stopping.
1695 * -EALREADY: Unit is already stopped.
1696 * -EAGAIN: An operation is already in progress. Retry later.
1697 */
int unit_stop(Unit *u) {
        UnitActiveState state;
        Unit *following;

        assert(u);

        /* Nothing to do if the unit is already down. */
        state = unit_active_state(u);
        if (UNIT_IS_INACTIVE_OR_FAILED(state))
                return -EALREADY;

        /* Forward to the main object, if we aren't it. */
        following = unit_following(u);
        if (following) {
                log_unit_debug(u, "Redirecting stop request from %s to %s.", u->id, following->id);
                return unit_stop(following);
        }

        if (!UNIT_VTABLE(u)->stop)
                return -EBADR;

        unit_add_to_dbus_queue(u);
        /* Thaw the unit first, so a frozen unit can actually process the stop request. */
        unit_cgroup_freezer_action(u, FREEZER_THAW);

        return UNIT_VTABLE(u)->stop(u);
}
1722
1723 bool unit_can_stop(Unit *u) {
1724 assert(u);
1725
1726 /* Note: if we return true here, it does not mean that the unit may be successfully stopped.
1727 * Extrinsic units follow external state and they may stop following external state changes
1728 * (hence we return true here), but an attempt to do this through the manager will fail. */
1729
1730 if (!unit_type_supported(u->type))
1731 return false;
1732
1733 if (u->perpetual)
1734 return false;
1735
1736 return !!UNIT_VTABLE(u)->stop;
1737 }
1738
1739 /* Errors:
1740 * -EBADR: This unit type does not support reloading.
1741 * -ENOEXEC: Unit is not started.
1742 * -EAGAIN: An operation is already in progress. Retry later.
1743 */
int unit_reload(Unit *u) {
        UnitActiveState state;
        Unit *following;

        assert(u);

        /* Units that aren't loaded cannot be reloaded. */
        if (u->load_state != UNIT_LOADED)
                return -EINVAL;

        if (!unit_can_reload(u))
                return -EBADR;

        /* A reload is already in flight. */
        state = unit_active_state(u);
        if (state == UNIT_RELOADING)
                return -EAGAIN;

        /* Only active units may be reloaded. */
        if (state != UNIT_ACTIVE)
                return log_unit_warning_errno(u, SYNTHETIC_ERRNO(ENOEXEC), "Unit cannot be reloaded because it is inactive.");

        /* Forward to the main object, if we aren't it. */
        following = unit_following(u);
        if (following) {
                log_unit_debug(u, "Redirecting reload request from %s to %s.", u->id, following->id);
                return unit_reload(following);
        }

        unit_add_to_dbus_queue(u);

        if (!UNIT_VTABLE(u)->reload) {
                /* Unit doesn't have a reload function, but we need to propagate the reload anyway */
                unit_notify(u, unit_active_state(u), unit_active_state(u), 0);
                return 0;
        }

        /* Thaw the unit first, so a frozen unit can actually process the reload request. */
        unit_cgroup_freezer_action(u, FREEZER_THAW);

        return UNIT_VTABLE(u)->reload(u);
}
1781
1782 bool unit_can_reload(Unit *u) {
1783 assert(u);
1784
1785 if (UNIT_VTABLE(u)->can_reload)
1786 return UNIT_VTABLE(u)->can_reload(u);
1787
1788 if (!hashmap_isempty(u->dependencies[UNIT_PROPAGATES_RELOAD_TO]))
1789 return true;
1790
1791 return UNIT_VTABLE(u)->reload;
1792 }
1793
/* Returns true if the unit has StopWhenUnneeded= set, is currently active without a pending
 * job, and none of its reverse dependencies (RequiredBy=, RequisiteOf=, WantedBy=, BoundBy=)
 * is active, queued or about to restart. */
bool unit_is_unneeded(Unit *u) {
        static const UnitDependency deps[] = {
                UNIT_REQUIRED_BY,
                UNIT_REQUISITE_OF,
                UNIT_WANTED_BY,
                UNIT_BOUND_BY,
        };

        assert(u);

        if (!u->stop_when_unneeded)
                return false;

        /* Don't clean up while the unit is transitioning or is even inactive. */
        if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u)))
                return false;
        if (u->job)
                return false;

        for (size_t j = 0; j < ELEMENTSOF(deps); j++) {
                Unit *other;
                void *v;

                /* If a dependent unit has a job queued, is active or transitioning, or is marked for
                 * restart, then don't clean this one up. */

                HASHMAP_FOREACH_KEY(v, other, u->dependencies[deps[j]]) {
                        if (other->job)
                                return false;

                        if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
                                return false;

                        if (unit_will_restart(other))
                                return false;
                }
        }

        return true;
}
1834
/* Submit all forward dependencies of the unit (Requires=, Requisite=, Wants=, BindsTo=) to
 * the StopWhenUnneeded= processing queue, so they get reconsidered for stopping. */
static void check_unneeded_dependencies(Unit *u) {

        static const UnitDependency deps[] = {
                UNIT_REQUIRES,
                UNIT_REQUISITE,
                UNIT_WANTS,
                UNIT_BINDS_TO,
        };

        assert(u);

        /* Add all units this unit depends on to the queue that processes StopWhenUnneeded= behaviour. */

        for (size_t j = 0; j < ELEMENTSOF(deps); j++) {
                Unit *other;
                void *v;

                HASHMAP_FOREACH_KEY(v, other, u->dependencies[deps[j]])
                        unit_submit_to_stop_when_unneeded_queue(other);
        }
}
1856
/* If any unit this one is bound to (BindsTo=) has gone down, enqueue a stop job for this
 * unit too. Rate-limited so repeatedly failing stops don't loop forever. */
static void unit_check_binds_to(Unit *u) {
        _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
        bool stop = false;
        Unit *other;
        void *v;
        int r;

        assert(u);

        /* A pending job will sort this out; and only active units need checking. */
        if (u->job)
                return;

        if (unit_active_state(u) != UNIT_ACTIVE)
                return;

        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BINDS_TO]) {
                if (other->job)
                        continue;

                if (!other->coldplugged)
                        /* We might yet create a job for the other unit… */
                        continue;

                if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
                        continue;

                /* 'other' is down — this unit must go down too. */
                stop = true;
                break;
        }

        if (!stop)
                return;

        /* If stopping a unit fails continuously we might enter a stop
         * loop here, hence stop acting on the service being
         * unnecessary after a while. */
        if (!ratelimit_below(&u->auto_stop_ratelimit)) {
                log_unit_warning(u, "Unit is bound to inactive unit %s, but not stopping since we tried this too often recently.", other->id);
                return;
        }

        assert(other);
        log_unit_info(u, "Unit is bound to inactive unit %s. Stopping, too.", other->id);

        /* A unit we need to run is gone. Sniff. Let's stop this. */
        r = manager_add_job(u->manager, JOB_STOP, u, JOB_FAIL, NULL, &error, NULL);
        if (r < 0)
                log_unit_warning_errno(u, r, "Failed to enqueue stop job, ignoring: %s", bus_error_message(&error, r));
}
1906
/* The unit just became active: enqueue start jobs for its Requires=/BindsTo=/Wants=
 * dependencies that aren't active yet — skipping those we are ordered After=, since the job
 * engine handled those before we started — and stop jobs for Conflicts=/ConflictedBy= units.
 * Enqueue results are deliberately ignored (best-effort). Note that Wants= uses JOB_FAIL
 * while the stronger dependency types use JOB_REPLACE. */
static void retroactively_start_dependencies(Unit *u) {
        Unit *other;
        void *v;

        assert(u);
        assert(UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)));

        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_REQUIRES])
                if (!hashmap_get(u->dependencies[UNIT_AFTER], other) &&
                    !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
                        manager_add_job(u->manager, JOB_START, other, JOB_REPLACE, NULL, NULL, NULL);

        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BINDS_TO])
                if (!hashmap_get(u->dependencies[UNIT_AFTER], other) &&
                    !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
                        manager_add_job(u->manager, JOB_START, other, JOB_REPLACE, NULL, NULL, NULL);

        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_WANTS])
                if (!hashmap_get(u->dependencies[UNIT_AFTER], other) &&
                    !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
                        manager_add_job(u->manager, JOB_START, other, JOB_FAIL, NULL, NULL, NULL);

        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_CONFLICTS])
                if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
                        manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL, NULL);

        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_CONFLICTED_BY])
                if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
                        manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL, NULL);
}
1937
/* The unit just went down: enqueue stop jobs for all still-running units that are BoundBy=
 * it. Enqueue results are deliberately ignored (best-effort). */
static void retroactively_stop_dependencies(Unit *u) {
        Unit *other;
        void *v;

        assert(u);
        assert(UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)));

        /* Pull down units which are bound to us recursively if enabled */
        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BOUND_BY])
                if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
                        manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL, NULL);
}
1950
/* Enqueue start jobs for all units listed in this unit's OnFailure=, using the configured
 * OnFailureJobMode=. Enqueue failures are logged but otherwise ignored. */
void unit_start_on_failure(Unit *u) {
        Unit *other;
        void *v;
        int r;

        assert(u);

        if (hashmap_size(u->dependencies[UNIT_ON_FAILURE]) <= 0)
                return;

        log_unit_info(u, "Triggering OnFailure= dependencies.");

        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_ON_FAILURE]) {
                _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;

                r = manager_add_job(u->manager, JOB_START, other, u->on_failure_job_mode, NULL, &error, NULL);
                if (r < 0)
                        log_unit_warning_errno(u, r, "Failed to enqueue OnFailure= job, ignoring: %s", bus_error_message(&error, r));
        }
}
1971
/* Notify every unit that is triggered by this one (TriggeredBy=) about our state change, if
 * its unit type implements the trigger_notify() hook. */
void unit_trigger_notify(Unit *u) {
        Unit *other;
        void *v;

        assert(u);

        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_TRIGGERED_BY])
                if (UNIT_VTABLE(other)->trigger_notify)
                        UNIT_VTABLE(other)->trigger_notify(other, u);
}
1982
/* Possibly raise a log level (i.e. lower it numerically) when a resource threshold was
 * crossed: the "notice" condition lifts the level to LOG_NOTICE, the "info" condition to
 * LOG_INFO. A level that is already at least that urgent is left untouched. */
static int raise_level(int log_level, bool condition_info, bool condition_notice) {
        int ceiling;

        ceiling = condition_notice ? LOG_NOTICE :
                  condition_info ? LOG_INFO : log_level;

        return log_level < ceiling ? log_level : ceiling;
}
1990
1991 static int unit_log_resources(Unit *u) {
1992 struct iovec iovec[1 + _CGROUP_IP_ACCOUNTING_METRIC_MAX + _CGROUP_IO_ACCOUNTING_METRIC_MAX + 4];
1993 bool any_traffic = false, have_ip_accounting = false, any_io = false, have_io_accounting = false;
1994 _cleanup_free_ char *igress = NULL, *egress = NULL, *rr = NULL, *wr = NULL;
1995 int log_level = LOG_DEBUG; /* May be raised if resources consumed over a threshold */
1996 size_t n_message_parts = 0, n_iovec = 0;
1997 char* message_parts[1 + 2 + 2 + 1], *t;
1998 nsec_t nsec = NSEC_INFINITY;
1999 int r;
2000 const char* const ip_fields[_CGROUP_IP_ACCOUNTING_METRIC_MAX] = {
2001 [CGROUP_IP_INGRESS_BYTES] = "IP_METRIC_INGRESS_BYTES",
2002 [CGROUP_IP_INGRESS_PACKETS] = "IP_METRIC_INGRESS_PACKETS",
2003 [CGROUP_IP_EGRESS_BYTES] = "IP_METRIC_EGRESS_BYTES",
2004 [CGROUP_IP_EGRESS_PACKETS] = "IP_METRIC_EGRESS_PACKETS",
2005 };
2006 const char* const io_fields[_CGROUP_IO_ACCOUNTING_METRIC_MAX] = {
2007 [CGROUP_IO_READ_BYTES] = "IO_METRIC_READ_BYTES",
2008 [CGROUP_IO_WRITE_BYTES] = "IO_METRIC_WRITE_BYTES",
2009 [CGROUP_IO_READ_OPERATIONS] = "IO_METRIC_READ_OPERATIONS",
2010 [CGROUP_IO_WRITE_OPERATIONS] = "IO_METRIC_WRITE_OPERATIONS",
2011 };
2012
2013 assert(u);
2014
2015 /* Invoked whenever a unit enters failed or dead state. Logs information about consumed resources if resource
2016 * accounting was enabled for a unit. It does this in two ways: a friendly human readable string with reduced
2017 * information and the complete data in structured fields. */
2018
2019 (void) unit_get_cpu_usage(u, &nsec);
2020 if (nsec != NSEC_INFINITY) {
2021 char buf[FORMAT_TIMESPAN_MAX] = "";
2022
2023 /* Format the CPU time for inclusion in the structured log message */
2024 if (asprintf(&t, "CPU_USAGE_NSEC=%" PRIu64, nsec) < 0) {
2025 r = log_oom();
2026 goto finish;
2027 }
2028 iovec[n_iovec++] = IOVEC_MAKE_STRING(t);
2029
2030 /* Format the CPU time for inclusion in the human language message string */
2031 format_timespan(buf, sizeof(buf), nsec / NSEC_PER_USEC, USEC_PER_MSEC);
2032 t = strjoin("consumed ", buf, " CPU time");
2033 if (!t) {
2034 r = log_oom();
2035 goto finish;
2036 }
2037
2038 message_parts[n_message_parts++] = t;
2039
2040 log_level = raise_level(log_level,
2041 nsec > NOTICEWORTHY_CPU_NSEC,
2042 nsec > MENTIONWORTHY_CPU_NSEC);
2043 }
2044
2045 for (CGroupIOAccountingMetric k = 0; k < _CGROUP_IO_ACCOUNTING_METRIC_MAX; k++) {
2046 char buf[FORMAT_BYTES_MAX] = "";
2047 uint64_t value = UINT64_MAX;
2048
2049 assert(io_fields[k]);
2050
2051 (void) unit_get_io_accounting(u, k, k > 0, &value);
2052 if (value == UINT64_MAX)
2053 continue;
2054
2055 have_io_accounting = true;
2056 if (value > 0)
2057 any_io = true;
2058
2059 /* Format IO accounting data for inclusion in the structured log message */
2060 if (asprintf(&t, "%s=%" PRIu64, io_fields[k], value) < 0) {
2061 r = log_oom();
2062 goto finish;
2063 }
2064 iovec[n_iovec++] = IOVEC_MAKE_STRING(t);
2065
2066 /* Format the IO accounting data for inclusion in the human language message string, but only
2067 * for the bytes counters (and not for the operations counters) */
2068 if (k == CGROUP_IO_READ_BYTES) {
2069 assert(!rr);
2070 rr = strjoin("read ", format_bytes(buf, sizeof(buf), value), " from disk");
2071 if (!rr) {
2072 r = log_oom();
2073 goto finish;
2074 }
2075 } else if (k == CGROUP_IO_WRITE_BYTES) {
2076 assert(!wr);
2077 wr = strjoin("written ", format_bytes(buf, sizeof(buf), value), " to disk");
2078 if (!wr) {
2079 r = log_oom();
2080 goto finish;
2081 }
2082 }
2083
2084 if (IN_SET(k, CGROUP_IO_READ_BYTES, CGROUP_IO_WRITE_BYTES))
2085 log_level = raise_level(log_level,
2086 value > MENTIONWORTHY_IO_BYTES,
2087 value > NOTICEWORTHY_IO_BYTES);
2088 }
2089
2090 if (have_io_accounting) {
2091 if (any_io) {
2092 if (rr)
2093 message_parts[n_message_parts++] = TAKE_PTR(rr);
2094 if (wr)
2095 message_parts[n_message_parts++] = TAKE_PTR(wr);
2096
2097 } else {
2098 char *k;
2099
2100 k = strdup("no IO");
2101 if (!k) {
2102 r = log_oom();
2103 goto finish;
2104 }
2105
2106 message_parts[n_message_parts++] = k;
2107 }
2108 }
2109
2110 for (CGroupIPAccountingMetric m = 0; m < _CGROUP_IP_ACCOUNTING_METRIC_MAX; m++) {
2111 char buf[FORMAT_BYTES_MAX] = "";
2112 uint64_t value = UINT64_MAX;
2113
2114 assert(ip_fields[m]);
2115
2116 (void) unit_get_ip_accounting(u, m, &value);
2117 if (value == UINT64_MAX)
2118 continue;
2119
2120 have_ip_accounting = true;
2121 if (value > 0)
2122 any_traffic = true;
2123
2124 /* Format IP accounting data for inclusion in the structured log message */
2125 if (asprintf(&t, "%s=%" PRIu64, ip_fields[m], value) < 0) {
2126 r = log_oom();
2127 goto finish;
2128 }
2129 iovec[n_iovec++] = IOVEC_MAKE_STRING(t);
2130
2131 /* Format the IP accounting data for inclusion in the human language message string, but only for the
2132 * bytes counters (and not for the packets counters) */
2133 if (m == CGROUP_IP_INGRESS_BYTES) {
2134 assert(!igress);
2135 igress = strjoin("received ", format_bytes(buf, sizeof(buf), value), " IP traffic");
2136 if (!igress) {
2137 r = log_oom();
2138 goto finish;
2139 }
2140 } else if (m == CGROUP_IP_EGRESS_BYTES) {
2141 assert(!egress);
2142 egress = strjoin("sent ", format_bytes(buf, sizeof(buf), value), " IP traffic");
2143 if (!egress) {
2144 r = log_oom();
2145 goto finish;
2146 }
2147 }
2148
2149 if (IN_SET(m, CGROUP_IP_INGRESS_BYTES, CGROUP_IP_EGRESS_BYTES))
2150 log_level = raise_level(log_level,
2151 value > MENTIONWORTHY_IP_BYTES,
2152 value > NOTICEWORTHY_IP_BYTES);
2153 }
2154
2155 if (have_ip_accounting) {
2156 if (any_traffic) {
2157 if (igress)
2158 message_parts[n_message_parts++] = TAKE_PTR(igress);
2159 if (egress)
2160 message_parts[n_message_parts++] = TAKE_PTR(egress);
2161
2162 } else {
2163 char *k;
2164
2165 k = strdup("no IP traffic");
2166 if (!k) {
2167 r = log_oom();
2168 goto finish;
2169 }
2170
2171 message_parts[n_message_parts++] = k;
2172 }
2173 }
2174
2175 /* Is there any accounting data available at all? */
2176 if (n_iovec == 0) {
2177 r = 0;
2178 goto finish;
2179 }
2180
2181 if (n_message_parts == 0)
2182 t = strjoina("MESSAGE=", u->id, ": Completed.");
2183 else {
2184 _cleanup_free_ char *joined;
2185
2186 message_parts[n_message_parts] = NULL;
2187
2188 joined = strv_join(message_parts, ", ");
2189 if (!joined) {
2190 r = log_oom();
2191 goto finish;
2192 }
2193
2194 joined[0] = ascii_toupper(joined[0]);
2195 t = strjoina("MESSAGE=", u->id, ": ", joined, ".");
2196 }
2197
2198 /* The following four fields we allocate on the stack or are static strings, we hence don't want to free them,
2199 * and hence don't increase n_iovec for them */
2200 iovec[n_iovec] = IOVEC_MAKE_STRING(t);
2201 iovec[n_iovec + 1] = IOVEC_MAKE_STRING("MESSAGE_ID=" SD_MESSAGE_UNIT_RESOURCES_STR);
2202
2203 t = strjoina(u->manager->unit_log_field, u->id);
2204 iovec[n_iovec + 2] = IOVEC_MAKE_STRING(t);
2205
2206 t = strjoina(u->manager->invocation_log_field, u->invocation_id_string);
2207 iovec[n_iovec + 3] = IOVEC_MAKE_STRING(t);
2208
2209 log_struct_iovec(log_level, iovec, n_iovec + 4);
2210 r = 0;
2211
2212 finish:
2213 for (size_t i = 0; i < n_message_parts; i++)
2214 free(message_parts[i]);
2215
2216 for (size_t i = 0; i < n_iovec; i++)
2217 free(iovec[i].iov_base);
2218
2219 return r;
2220
2221 }
2222
2223 static void unit_update_on_console(Unit *u) {
2224 bool b;
2225
2226 assert(u);
2227
2228 b = unit_needs_console(u);
2229 if (u->on_console == b)
2230 return;
2231
2232 u->on_console = b;
2233 if (b)
2234 manager_ref_console(u->manager);
2235 else
2236 manager_unref_console(u->manager);
2237 }
2238
2239 static void unit_emit_audit_start(Unit *u) {
2240 assert(u);
2241
2242 if (u->type != UNIT_SERVICE)
2243 return;
2244
2245 /* Write audit record if we have just finished starting up */
2246 manager_send_unit_audit(u->manager, u, AUDIT_SERVICE_START, true);
2247 u->in_audit = true;
2248 }
2249
2250 static void unit_emit_audit_stop(Unit *u, UnitActiveState state) {
2251 assert(u);
2252
2253 if (u->type != UNIT_SERVICE)
2254 return;
2255
2256 if (u->in_audit) {
2257 /* Write audit record if we have just finished shutting down */
2258 manager_send_unit_audit(u->manager, u, AUDIT_SERVICE_STOP, state == UNIT_INACTIVE);
2259 u->in_audit = false;
2260 } else {
2261 /* Hmm, if there was no start record written write it now, so that we always have a nice pair */
2262 manager_send_unit_audit(u->manager, u, AUDIT_SERVICE_START, state == UNIT_INACTIVE);
2263
2264 if (state == UNIT_INACTIVE)
2265 manager_send_unit_audit(u->manager, u, AUDIT_SERVICE_STOP, true);
2266 }
2267 }
2268
/* React to the unit having reached active state @ns while job @j is installed on it: wake up a job
 * stuck in JOB_WAITING, complete jobs that the new state satisfies, and fail jobs it contradicts.
 * Returns true if the state change was "unexpected", i.e. not something the running job asked for. */
static bool unit_process_job(Job *j, UnitActiveState ns, UnitNotifyFlags flags) {
        bool unexpected = false;
        JobResult result;

        assert(j);

        if (j->state == JOB_WAITING)

                /* So we reached a different state for this job. Let's see if we can run it now if it failed previously
                 * due to EAGAIN. */
                job_add_to_run_queue(j);

        /* Let's check whether the unit's new state constitutes a finished job, or maybe contradicts a running job and
         * hence needs to invalidate jobs. */

        switch (j->type) {

        case JOB_START:
        case JOB_VERIFY_ACTIVE:

                /* Any active/reloading state completes a start job. A running start job that sees any
                 * state other than "activating" was not the one driving the change. */
                if (UNIT_IS_ACTIVE_OR_RELOADING(ns))
                        job_finish_and_invalidate(j, JOB_DONE, true, false);
                else if (j->state == JOB_RUNNING && ns != UNIT_ACTIVATING) {
                        unexpected = true;

                        if (UNIT_IS_INACTIVE_OR_FAILED(ns)) {
                                if (ns == UNIT_FAILED)
                                        result = JOB_FAILED;
                                else
                                        result = JOB_DONE;

                                job_finish_and_invalidate(j, result, true, false);
                        }
                }

                break;

        case JOB_RELOAD:
        case JOB_RELOAD_OR_START:
        case JOB_TRY_RELOAD:

                /* A reload job only completes once the unit is back in "active"; the reload outcome is
                 * carried in via the UNIT_NOTIFY_RELOAD_FAILURE flag. */
                if (j->state == JOB_RUNNING) {
                        if (ns == UNIT_ACTIVE)
                                job_finish_and_invalidate(j, (flags & UNIT_NOTIFY_RELOAD_FAILURE) ? JOB_FAILED : JOB_DONE, true, false);
                        else if (!IN_SET(ns, UNIT_ACTIVATING, UNIT_RELOADING)) {
                                unexpected = true;

                                if (UNIT_IS_INACTIVE_OR_FAILED(ns))
                                        job_finish_and_invalidate(j, ns == UNIT_FAILED ? JOB_FAILED : JOB_DONE, true, false);
                        }
                }

                break;

        case JOB_STOP:
        case JOB_RESTART:
        case JOB_TRY_RESTART:

                /* Any inactive/failed state completes a stop job; anything else while it runs (except
                 * "deactivating") means the unit went somewhere the job didn't intend. */
                if (UNIT_IS_INACTIVE_OR_FAILED(ns))
                        job_finish_and_invalidate(j, JOB_DONE, true, false);
                else if (j->state == JOB_RUNNING && ns != UNIT_DEACTIVATING) {
                        unexpected = true;
                        job_finish_and_invalidate(j, JOB_FAILED, true, false);
                }

                break;

        default:
                assert_not_reached("Job type unknown");
        }

        return unexpected;
}
2342
/* Central hook invoked by unit type implementations whenever a unit transitions from state @os to @ns.
 * Updates timestamps, jobs, dependencies, accounting, audit/plymouth notifications and GC/stop queues.
 * Note: the ordering of the steps below is deliberate; do not reorder casually. */
void unit_notify(Unit *u, UnitActiveState os, UnitActiveState ns, UnitNotifyFlags flags) {
        const char *reason;
        Manager *m;

        assert(u);
        assert(os < _UNIT_ACTIVE_STATE_MAX);
        assert(ns < _UNIT_ACTIVE_STATE_MAX);

        /* Note that this is called for all low-level state changes, even if they might map to the same high-level
         * UnitActiveState! That means that ns == os is an expected behavior here. For example: if a mount point is
         * remounted this function will be called too! */

        m = u->manager;

        /* Let's enqueue the change signal early. In case this unit has a job associated we want that this unit is in
         * the bus queue, so that any job change signal queued will force out the unit change signal first. */
        unit_add_to_dbus_queue(u);

        /* Update systemd-oomd on the property/state change */
        if (os != ns) {
                /* Always send an update if the unit is going into an inactive state so systemd-oomd knows to stop
                 * monitoring.
                 * Also send an update whenever the unit goes active; this is to handle a case where an override file
                 * sets one of the ManagedOOM*= properties to "kill", then later removes it. systemd-oomd needs to
                 * know to stop monitoring when the unit changes from "kill" -> "auto" on daemon-reload, but we don't
                 * have the information on the property. Thus, indiscriminately send an update. */
                if (UNIT_IS_INACTIVE_OR_FAILED(ns) || UNIT_IS_ACTIVE_OR_RELOADING(ns))
                        (void) manager_varlink_send_managed_oom_update(u);
        }

        /* Update timestamps for state changes */
        if (!MANAGER_IS_RELOADING(m)) {
                dual_timestamp_get(&u->state_change_timestamp);

                if (UNIT_IS_INACTIVE_OR_FAILED(os) && !UNIT_IS_INACTIVE_OR_FAILED(ns))
                        u->inactive_exit_timestamp = u->state_change_timestamp;
                else if (!UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_INACTIVE_OR_FAILED(ns))
                        u->inactive_enter_timestamp = u->state_change_timestamp;

                if (!UNIT_IS_ACTIVE_OR_RELOADING(os) && UNIT_IS_ACTIVE_OR_RELOADING(ns))
                        u->active_enter_timestamp = u->state_change_timestamp;
                else if (UNIT_IS_ACTIVE_OR_RELOADING(os) && !UNIT_IS_ACTIVE_OR_RELOADING(ns))
                        u->active_exit_timestamp = u->state_change_timestamp;
        }

        /* Keep track of failed units */
        (void) manager_update_failed_units(m, u, ns == UNIT_FAILED);

        /* Make sure the cgroup and state files are always removed when we become inactive */
        if (UNIT_IS_INACTIVE_OR_FAILED(ns)) {
                /* Inactive units have nothing left to reload or restart, so clear both markers. */
                SET_FLAG(u->markers,
                         (1u << UNIT_MARKER_NEEDS_RELOAD)|(1u << UNIT_MARKER_NEEDS_RESTART),
                         false);
                unit_prune_cgroup(u);
                unit_unlink_state_files(u);
        } else if (ns != os && ns == UNIT_RELOADING)
                /* Entering "reloading" satisfies a pending reload request. */
                SET_FLAG(u->markers, 1u << UNIT_MARKER_NEEDS_RELOAD, false);

        unit_update_on_console(u);

        if (!MANAGER_IS_RELOADING(m)) {
                bool unexpected;

                /* Let's propagate state changes to the job */
                if (u->job)
                        unexpected = unit_process_job(u->job, ns, flags);
                else
                        unexpected = true;

                /* If this state change happened without being requested by a job, then let's retroactively start or
                 * stop dependencies. We skip that step when deserializing, since we don't want to create any
                 * additional jobs just because something is already activated. */

                if (unexpected) {
                        if (UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_ACTIVE_OR_ACTIVATING(ns))
                                retroactively_start_dependencies(u);
                        else if (UNIT_IS_ACTIVE_OR_ACTIVATING(os) && UNIT_IS_INACTIVE_OR_DEACTIVATING(ns))
                                retroactively_stop_dependencies(u);
                }

                /* stop unneeded units regardless if going down was expected or not */
                if (UNIT_IS_INACTIVE_OR_FAILED(ns))
                        check_unneeded_dependencies(u);

                if (ns != os && ns == UNIT_FAILED) {
                        log_unit_debug(u, "Unit entered failed state.");

                        /* Skip OnFailure= handling when an auto-restart is coming, since the unit isn't
                         * really done failing yet. */
                        if (!(flags & UNIT_NOTIFY_WILL_AUTO_RESTART))
                                unit_start_on_failure(u);
                }

                if (UNIT_IS_ACTIVE_OR_RELOADING(ns) && !UNIT_IS_ACTIVE_OR_RELOADING(os)) {
                        /* This unit just finished starting up */

                        unit_emit_audit_start(u);
                        manager_send_unit_plymouth(m, u);
                }

                if (UNIT_IS_INACTIVE_OR_FAILED(ns) && !UNIT_IS_INACTIVE_OR_FAILED(os)) {
                        /* This unit just stopped/failed. */

                        unit_emit_audit_stop(u, ns);
                        unit_log_resources(u);
                }
        }

        manager_recheck_journal(m);
        manager_recheck_dbus(m);

        unit_trigger_notify(u);

        if (!MANAGER_IS_RELOADING(m)) {
                /* Maybe we finished startup and are now ready for being stopped because unneeded? */
                unit_submit_to_stop_when_unneeded_queue(u);

                /* Maybe we finished startup, but something we needed has vanished? Let's die then. (This happens when
                 * something BindsTo= to a Type=oneshot unit, as these units go directly from starting to inactive,
                 * without ever entering started.) */
                unit_check_binds_to(u);

                if (os != UNIT_FAILED && ns == UNIT_FAILED) {
                        reason = strjoina("unit ", u->id, " failed");
                        emergency_action(m, u->failure_action, 0, u->reboot_arg, unit_failure_action_exit_status(u), reason);
                } else if (!UNIT_IS_INACTIVE_OR_FAILED(os) && ns == UNIT_INACTIVE) {
                        reason = strjoina("unit ", u->id, " succeeded");
                        emergency_action(m, u->success_action, 0, u->reboot_arg, unit_success_action_exit_status(u), reason);
                }
        }

        unit_add_to_gc_queue(u);
}
2474
/* Registers @pid so that SIGCHLD/cgroup events for it are routed to @u. Returns 0 on success, negative
 * errno on failure. The manager's watch_pids hashmap uses two key spaces: the positive "pid" key maps
 * to a single Unit, while the negative "-pid" key maps to a NULL-terminated Unit* array for the case
 * where several units watch the same PID. */
int unit_watch_pid(Unit *u, pid_t pid, bool exclusive) {
        int r;

        assert(u);
        assert(pid_is_valid(pid));

        /* Watch a specific PID */

        /* Caller might be sure that this PID belongs to this unit only. Let's take this
         * opportunity to remove any stalled references to this PID as they can be created
         * easily (when watching a process which is not our direct child). */
        if (exclusive)
                manager_unwatch_pid(u->manager, pid);

        r = set_ensure_allocated(&u->pids, NULL);
        if (r < 0)
                return r;

        r = hashmap_ensure_allocated(&u->manager->watch_pids, NULL);
        if (r < 0)
                return r;

        /* First try, let's add the unit keyed by "pid". */
        r = hashmap_put(u->manager->watch_pids, PID_TO_PTR(pid), u);
        if (r == -EEXIST) {
                Unit **array;
                bool found = false;
                size_t n = 0;

                /* OK, the "pid" key is already assigned to a different unit. Let's see if the "-pid" key (which points
                 * to an array of Units rather than just a Unit), lists us already. */

                array = hashmap_get(u->manager->watch_pids, PID_TO_PTR(-pid));
                if (array)
                        for (; array[n]; n++)
                                if (array[n] == u)
                                        found = true;

                if (found) /* Found it already? if so, do nothing */
                        r = 0;
                else {
                        Unit **new_array;

                        /* Allocate a new array: the n existing entries, ourselves, and a NULL terminator. */
                        new_array = new(Unit*, n + 2);
                        if (!new_array)
                                return -ENOMEM;

                        /* memcpy_safe() tolerates array == NULL when n == 0. */
                        memcpy_safe(new_array, array, sizeof(Unit*) * n);
                        new_array[n] = u;
                        new_array[n+1] = NULL;

                        /* Add or replace the old array */
                        r = hashmap_replace(u->manager->watch_pids, PID_TO_PTR(-pid), new_array);
                        if (r < 0) {
                                free(new_array);
                                return r;
                        }

                        /* The hashmap owns the array; free the one we replaced. */
                        free(array);
                }
        } else if (r < 0)
                return r;

        /* Finally, record the PID in our own per-unit set. */
        r = set_put(u->pids, PID_TO_PTR(pid));
        if (r < 0)
                return r;

        return 0;
}
2545
/* Undoes unit_watch_pid(): removes @u's registrations for @pid from both the "pid" and "-pid" key
 * spaces of the manager's watch_pids hashmap, and from the unit's own PID set. */
void unit_unwatch_pid(Unit *u, pid_t pid) {
        Unit **array;

        assert(u);
        assert(pid_is_valid(pid));

        /* First let's drop the unit in case it's keyed as "pid". */
        (void) hashmap_remove_value(u->manager->watch_pids, PID_TO_PTR(pid), u);

        /* Then, let's also drop the unit, in case it's in the array keyed by -pid */
        array = hashmap_get(u->manager->watch_pids, PID_TO_PTR(-pid));
        if (array) {
                /* Let's iterate through the array, dropping our own entry */

                /* Compact in place: copy down every entry that isn't us, then re-terminate. */
                size_t m = 0;
                for (size_t n = 0; array[n]; n++)
                        if (array[n] != u)
                                array[m++] = array[n];
                array[m] = NULL;

                if (m == 0) {
                        /* The array is now empty, remove the entire entry */
                        assert_se(hashmap_remove(u->manager->watch_pids, PID_TO_PTR(-pid)) == array);
                        free(array);
                }
        }

        (void) set_remove(u->pids, PID_TO_PTR(pid));
}
2575
2576 void unit_unwatch_all_pids(Unit *u) {
2577 assert(u);
2578
2579 while (!set_isempty(u->pids))
2580 unit_unwatch_pid(u, PTR_TO_PID(set_first(u->pids)));
2581
2582 u->pids = set_free(u->pids);
2583 }
2584
/* Drops watches on PIDs whose processes have disappeared. The unit's main and control PIDs are kept
 * unconditionally, since their lifecycle is handled elsewhere. */
static void unit_tidy_watch_pids(Unit *u) {
        pid_t except1, except2;
        void *e;

        assert(u);

        /* Cleans dead PIDs from our list */

        except1 = unit_main_pid(u);
        except2 = unit_control_pid(u);

        SET_FOREACH(e, u->pids) {
                pid_t pid = PTR_TO_PID(e);

                if (pid == except1 || pid == except2)
                        continue;

                /* pid_is_unwaited() returning false means the process is fully gone (reaped or never
                 * existed), hence there's nothing left to watch. Removing the current entry while
                 * iterating is safe with SET_FOREACH. */
                if (!pid_is_unwaited(pid))
                        unit_unwatch_pid(u, pid);
        }
}
2606
2607 static int on_rewatch_pids_event(sd_event_source *s, void *userdata) {
2608 Unit *u = userdata;
2609
2610 assert(s);
2611 assert(u);
2612
2613 unit_tidy_watch_pids(u);
2614 unit_watch_all_pids(u);
2615
2616 /* If the PID set is empty now, then let's finish this off. */
2617 unit_synthesize_cgroup_empty_event(u);
2618
2619 return 0;
2620 }
2621
/* Schedules a deferred, idle-priority re-scan of the unit's watched PIDs (see on_rewatch_pids_event()).
 * Only needed on cgroup v1 setups; on the unified hierarchy proper cgroup-empty notifications exist.
 * Returns 0 on success (or when not needed), negative errno on failure. */
int unit_enqueue_rewatch_pids(Unit *u) {
        int r;

        assert(u);

        if (!u->cgroup_path)
                return -ENOENT;

        r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
        if (r < 0)
                return r;
        if (r > 0) /* On unified we can use proper notifications */
                return 0;

        /* Enqueues a low-priority job that will clean up dead PIDs from our list of PIDs to watch and subscribe to new
         * PIDs that might have appeared. We do this in a delayed job because the work might be quite slow, as it
         * involves issuing kill(pid, 0) on all processes we watch. */

        /* Lazily create the defer event source; it is kept around and merely re-enabled on subsequent
         * calls. */
        if (!u->rewatch_pids_event_source) {
                _cleanup_(sd_event_source_unrefp) sd_event_source *s = NULL;

                r = sd_event_add_defer(u->manager->event, &s, on_rewatch_pids_event, u);
                if (r < 0)
                        return log_error_errno(r, "Failed to allocate event source for tidying watched PIDs: %m");

                r = sd_event_source_set_priority(s, SD_EVENT_PRIORITY_IDLE);
                if (r < 0)
                        return log_error_errno(r, "Failed to adjust priority of event source for tidying watched PIDs: %m");

                (void) sd_event_source_set_description(s, "tidy-watch-pids");

                u->rewatch_pids_event_source = TAKE_PTR(s);
        }

        /* ONESHOT: fire once, then disable itself again. */
        r = sd_event_source_set_enabled(u->rewatch_pids_event_source, SD_EVENT_ONESHOT);
        if (r < 0)
                return log_error_errno(r, "Failed to enable event source for tidying watched PIDs: %m");

        return 0;
}
2662
2663 void unit_dequeue_rewatch_pids(Unit *u) {
2664 int r;
2665 assert(u);
2666
2667 if (!u->rewatch_pids_event_source)
2668 return;
2669
2670 r = sd_event_source_set_enabled(u->rewatch_pids_event_source, SD_EVENT_OFF);
2671 if (r < 0)
2672 log_warning_errno(r, "Failed to disable event source for tidying watched PIDs, ignoring: %m");
2673
2674 u->rewatch_pids_event_source = sd_event_source_unref(u->rewatch_pids_event_source);
2675 }
2676
2677 bool unit_job_is_applicable(Unit *u, JobType j) {
2678 assert(u);
2679 assert(j >= 0 && j < _JOB_TYPE_MAX);
2680
2681 switch (j) {
2682
2683 case JOB_VERIFY_ACTIVE:
2684 case JOB_START:
2685 case JOB_NOP:
2686 /* Note that we don't check unit_can_start() here. That's because .device units and suchlike are not
2687 * startable by us but may appear due to external events, and it thus makes sense to permit enqueuing
2688 * jobs for it. */
2689 return true;
2690
2691 case JOB_STOP:
2692 /* Similar as above. However, perpetual units can never be stopped (neither explicitly nor due to
2693 * external events), hence it makes no sense to permit enqueuing such a request either. */
2694 return !u->perpetual;
2695
2696 case JOB_RESTART:
2697 case JOB_TRY_RESTART:
2698 return unit_can_stop(u) && unit_can_start(u);
2699
2700 case JOB_RELOAD:
2701 case JOB_TRY_RELOAD:
2702 return unit_can_reload(u);
2703
2704 case JOB_RELOAD_OR_START:
2705 return unit_can_reload(u) && unit_can_start(u);
2706
2707 default:
2708 assert_not_reached("Invalid job type");
2709 }
2710 }
2711
2712 static void maybe_warn_about_dependency(Unit *u, const char *other, UnitDependency dependency) {
2713 assert(u);
2714
2715 /* Only warn about some unit types */
2716 if (!IN_SET(dependency, UNIT_CONFLICTS, UNIT_CONFLICTED_BY, UNIT_BEFORE, UNIT_AFTER, UNIT_ON_FAILURE, UNIT_TRIGGERS, UNIT_TRIGGERED_BY))
2717 return;
2718
2719 if (streq_ptr(u->id, other))
2720 log_unit_warning(u, "Dependency %s=%s dropped", unit_dependency_to_string(dependency), u->id);
2721 else
2722 log_unit_warning(u, "Dependency %s=%s dropped, merged into %s", unit_dependency_to_string(dependency), strna(other), u->id);
2723 }
2724
2725 static int unit_add_dependency_hashmap(
2726 Hashmap **h,
2727 Unit *other,
2728 UnitDependencyMask origin_mask,
2729 UnitDependencyMask destination_mask) {
2730
2731 UnitDependencyInfo info;
2732 int r;
2733
2734 assert(h);
2735 assert(other);
2736 assert(origin_mask < _UNIT_DEPENDENCY_MASK_FULL);
2737 assert(destination_mask < _UNIT_DEPENDENCY_MASK_FULL);
2738 assert(origin_mask > 0 || destination_mask > 0);
2739
2740 r = hashmap_ensure_allocated(h, NULL);
2741 if (r < 0)
2742 return r;
2743
2744 assert_cc(sizeof(void*) == sizeof(info));
2745
2746 info.data = hashmap_get(*h, other);
2747 if (info.data) {
2748 /* Entry already exists. Add in our mask. */
2749
2750 if (FLAGS_SET(origin_mask, info.origin_mask) &&
2751 FLAGS_SET(destination_mask, info.destination_mask))
2752 return 0; /* NOP */
2753
2754 info.origin_mask |= origin_mask;
2755 info.destination_mask |= destination_mask;
2756
2757 r = hashmap_update(*h, other, info.data);
2758 } else {
2759 info = (UnitDependencyInfo) {
2760 .origin_mask = origin_mask,
2761 .destination_mask = destination_mask,
2762 };
2763
2764 r = hashmap_put(*h, other, info.data);
2765 }
2766 if (r < 0)
2767 return r;
2768
2769 return 1;
2770 }
2771
/* Adds dependency @d from @u on @other (after following unit merges on both sides), plus the inverse
 * dependency on @other where one exists, and optionally a References=/ReferencedBy= pair. @mask
 * records where the dependency comes from (file, udev, implicit, ...). Returns 0 on success or when
 * the dependency was deliberately ignored; negative errno on failure. */
int unit_add_dependency(
                Unit *u,
                UnitDependency d,
                Unit *other,
                bool add_reference,
                UnitDependencyMask mask) {

        /* Maps each dependency type to the type to record on the target unit; entries missing from the
         * table (implicitly 0 == UNIT_REQUIRES) are never looked up because of the checks below. */
        static const UnitDependency inverse_table[_UNIT_DEPENDENCY_MAX] = {
                [UNIT_REQUIRES] = UNIT_REQUIRED_BY,
                [UNIT_WANTS] = UNIT_WANTED_BY,
                [UNIT_REQUISITE] = UNIT_REQUISITE_OF,
                [UNIT_BINDS_TO] = UNIT_BOUND_BY,
                [UNIT_PART_OF] = UNIT_CONSISTS_OF,
                [UNIT_REQUIRED_BY] = UNIT_REQUIRES,
                [UNIT_REQUISITE_OF] = UNIT_REQUISITE,
                [UNIT_WANTED_BY] = UNIT_WANTS,
                [UNIT_BOUND_BY] = UNIT_BINDS_TO,
                [UNIT_CONSISTS_OF] = UNIT_PART_OF,
                [UNIT_CONFLICTS] = UNIT_CONFLICTED_BY,
                [UNIT_CONFLICTED_BY] = UNIT_CONFLICTS,
                [UNIT_BEFORE] = UNIT_AFTER,
                [UNIT_AFTER] = UNIT_BEFORE,
                [UNIT_ON_FAILURE] = _UNIT_DEPENDENCY_INVALID,
                [UNIT_REFERENCES] = UNIT_REFERENCED_BY,
                [UNIT_REFERENCED_BY] = UNIT_REFERENCES,
                [UNIT_TRIGGERS] = UNIT_TRIGGERED_BY,
                [UNIT_TRIGGERED_BY] = UNIT_TRIGGERS,
                [UNIT_PROPAGATES_RELOAD_TO] = UNIT_RELOAD_PROPAGATED_FROM,
                [UNIT_RELOAD_PROPAGATED_FROM] = UNIT_PROPAGATES_RELOAD_TO,
                [UNIT_JOINS_NAMESPACE_OF] = UNIT_JOINS_NAMESPACE_OF,
        };
        Unit *original_u = u, *original_other = other;
        int r;
        /* Helper to know whether sending a notification is necessary or not:
         * if the dependency is already there, no need to notify! */
        bool noop = true;

        assert(u);
        assert(d >= 0 && d < _UNIT_DEPENDENCY_MAX);
        assert(other);

        u = unit_follow_merge(u);
        other = unit_follow_merge(other);

        /* We won't allow dependencies on ourselves. We will not
         * consider them an error however. */
        if (u == other) {
                maybe_warn_about_dependency(original_u, original_other->id, d);
                return 0;
        }

        /* Note that ordering a device unit after a unit is permitted since it
         * allows to start its job running timeout at a specific time. */
        if (d == UNIT_BEFORE && other->type == UNIT_DEVICE) {
                log_unit_warning(u, "Dependency Before=%s ignored (.device units cannot be delayed)", other->id);
                return 0;
        }

        if (d == UNIT_ON_FAILURE && !UNIT_VTABLE(u)->can_fail) {
                log_unit_warning(u, "Requested dependency OnFailure=%s ignored (%s units cannot fail).", other->id, unit_type_to_string(u->type));
                return 0;
        }

        if (d == UNIT_TRIGGERS && !UNIT_VTABLE(u)->can_trigger)
                return log_unit_error_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "Requested dependency Triggers=%s refused (%s units cannot trigger other units).", other->id, unit_type_to_string(u->type));
        if (d == UNIT_TRIGGERED_BY && !UNIT_VTABLE(other)->can_trigger)
                return log_unit_error_errno(u, SYNTHETIC_ERRNO(EINVAL),
                                            "Requested dependency TriggeredBy=%s refused (%s units cannot trigger other units).", other->id, unit_type_to_string(other->type));

        /* Forward direction: record @other in @u's table for type @d. */
        r = unit_add_dependency_hashmap(u->dependencies + d, other, mask, 0);
        if (r < 0)
                return r;
        else if (r > 0)
                noop = false;

        /* Inverse direction, unless the type has no inverse or is its own inverse (already handled). */
        if (inverse_table[d] != _UNIT_DEPENDENCY_INVALID && inverse_table[d] != d) {
                r = unit_add_dependency_hashmap(other->dependencies + inverse_table[d], u, 0, mask);
                if (r < 0)
                        return r;
                else if (r > 0)
                        noop = false;
        }

        if (add_reference) {
                r = unit_add_dependency_hashmap(u->dependencies + UNIT_REFERENCES, other, mask, 0);
                if (r < 0)
                        return r;
                else if (r > 0)
                        noop = false;

                r = unit_add_dependency_hashmap(other->dependencies + UNIT_REFERENCED_BY, u, 0, mask);
                if (r < 0)
                        return r;
                else if (r > 0)
                        noop = false;
        }

        if (!noop)
                unit_add_to_dbus_queue(u);
        return 0;
}
2874
2875 int unit_add_two_dependencies(Unit *u, UnitDependency d, UnitDependency e, Unit *other, bool add_reference, UnitDependencyMask mask) {
2876 int r;
2877
2878 assert(u);
2879
2880 r = unit_add_dependency(u, d, other, add_reference, mask);
2881 if (r < 0)
2882 return r;
2883
2884 return unit_add_dependency(u, e, other, add_reference, mask);
2885 }
2886
2887 static int resolve_template(Unit *u, const char *name, char **buf, const char **ret) {
2888 int r;
2889
2890 assert(u);
2891 assert(name);
2892 assert(buf);
2893 assert(ret);
2894
2895 if (!unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
2896 *buf = NULL;
2897 *ret = name;
2898 return 0;
2899 }
2900
2901 if (u->instance)
2902 r = unit_name_replace_instance(name, u->instance, buf);
2903 else {
2904 _cleanup_free_ char *i = NULL;
2905
2906 r = unit_name_to_prefix(u->id, &i);
2907 if (r < 0)
2908 return r;
2909
2910 r = unit_name_replace_instance(name, i, buf);
2911 }
2912 if (r < 0)
2913 return r;
2914
2915 *ret = *buf;
2916 return 0;
2917 }
2918
2919 int unit_add_dependency_by_name(Unit *u, UnitDependency d, const char *name, bool add_reference, UnitDependencyMask mask) {
2920 _cleanup_free_ char *buf = NULL;
2921 Unit *other;
2922 int r;
2923
2924 assert(u);
2925 assert(name);
2926
2927 r = resolve_template(u, name, &buf, &name);
2928 if (r < 0)
2929 return r;
2930
2931 r = manager_load_unit(u->manager, name, NULL, NULL, &other);
2932 if (r < 0)
2933 return r;
2934
2935 return unit_add_dependency(u, d, other, add_reference, mask);
2936 }
2937
2938 int unit_add_two_dependencies_by_name(Unit *u, UnitDependency d, UnitDependency e, const char *name, bool add_reference, UnitDependencyMask mask) {
2939 _cleanup_free_ char *buf = NULL;
2940 Unit *other;
2941 int r;
2942
2943 assert(u);
2944 assert(name);
2945
2946 r = resolve_template(u, name, &buf, &name);
2947 if (r < 0)
2948 return r;
2949
2950 r = manager_load_unit(u->manager, name, NULL, NULL, &other);
2951 if (r < 0)
2952 return r;
2953
2954 return unit_add_two_dependencies(u, d, e, other, add_reference, mask);
2955 }
2956
int set_unit_path(const char *p) {
        /* Debug helper: points subsequent unit lookups at an alternative search path via the
         * environment. Returns 0 on success, negative errno on failure. */
        return setenv("SYSTEMD_UNIT_PATH", p, 1) < 0 ? -errno : 0;
}
2964
2965 char *unit_dbus_path(Unit *u) {
2966 assert(u);
2967
2968 if (!u->id)
2969 return NULL;
2970
2971 return unit_dbus_path_from_name(u->id);
2972 }
2973
2974 char *unit_dbus_path_invocation_id(Unit *u) {
2975 assert(u);
2976
2977 if (sd_id128_is_null(u->invocation_id))
2978 return NULL;
2979
2980 return unit_dbus_path_from_name(u->invocation_id_string);
2981 }
2982
int unit_set_invocation_id(Unit *u, sd_id128_t id) {
        int r;

        assert(u);

        /* Set the invocation ID for this unit. If we cannot, this will not roll back, but reset the whole thing. */

        if (sd_id128_equal(u->invocation_id, id))
                return 0;

        /* Drop the old entry first; the hashmap key is a pointer to the ID stored inside the unit
         * itself, so it must be removed before we overwrite u->invocation_id. */
        if (!sd_id128_is_null(u->invocation_id))
                (void) hashmap_remove_value(u->manager->units_by_invocation_id, &u->invocation_id, u);

        /* A null ID means "unset": nothing more to register. */
        if (sd_id128_is_null(id)) {
                r = 0;
                goto reset;
        }

        r = hashmap_ensure_allocated(&u->manager->units_by_invocation_id, &id128_hash_ops);
        if (r < 0)
                goto reset;

        /* Store the ID (and its string form) before inserting, since the hashmap key points into the
         * unit. */
        u->invocation_id = id;
        sd_id128_to_string(id, u->invocation_id_string);

        r = hashmap_put(u->manager->units_by_invocation_id, &u->invocation_id, u);
        if (r < 0)
                goto reset;

        return 0;

reset:
        /* On failure (or explicit unset) leave the unit with no invocation ID at all. */
        u->invocation_id = SD_ID128_NULL;
        u->invocation_id_string[0] = 0;
        return r;
}
3019
3020 int unit_set_slice(Unit *u, Unit *slice) {
3021 assert(u);
3022 assert(slice);
3023
3024 /* Sets the unit slice if it has not been set before. Is extra
3025 * careful, to only allow this for units that actually have a
3026 * cgroup context. Also, we don't allow to set this for slices
3027 * (since the parent slice is derived from the name). Make
3028 * sure the unit we set is actually a slice. */
3029
3030 if (!UNIT_HAS_CGROUP_CONTEXT(u))
3031 return -EOPNOTSUPP;
3032
3033 if (u->type == UNIT_SLICE)
3034 return -EINVAL;
3035
3036 if (unit_active_state(u) != UNIT_INACTIVE)
3037 return -EBUSY;
3038
3039 if (slice->type != UNIT_SLICE)
3040 return -EINVAL;
3041
3042 if (unit_has_name(u, SPECIAL_INIT_SCOPE) &&
3043 !unit_has_name(slice, SPECIAL_ROOT_SLICE))
3044 return -EPERM;
3045
3046 if (UNIT_DEREF(u->slice) == slice)
3047 return 0;
3048
3049 /* Disallow slice changes if @u is already bound to cgroups */
3050 if (UNIT_ISSET(u->slice) && u->cgroup_realized)
3051 return -EBUSY;
3052
3053 unit_ref_set(&u->slice, u, slice);
3054 return 1;
3055 }
3056
/* Picks and applies a default slice for @u when none was configured: instantiated units get a
 * per-template slice, extrinsic units go to the root slice, everything else goes to system.slice (pid1)
 * or app.slice (user manager). Returns unit_set_slice()'s result, or 0 if a slice was already set. */
int unit_set_default_slice(Unit *u) {
        const char *slice_name;
        Unit *slice;
        int r;

        assert(u);

        if (UNIT_ISSET(u->slice))
                return 0;

        if (u->instance) {
                _cleanup_free_ char *prefix = NULL, *escaped = NULL;

                /* Implicitly place all instantiated units in their
                 * own per-template slice */

                r = unit_name_to_prefix(u->id, &prefix);
                if (r < 0)
                        return r;

                /* The prefix is already escaped, but it might include
                 * "-" which has a special meaning for slice units,
                 * hence escape it here extra. */
                escaped = unit_name_escape(prefix);
                if (!escaped)
                        return -ENOMEM;

                /* strjoina() allocates on the stack; slice_name stays valid through this function
                 * only, which is fine since manager_load_unit() copies the name. */
                if (MANAGER_IS_SYSTEM(u->manager))
                        slice_name = strjoina("system-", escaped, ".slice");
                else
                        slice_name = strjoina("app-", escaped, ".slice");

        } else if (unit_is_extrinsic(u))
                /* Keep all extrinsic units (e.g. perpetual units and swap and mount units in user mode) in
                 * the root slice. They don't really belong in one of the subslices. */
                slice_name = SPECIAL_ROOT_SLICE;

        else if (MANAGER_IS_SYSTEM(u->manager))
                slice_name = SPECIAL_SYSTEM_SLICE;
        else
                slice_name = SPECIAL_APP_SLICE;

        r = manager_load_unit(u->manager, slice_name, NULL, NULL, &slice);
        if (r < 0)
                return r;

        return unit_set_slice(u, slice);
}
3105
3106 const char *unit_slice_name(Unit *u) {
3107 assert(u);
3108
3109 if (!UNIT_ISSET(u->slice))
3110 return NULL;
3111
3112 return UNIT_DEREF(u->slice)->id;
3113 }
3114
3115 int unit_load_related_unit(Unit *u, const char *type, Unit **_found) {
3116 _cleanup_free_ char *t = NULL;
3117 int r;
3118
3119 assert(u);
3120 assert(type);
3121 assert(_found);
3122
3123 r = unit_name_change_suffix(u->id, type, &t);
3124 if (r < 0)
3125 return r;
3126 if (unit_has_name(u, t))
3127 return -EINVAL;
3128
3129 r = manager_load_unit(u->manager, t, NULL, NULL, _found);
3130 assert(r < 0 || *_found != u);
3131 return r;
3132 }
3133
3134 static int signal_name_owner_changed(sd_bus_message *message, void *userdata, sd_bus_error *error) {
3135 const char *new_owner;
3136 Unit *u = userdata;
3137 int r;
3138
3139 assert(message);
3140 assert(u);
3141
3142 r = sd_bus_message_read(message, "sss", NULL, NULL, &new_owner);
3143 if (r < 0) {
3144 bus_log_parse_error(r);
3145 return 0;
3146 }
3147
3148 if (UNIT_VTABLE(u)->bus_name_owner_change)
3149 UNIT_VTABLE(u)->bus_name_owner_change(u, empty_to_null(new_owner));
3150
3151 return 0;
3152 }
3153
/* Reply handler for the async GetNameOwner() call issued in unit_install_bus_match(): resolves the
 * initial owner of the watched bus name and forwards it to the unit type's handler. */
static int get_name_owner_handler(sd_bus_message *message, void *userdata, sd_bus_error *error) {
        const sd_bus_error *e;
        const char *new_owner;
        Unit *u = userdata;
        int r;

        assert(message);
        assert(u);

        /* One-shot call: release the slot now that the reply arrived. */
        u->get_name_owner_slot = sd_bus_slot_unref(u->get_name_owner_slot);

        e = sd_bus_message_get_error(message);
        if (e) {
                /* "NameHasNoOwner" is the expected answer when nobody owns the name yet; anything else
                 * is worth complaining about. Either way we treat it as "no owner". */
                if (!sd_bus_error_has_name(e, "org.freedesktop.DBus.Error.NameHasNoOwner"))
                        log_unit_error(u, "Unexpected error response from GetNameOwner(): %s", e->message);

                new_owner = NULL;
        } else {
                r = sd_bus_message_read(message, "s", &new_owner);
                if (r < 0)
                        return bus_log_parse_error(r);

                /* A successful GetNameOwner() reply always names a real owner. */
                assert(!isempty(new_owner));
        }

        if (UNIT_VTABLE(u)->bus_name_owner_change)
                UNIT_VTABLE(u)->bus_name_owner_change(u, new_owner);

        return 0;
}
3184
/* Installs, on @bus, both a NameOwnerChanged signal match for @name and an async GetNameOwner() query
 * to learn the current owner. Fails with -EBUSY if either slot is already in use. On failure of the
 * second step the first is rolled back, so the two slots are always set up (or torn down) together. */
int unit_install_bus_match(Unit *u, sd_bus *bus, const char *name) {
        const char *match;
        int r;

        assert(u);
        assert(bus);
        assert(name);

        if (u->match_bus_slot || u->get_name_owner_slot)
                return -EBUSY;

        /* strjoina() builds the match rule on the stack; sd_bus_add_match_async() copies it. */
        match = strjoina("type='signal',"
                         "sender='org.freedesktop.DBus',"
                         "path='/org/freedesktop/DBus',"
                         "interface='org.freedesktop.DBus',"
                         "member='NameOwnerChanged',"
                         "arg0='", name, "'");

        r = sd_bus_add_match_async(bus, &u->match_bus_slot, match, signal_name_owner_changed, NULL, u);
        if (r < 0)
                return r;

        /* Also query the current owner once; the reply lands in get_name_owner_handler(). */
        r = sd_bus_call_method_async(
                        bus,
                        &u->get_name_owner_slot,
                        "org.freedesktop.DBus",
                        "/org/freedesktop/DBus",
                        "org.freedesktop.DBus",
                        "GetNameOwner",
                        get_name_owner_handler,
                        u,
                        "s", name);
        if (r < 0) {
                /* Roll back the signal match so we don't end up half-subscribed. */
                u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
                return r;
        }

        log_unit_debug(u, "Watching D-Bus name '%s'.", name);
        return 0;
}
3225
3226 int unit_watch_bus_name(Unit *u, const char *name) {
3227 int r;
3228
3229 assert(u);
3230 assert(name);
3231
3232 /* Watch a specific name on the bus. We only support one unit
3233 * watching each name for now. */
3234
3235 if (u->manager->api_bus) {
3236 /* If the bus is already available, install the match directly.
3237 * Otherwise, just put the name in the list. bus_setup_api() will take care later. */
3238 r = unit_install_bus_match(u, u->manager->api_bus, name);
3239 if (r < 0)
3240 return log_warning_errno(r, "Failed to subscribe to NameOwnerChanged signal for '%s': %m", name);
3241 }
3242
3243 r = hashmap_put(u->manager->watch_bus, name, u);
3244 if (r < 0) {
3245 u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
3246 u->get_name_owner_slot = sd_bus_slot_unref(u->get_name_owner_slot);
3247 return log_warning_errno(r, "Failed to put bus name to hashmap: %m");
3248 }
3249
3250 return 0;
3251 }
3252
3253 void unit_unwatch_bus_name(Unit *u, const char *name) {
3254 assert(u);
3255 assert(name);
3256
3257 (void) hashmap_remove_value(u->manager->watch_bus, name, u);
3258 u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
3259 u->get_name_owner_slot = sd_bus_slot_unref(u->get_name_owner_slot);
3260 }
3261
3262 bool unit_can_serialize(Unit *u) {
3263 assert(u);
3264
3265 return UNIT_VTABLE(u)->serialize && UNIT_VTABLE(u)->deserialize_item;
3266 }
3267
3268 int unit_add_node_dependency(Unit *u, const char *what, UnitDependency dep, UnitDependencyMask mask) {
3269 _cleanup_free_ char *e = NULL;
3270 Unit *device;
3271 int r;
3272
3273 assert(u);
3274
3275 /* Adds in links to the device node that this unit is based on */
3276 if (isempty(what))
3277 return 0;
3278
3279 if (!is_device_path(what))
3280 return 0;
3281
3282 /* When device units aren't supported (such as in a container), don't create dependencies on them. */
3283 if (!unit_type_supported(UNIT_DEVICE))
3284 return 0;
3285
3286 r = unit_name_from_path(what, ".device", &e);
3287 if (r < 0)
3288 return r;
3289
3290 r = manager_load_unit(u->manager, e, NULL, NULL, &device);
3291 if (r < 0)
3292 return r;
3293
3294 if (dep == UNIT_REQUIRES && device_shall_be_bound_by(device, u))
3295 dep = UNIT_BINDS_TO;
3296
3297 return unit_add_two_dependencies(u, UNIT_AFTER,
3298 MANAGER_IS_SYSTEM(u->manager) ? dep : UNIT_WANTS,
3299 device, true, mask);
3300 }
3301
3302 int unit_add_blockdev_dependency(Unit *u, const char *what, UnitDependencyMask mask) {
3303 _cleanup_free_ char *escaped = NULL, *target = NULL;
3304 int r;
3305
3306 assert(u);
3307
3308 if (isempty(what))
3309 return 0;
3310
3311 if (!path_startswith(what, "/dev/"))
3312 return 0;
3313
3314 /* If we don't support devices, then also don't bother with blockdev@.target */
3315 if (!unit_type_supported(UNIT_DEVICE))
3316 return 0;
3317
3318 r = unit_name_path_escape(what, &escaped);
3319 if (r < 0)
3320 return r;
3321
3322 r = unit_name_build("blockdev", escaped, ".target", &target);
3323 if (r < 0)
3324 return r;
3325
3326 return unit_add_dependency_by_name(u, UNIT_AFTER, target, true, mask);
3327 }
3328
3329 int unit_coldplug(Unit *u) {
3330 int r = 0, q;
3331 char **i;
3332 Job *uj;
3333
3334 assert(u);
3335
3336 /* Make sure we don't enter a loop, when coldplugging recursively. */
3337 if (u->coldplugged)
3338 return 0;
3339
3340 u->coldplugged = true;
3341
3342 STRV_FOREACH(i, u->deserialized_refs) {
3343 q = bus_unit_track_add_name(u, *i);
3344 if (q < 0 && r >= 0)
3345 r = q;
3346 }
3347 u->deserialized_refs = strv_free(u->deserialized_refs);
3348
3349 if (UNIT_VTABLE(u)->coldplug) {
3350 q = UNIT_VTABLE(u)->coldplug(u);
3351 if (q < 0 && r >= 0)
3352 r = q;
3353 }
3354
3355 uj = u->job ?: u->nop_job;
3356 if (uj) {
3357 q = job_coldplug(uj);
3358 if (q < 0 && r >= 0)
3359 r = q;
3360 }
3361
3362 return r;
3363 }
3364
/* Gives the unit type a chance to catch up with external state changes that happened while we
 * were not running, by invoking its optional catchup() vtable hook. */
void unit_catchup(Unit *u) {
        assert(u);

        if (UNIT_VTABLE(u)->catchup)
                UNIT_VTABLE(u)->catchup(u);
}
3371
3372 static bool fragment_mtime_newer(const char *path, usec_t mtime, bool path_masked) {
3373 struct stat st;
3374
3375 if (!path)
3376 return false;
3377
3378 /* If the source is some virtual kernel file system, then we assume we watch it anyway, and hence pretend we
3379 * are never out-of-date. */
3380 if (PATH_STARTSWITH_SET(path, "/proc", "/sys"))
3381 return false;
3382
3383 if (stat(path, &st) < 0)
3384 /* What, cannot access this anymore? */
3385 return true;
3386
3387 if (path_masked)
3388 /* For masked files check if they are still so */
3389 return !null_or_empty(&st);
3390 else
3391 /* For non-empty files check the mtime */
3392 return timespec_load(&st.st_mtim) > mtime;
3393
3394 return false;
3395 }
3396
/* Returns true if any on-disk configuration of this unit (fragment, source, drop-ins) changed
 * since it was loaded, i.e. whether a daemon-reload is needed to pick up the current state. */
bool unit_need_daemon_reload(Unit *u) {
        _cleanup_strv_free_ char **t = NULL;
        char **path;

        assert(u);

        /* For unit files, we allow masking… */
        if (fragment_mtime_newer(u->fragment_path, u->fragment_mtime,
                                 u->load_state == UNIT_MASKED))
                return true;

        /* Source paths should not be masked… */
        if (fragment_mtime_newer(u->source_path, u->source_mtime, false))
                return true;

        /* Compare the current drop-in list with what we loaded; t stays NULL when the unit isn't
         * loaded, in which case a non-empty recorded list also counts as a change. */
        if (u->load_state == UNIT_LOADED)
                (void) unit_find_dropin_paths(u, &t);
        if (!strv_equal(u->dropin_paths, t))
                return true;

        /* … any drop-ins that are masked are simply omitted from the list. */
        STRV_FOREACH(path, u->dropin_paths)
                if (fragment_mtime_newer(*path, u->dropin_mtime, false))
                        return true;

        return false;
}
3424
/* Clears the unit's failure state: lets the type reset its own failure bookkeeping, then resets
 * the start rate limiting so the unit may be started again. */
void unit_reset_failed(Unit *u) {
        assert(u);

        if (UNIT_VTABLE(u)->reset_failed)
                UNIT_VTABLE(u)->reset_failed(u);

        ratelimit_reset(&u->start_ratelimit);
        u->start_limit_hit = false;
}
3434
3435 Unit *unit_following(Unit *u) {
3436 assert(u);
3437
3438 if (UNIT_VTABLE(u)->following)
3439 return UNIT_VTABLE(u)->following(u);
3440
3441 return NULL;
3442 }
3443
bool unit_stop_pending(Unit *u) {
        assert(u);

        /* This call does check the current state of the unit. It's
         * hence useful to be called from state change calls of the
         * unit itself, where the state isn't updated yet. This is
         * different from unit_inactive_or_pending() which checks both
         * the current state and for a queued job. */

        return unit_has_job_type(u, JOB_STOP);
}
3455
3456 bool unit_inactive_or_pending(Unit *u) {
3457 assert(u);
3458
3459 /* Returns true if the unit is inactive or going down */
3460
3461 if (UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)))
3462 return true;
3463
3464 if (unit_stop_pending(u))
3465 return true;
3466
3467 return false;
3468 }
3469
3470 bool unit_active_or_pending(Unit *u) {
3471 assert(u);
3472
3473 /* Returns true if the unit is active or going up */
3474
3475 if (UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)))
3476 return true;
3477
3478 if (u->job &&
3479 IN_SET(u->job->type, JOB_START, JOB_RELOAD_OR_START, JOB_RESTART))
3480 return true;
3481
3482 return false;
3483 }
3484
/* Default will_restart() implementation: a unit is considered about to restart when a start job
 * is queued for it. */
bool unit_will_restart_default(Unit *u) {
        assert(u);

        return unit_has_job_type(u, JOB_START);
}
3490
3491 bool unit_will_restart(Unit *u) {
3492 assert(u);
3493
3494 if (!UNIT_VTABLE(u)->will_restart)
3495 return false;
3496
3497 return UNIT_VTABLE(u)->will_restart(u);
3498 }
3499
/* Sends the given signal to the selected processes of the unit on client request, by dispatching
 * to the type-specific kill() implementation. Returns -EOPNOTSUPP if the type has none. */
int unit_kill(Unit *u, KillWho w, int signo, sd_bus_error *error) {
        assert(u);
        assert(w >= 0 && w < _KILL_WHO_MAX);
        assert(SIGNAL_VALID(signo));

        if (!UNIT_VTABLE(u)->kill)
                return -EOPNOTSUPP;

        return UNIT_VTABLE(u)->kill(u, w, signo, error);
}
3510
3511 static Set *unit_pid_set(pid_t main_pid, pid_t control_pid) {
3512 _cleanup_set_free_ Set *pid_set = NULL;
3513 int r;
3514
3515 pid_set = set_new(NULL);
3516 if (!pid_set)
3517 return NULL;
3518
3519 /* Exclude the main/control pids from being killed via the cgroup */
3520 if (main_pid > 0) {
3521 r = set_put(pid_set, PID_TO_PTR(main_pid));
3522 if (r < 0)
3523 return NULL;
3524 }
3525
3526 if (control_pid > 0) {
3527 r = set_put(pid_set, PID_TO_PTR(control_pid));
3528 if (r < 0)
3529 return NULL;
3530 }
3531
3532 return TAKE_PTR(pid_set);
3533 }
3534
/* Logging callback passed to cg_kill_recursive() by unit_kill_common(): announces each process
 * about to be signalled. Always returns 1 (i.e. never suppresses the kill). */
static int kill_common_log(pid_t pid, int signo, void *userdata) {
        _cleanup_free_ char *comm = NULL;
        Unit *u = userdata;

        assert(u);

        (void) get_process_comm(pid, &comm);
        log_unit_info(u, "Sending signal SIG%s to process " PID_FMT " (%s) on client request.",
                      signal_to_string(signo), pid, strna(comm));

        return 1;
}
3547
int unit_kill_common(
                Unit *u,
                KillWho who,
                int signo,
                pid_t main_pid,
                pid_t control_pid,
                sd_bus_error *error) {

        int r = 0;
        bool killed = false;

        /* This is the common implementation for explicit user-requested killing of unit processes, shared by
         * various unit types. Do not confuse with unit_kill_context(), which is what we use when we want to
         * stop a service ourselves. */

        /* A negative PID means the unit type has no concept of that process kind at all; zero means it
         * has the concept but no such process right now. Both are reported back to the client. */
        if (IN_SET(who, KILL_MAIN, KILL_MAIN_FAIL)) {
                if (main_pid < 0)
                        return sd_bus_error_setf(error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no main processes", unit_type_to_string(u->type));
                if (main_pid == 0)
                        return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No main process to kill");
        }

        if (IN_SET(who, KILL_CONTROL, KILL_CONTROL_FAIL)) {
                if (control_pid < 0)
                        return sd_bus_error_setf(error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no control processes", unit_type_to_string(u->type));
                if (control_pid == 0)
                        return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No control process to kill");
        }

        if (IN_SET(who, KILL_CONTROL, KILL_CONTROL_FAIL, KILL_ALL, KILL_ALL_FAIL))
                if (control_pid > 0) {
                        _cleanup_free_ char *comm = NULL;
                        (void) get_process_comm(control_pid, &comm);

                        if (kill(control_pid, signo) < 0) {
                                /* Report this failure both to the logs and to the client */
                                sd_bus_error_set_errnof(
                                                error, errno,
                                                "Failed to send signal SIG%s to control process " PID_FMT " (%s): %m",
                                                signal_to_string(signo), control_pid, strna(comm));
                                r = log_unit_warning_errno(
                                                u, errno,
                                                "Failed to send signal SIG%s to control process " PID_FMT " (%s) on client request: %m",
                                                signal_to_string(signo), control_pid, strna(comm));
                        } else {
                                log_unit_info(u, "Sent signal SIG%s to control process " PID_FMT " (%s) on client request.",
                                              signal_to_string(signo), control_pid, strna(comm));
                                killed = true;
                        }
                }

        if (IN_SET(who, KILL_MAIN, KILL_MAIN_FAIL, KILL_ALL, KILL_ALL_FAIL))
                if (main_pid > 0) {
                        _cleanup_free_ char *comm = NULL;
                        (void) get_process_comm(main_pid, &comm);

                        if (kill(main_pid, signo) < 0) {
                                /* Only the first failure is propagated to the client; later ones are logged only. */
                                if (r == 0)
                                        sd_bus_error_set_errnof(
                                                        error, errno,
                                                        "Failed to send signal SIG%s to main process " PID_FMT " (%s): %m",
                                                        signal_to_string(signo), main_pid, strna(comm));

                                r = log_unit_warning_errno(
                                                u, errno,
                                                "Failed to send signal SIG%s to main process " PID_FMT " (%s) on client request: %m",
                                                signal_to_string(signo), main_pid, strna(comm));
                        } else {
                                log_unit_info(u, "Sent signal SIG%s to main process " PID_FMT " (%s) on client request.",
                                              signal_to_string(signo), main_pid, strna(comm));
                                killed = true;
                        }
                }

        if (IN_SET(who, KILL_ALL, KILL_ALL_FAIL) && u->cgroup_path) {
                _cleanup_set_free_ Set *pid_set = NULL;
                int q;

                /* Exclude the main/control pids from being killed via the cgroup */
                pid_set = unit_pid_set(main_pid, control_pid);
                if (!pid_set)
                        return log_oom();

                q = cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, signo, 0, pid_set, kill_common_log, u);
                if (q < 0) {
                        /* A missing or empty cgroup is not an error for KILL_ALL*. */
                        if (!IN_SET(q, -ESRCH, -ENOENT)) {
                                if (r == 0)
                                        sd_bus_error_set_errnof(
                                                        error, q,
                                                        "Failed to send signal SIG%s to auxiliary processes: %m",
                                                        signal_to_string(signo));

                                r = log_unit_warning_errno(
                                                u, q,
                                                "Failed to send signal SIG%s to auxiliary processes on client request: %m",
                                                signal_to_string(signo));
                        }
                } else
                        killed = true;
        }

        /* If the "fail" versions of the operation are requested, then complain if the set of processes we killed is empty */
        if (r == 0 && !killed && IN_SET(who, KILL_ALL_FAIL, KILL_CONTROL_FAIL, KILL_MAIN_FAIL))
                return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No matching processes to kill");

        return r;
}
3655
3656 int unit_following_set(Unit *u, Set **s) {
3657 assert(u);
3658 assert(s);
3659
3660 if (UNIT_VTABLE(u)->following_set)
3661 return UNIT_VTABLE(u)->following_set(u, s);
3662
3663 *s = NULL;
3664 return 0;
3665 }
3666
3667 UnitFileState unit_get_unit_file_state(Unit *u) {
3668 int r;
3669
3670 assert(u);
3671
3672 if (u->unit_file_state < 0 && u->fragment_path) {
3673 r = unit_file_get_state(
3674 u->manager->unit_file_scope,
3675 NULL,
3676 u->id,
3677 &u->unit_file_state);
3678 if (r < 0)
3679 u->unit_file_state = UNIT_FILE_BAD;
3680 }
3681
3682 return u->unit_file_state;
3683 }
3684
3685 int unit_get_unit_file_preset(Unit *u) {
3686 assert(u);
3687
3688 if (u->unit_file_preset < 0 && u->fragment_path)
3689 u->unit_file_preset = unit_file_query_preset(
3690 u->manager->unit_file_scope,
3691 NULL,
3692 basename(u->fragment_path),
3693 NULL);
3694
3695 return u->unit_file_preset;
3696 }
3697
3698 Unit* unit_ref_set(UnitRef *ref, Unit *source, Unit *target) {
3699 assert(ref);
3700 assert(source);
3701 assert(target);
3702
3703 if (ref->target)
3704 unit_ref_unset(ref);
3705
3706 ref->source = source;
3707 ref->target = target;
3708 LIST_PREPEND(refs_by_target, target->refs_by_target, ref);
3709 return target;
3710 }
3711
/* Releases a unit reference: delists it from the target's back-reference list and clears both
 * pointers. Safe to call on an already-unset reference. */
void unit_ref_unset(UnitRef *ref) {
        assert(ref);

        if (!ref->target)
                return;

        /* We are about to drop a reference to the unit, make sure the garbage collection has a look at it as it might
         * be unreferenced now. */
        unit_add_to_gc_queue(ref->target);

        LIST_REMOVE(refs_by_target, ref->target->refs_by_target, ref);
        ref->source = ref->target = NULL;
}
3725
3726 static int user_from_unit_name(Unit *u, char **ret) {
3727
3728 static const uint8_t hash_key[] = {
3729 0x58, 0x1a, 0xaf, 0xe6, 0x28, 0x58, 0x4e, 0x96,
3730 0xb4, 0x4e, 0xf5, 0x3b, 0x8c, 0x92, 0x07, 0xec
3731 };
3732
3733 _cleanup_free_ char *n = NULL;
3734 int r;
3735
3736 r = unit_name_to_prefix(u->id, &n);
3737 if (r < 0)
3738 return r;
3739
3740 if (valid_user_group_name(n, 0)) {
3741 *ret = TAKE_PTR(n);
3742 return 0;
3743 }
3744
3745 /* If we can't use the unit name as a user name, then let's hash it and use that */
3746 if (asprintf(ret, "_du%016" PRIx64, siphash24(n, strlen(n), hash_key)) < 0)
3747 return -ENOMEM;
3748
3749 return 0;
3750 }
3751
int unit_patch_contexts(Unit *u) {
        CGroupContext *cc;
        ExecContext *ec;
        int r;

        assert(u);

        /* Patch in the manager defaults into the exec and cgroup
         * contexts, _after_ the rest of the settings have been
         * initialized */

        ec = unit_get_exec_context(u);
        if (ec) {
                /* This only copies in the ones that need memory */
                for (unsigned i = 0; i < _RLIMIT_MAX; i++)
                        if (u->manager->rlimit[i] && !ec->rlimit[i]) {
                                ec->rlimit[i] = newdup(struct rlimit, u->manager->rlimit[i], 1);
                                if (!ec->rlimit[i])
                                        return -ENOMEM;
                        }

                if (MANAGER_IS_USER(u->manager) &&
                    !ec->working_directory) {

                        r = get_home_dir(&ec->working_directory);
                        if (r < 0)
                                return r;

                        /* Allow user services to run, even if the
                         * home directory is missing */
                        ec->working_directory_missing_ok = true;
                }

                /* Drop capabilities from the bounding set that contradict the requested sandboxing. */
                if (ec->private_devices)
                        ec->capability_bounding_set &= ~((UINT64_C(1) << CAP_MKNOD) | (UINT64_C(1) << CAP_SYS_RAWIO));

                if (ec->protect_kernel_modules)
                        ec->capability_bounding_set &= ~(UINT64_C(1) << CAP_SYS_MODULE);

                if (ec->protect_kernel_logs)
                        ec->capability_bounding_set &= ~(UINT64_C(1) << CAP_SYSLOG);

                if (ec->protect_clock)
                        ec->capability_bounding_set &= ~((UINT64_C(1) << CAP_SYS_TIME) | (UINT64_C(1) << CAP_WAKE_ALARM));

                if (ec->dynamic_user) {
                        /* Synthesize user/group names from the unit name when not set explicitly. */
                        if (!ec->user) {
                                r = user_from_unit_name(u, &ec->user);
                                if (r < 0)
                                        return r;
                        }

                        if (!ec->group) {
                                ec->group = strdup(ec->user);
                                if (!ec->group)
                                        return -ENOMEM;
                        }

                        /* If the dynamic user option is on, let's make sure that the unit can't leave its
                         * UID/GID around in the file system or on IPC objects. Hence enforce a strict
                         * sandbox. */

                        ec->private_tmp = true;
                        ec->remove_ipc = true;
                        ec->protect_system = PROTECT_SYSTEM_STRICT;
                        if (ec->protect_home == PROTECT_HOME_NO)
                                ec->protect_home = PROTECT_HOME_READ_ONLY;

                        /* Make sure this service can neither benefit from SUID/SGID binaries nor create
                         * them. */
                        ec->no_new_privileges = true;
                        ec->restrict_suid_sgid = true;
                }
        }

        cc = unit_get_cgroup_context(u);
        if (cc && ec) {

                if (ec->private_devices &&
                    cc->device_policy == CGROUP_DEVICE_POLICY_AUTO)
                        cc->device_policy = CGROUP_DEVICE_POLICY_CLOSED;

                if ((ec->root_image || !LIST_IS_EMPTY(ec->mount_images)) &&
                    (cc->device_policy != CGROUP_DEVICE_POLICY_AUTO || cc->device_allow)) {
                        const char *p;

                        /* When RootImage= or MountImages= is specified, the following devices are touched. */
                        FOREACH_STRING(p, "/dev/loop-control", "/dev/mapper/control") {
                                r = cgroup_add_device_allow(cc, p, "rw");
                                if (r < 0)
                                        return r;
                        }
                        FOREACH_STRING(p, "block-loop", "block-blkext", "block-device-mapper") {
                                r = cgroup_add_device_allow(cc, p, "rwm");
                                if (r < 0)
                                        return r;
                        }

                        /* Make sure "block-loop" can be resolved, i.e. make sure "loop" shows up in /proc/devices.
                         * Same for mapper and verity. */
                        FOREACH_STRING(p, "modprobe@loop.service", "modprobe@dm_mod.service", "modprobe@dm_verity.service") {
                                r = unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_WANTS, p, true, UNIT_DEPENDENCY_FILE);
                                if (r < 0)
                                        return r;
                        }
                }

                if (ec->protect_clock) {
                        r = cgroup_add_device_allow(cc, "char-rtc", "r");
                        if (r < 0)
                                return r;
                }
        }

        return 0;
}
3868
3869 ExecContext *unit_get_exec_context(Unit *u) {
3870 size_t offset;
3871 assert(u);
3872
3873 if (u->type < 0)
3874 return NULL;
3875
3876 offset = UNIT_VTABLE(u)->exec_context_offset;
3877 if (offset <= 0)
3878 return NULL;
3879
3880 return (ExecContext*) ((uint8_t*) u + offset);
3881 }
3882
3883 KillContext *unit_get_kill_context(Unit *u) {
3884 size_t offset;
3885 assert(u);
3886
3887 if (u->type < 0)
3888 return NULL;
3889
3890 offset = UNIT_VTABLE(u)->kill_context_offset;
3891 if (offset <= 0)
3892 return NULL;
3893
3894 return (KillContext*) ((uint8_t*) u + offset);
3895 }
3896
3897 CGroupContext *unit_get_cgroup_context(Unit *u) {
3898 size_t offset;
3899
3900 if (u->type < 0)
3901 return NULL;
3902
3903 offset = UNIT_VTABLE(u)->cgroup_context_offset;
3904 if (offset <= 0)
3905 return NULL;
3906
3907 return (CGroupContext*) ((uint8_t*) u + offset);
3908 }
3909
3910 ExecRuntime *unit_get_exec_runtime(Unit *u) {
3911 size_t offset;
3912
3913 if (u->type < 0)
3914 return NULL;
3915
3916 offset = UNIT_VTABLE(u)->exec_runtime_offset;
3917 if (offset <= 0)
3918 return NULL;
3919
3920 return *(ExecRuntime**) ((uint8_t*) u + offset);
3921 }
3922
3923 static const char* unit_drop_in_dir(Unit *u, UnitWriteFlags flags) {
3924 assert(u);
3925
3926 if (UNIT_WRITE_FLAGS_NOOP(flags))
3927 return NULL;
3928
3929 if (u->transient) /* Redirect drop-ins for transient units always into the transient directory. */
3930 return u->manager->lookup_paths.transient;
3931
3932 if (flags & UNIT_PERSISTENT)
3933 return u->manager->lookup_paths.persistent_control;
3934
3935 if (flags & UNIT_RUNTIME)
3936 return u->manager->lookup_paths.runtime_control;
3937
3938 return NULL;
3939 }
3940
char* unit_escape_setting(const char *s, UnitWriteFlags flags, char **buf) {
        char *ret = NULL;

        if (!s)
                return NULL;

        /* Escapes the input string as requested. Returns the escaped string. If 'buf' is specified then the allocated
         * return buffer pointer is also written to *buf, except if no escaping was necessary, in which case *buf is
         * set to NULL, and the input pointer is returned as-is. This means the return value always contains a properly
         * escaped version, but *buf when passed only contains a pointer if an allocation was necessary. If *buf is
         * not specified, then the return value always needs to be freed. Callers can use this to optimize memory
         * allocations. */

        if (flags & UNIT_ESCAPE_SPECIFIERS) {
                ret = specifier_escape(s);
                if (!ret)
                        return NULL;

                /* Continue escaping from the intermediate result. */
                s = ret;
        }

        if (flags & UNIT_ESCAPE_C) {
                char *a;

                a = cescape(s);
                /* The specifier-escaped intermediate (if any) is no longer needed. */
                free(ret);
                if (!a)
                        return NULL;

                ret = a;
        }

        if (buf) {
                /* *buf is NULL when no allocation happened; the caller then must not free it. */
                *buf = ret;
                return ret ?: (char*) s;
        }

        /* Without 'buf' the caller always owns the returned string. */
        return ret ?: strdup(s);
}
3980
char* unit_concat_strv(char **l, UnitWriteFlags flags) {
        _cleanup_free_ char *result = NULL;
        size_t n = 0, allocated = 0;
        char **i;

        /* Takes a list of strings, escapes them, and concatenates them. This may be used to format command lines in a
         * way suitable for ExecStart= stanzas */

        STRV_FOREACH(i, l) {
                _cleanup_free_ char *buf = NULL;
                const char *p;
                size_t a;
                char *q;

                p = unit_escape_setting(*i, flags, &buf);
                if (!p)
                        return NULL;

                a = (n > 0) + 1 + strlen(p) + 1; /* separating space + " + entry + " */
                if (!GREEDY_REALLOC(result, allocated, n + a + 1))
                        return NULL;

                /* Append at the current end: optional separator, then the quoted entry. */
                q = result + n;
                if (n > 0)
                        *(q++) = ' ';

                *(q++) = '"';
                q = stpcpy(q, p);
                *(q++) = '"';

                n += a;
        }

        /* Ensure room for the terminating NUL even when the list was empty. */
        if (!GREEDY_REALLOC(result, allocated, n + 1))
                return NULL;

        result[n] = 0;

        return TAKE_PTR(result);
}
4021
/* Persists a single setting for the unit: while a transient unit file is being created the line
 * is appended there; otherwise a numbered drop-in file named after 'name' is written. */
int unit_write_setting(Unit *u, UnitWriteFlags flags, const char *name, const char *data) {
        _cleanup_free_ char *p = NULL, *q = NULL, *escaped = NULL;
        const char *dir, *wrapped;
        int r;

        assert(u);
        assert(name);
        assert(data);

        if (UNIT_WRITE_FLAGS_NOOP(flags))
                return 0;

        data = unit_escape_setting(data, flags, &escaped);
        if (!data)
                return -ENOMEM;

        /* Prefix the section header. If we are writing this out as transient file, then let's suppress this if the
         * previous section header is the same */

        if (flags & UNIT_PRIVATE) {
                if (!UNIT_VTABLE(u)->private_section)
                        return -EINVAL;

                if (!u->transient_file || u->last_section_private < 0)
                        data = strjoina("[", UNIT_VTABLE(u)->private_section, "]\n", data);
                else if (u->last_section_private == 0)
                        data = strjoina("\n[", UNIT_VTABLE(u)->private_section, "]\n", data);
        } else {
                if (!u->transient_file || u->last_section_private < 0)
                        data = strjoina("[Unit]\n", data);
                else if (u->last_section_private > 0)
                        data = strjoina("\n[Unit]\n", data);
        }

        if (u->transient_file) {
                /* When this is a transient unit file in creation, then let's not create a new drop-in but instead
                 * write to the transient unit file. */
                fputs(data, u->transient_file);

                if (!endswith(data, "\n"))
                        fputc('\n', u->transient_file);

                /* Remember which section we wrote this entry to */
                u->last_section_private = !!(flags & UNIT_PRIVATE);
                return 0;
        }

        dir = unit_drop_in_dir(u, flags);
        if (!dir)
                return -EINVAL;

        wrapped = strjoina("# This is a drop-in unit file extension, created via \"systemctl set-property\"\n"
                           "# or an equivalent operation. Do not edit.\n",
                           data,
                           "\n");

        r = drop_in_file(dir, u->id, 50, name, &p, &q);
        if (r < 0)
                return r;

        (void) mkdir_p_label(p, 0755);

        /* Make sure the drop-in dir is registered in our path cache. This way we don't need to stupidly
         * recreate the cache after every drop-in we write. */
        if (u->manager->unit_path_cache) {
                r = set_put_strdup(&u->manager->unit_path_cache, p);
                if (r < 0)
                        return r;
        }

        r = write_string_file_atomic_label(q, wrapped);
        if (r < 0)
                return r;

        /* Ownership of q moves into u->dropin_paths on success. */
        r = strv_push(&u->dropin_paths, q);
        if (r < 0)
                return r;
        q = NULL;

        strv_uniq(u->dropin_paths);

        u->dropin_mtime = now(CLOCK_REALTIME);

        return 0;
}
4107
4108 int unit_write_settingf(Unit *u, UnitWriteFlags flags, const char *name, const char *format, ...) {
4109 _cleanup_free_ char *p = NULL;
4110 va_list ap;
4111 int r;
4112
4113 assert(u);
4114 assert(name);
4115 assert(format);
4116
4117 if (UNIT_WRITE_FLAGS_NOOP(flags))
4118 return 0;
4119
4120 va_start(ap, format);
4121 r = vasprintf(&p, format, ap);
4122 va_end(ap);
4123
4124 if (r < 0)
4125 return -ENOMEM;
4126
4127 return unit_write_setting(u, flags, name, p);
4128 }
4129
/* Turns this unit into a transient one: opens a fresh transient unit file for writing, drops all
 * previously loaded configuration state and marks the unit for re-loading. */
int unit_make_transient(Unit *u) {
        _cleanup_free_ char *path = NULL;
        FILE *f;

        assert(u);

        if (!UNIT_VTABLE(u)->can_transient)
                return -EOPNOTSUPP;

        (void) mkdir_p_label(u->manager->lookup_paths.transient, 0755);

        path = path_join(u->manager->lookup_paths.transient, u->id);
        if (!path)
                return -ENOMEM;

        /* Let's open the file we'll write the transient settings into. This file is kept open as long as we are
         * creating the transient, and is closed in unit_load(), as soon as we start loading the file. */

        RUN_WITH_UMASK(0022) {
                f = fopen(path, "we");
                if (!f)
                        return -errno;
        }

        safe_fclose(u->transient_file);
        u->transient_file = f;

        free_and_replace(u->fragment_path, path);

        /* Forget everything loaded from the previous (non-transient) configuration. */
        u->source_path = mfree(u->source_path);
        u->dropin_paths = strv_free(u->dropin_paths);
        u->fragment_mtime = u->source_mtime = u->dropin_mtime = 0;

        u->load_state = UNIT_STUB;
        u->load_error = 0;
        u->transient = true;

        unit_add_to_dbus_queue(u);
        unit_add_to_gc_queue(u);

        fputs("# This is a transient unit file, created programmatically via the systemd API. Do not edit.\n",
              u->transient_file);

        return 0;
}
4175
4176 static int log_kill(pid_t pid, int sig, void *userdata) {
4177 _cleanup_free_ char *comm = NULL;
4178
4179 (void) get_process_comm(pid, &comm);
4180
4181 /* Don't log about processes marked with brackets, under the assumption that these are temporary processes
4182 only, like for example systemd's own PAM stub process. */
4183 if (comm && comm[0] == '(')
4184 return 0;
4185
4186 log_unit_notice(userdata,
4187 "Killing process " PID_FMT " (%s) with signal SIG%s.",
4188 pid,
4189 strna(comm),
4190 signal_to_string(sig));
4191
4192 return 1;
4193 }
4194
/* Maps a kill operation to the signal configured for it in the KillContext. *noteworthy is set
 * to true for the forceful operations (final kill, watchdog), which callers use to decide
 * whether to log each killed process. */
static int operation_to_signal(const KillContext *c, KillOperation k, bool *noteworthy) {
        assert(c);

        switch (k) {

        case KILL_TERMINATE:
        case KILL_TERMINATE_AND_LOG:
                *noteworthy = false;
                return c->kill_signal;

        case KILL_RESTART:
                *noteworthy = false;
                return restart_kill_signal(c);

        case KILL_KILL:
                *noteworthy = true;
                return c->final_kill_signal;

        case KILL_WATCHDOG:
                *noteworthy = true;
                return c->watchdog_signal;

        default:
                assert_not_reached("KillOperation unknown");
        }
}
4221
int unit_kill_context(
                Unit *u,
                KillContext *c,
                KillOperation k,
                pid_t main_pid,
                pid_t control_pid,
                bool main_pid_alien) {

        bool wait_for_exit = false, send_sighup;
        cg_kill_log_func_t log_func = NULL;
        int sig, r;

        assert(u);
        assert(c);

        /* Kill the processes belonging to this unit, in preparation for shutting the unit down. Returns > 0
         * if we killed something worth waiting for, 0 otherwise. Do not confuse with unit_kill_common()
         * which is used for user-requested killing of unit processes. */

        if (c->kill_mode == KILL_NONE)
                return 0;

        /* Pick the signal for this operation; only the forceful ones get per-process log lines. */
        bool noteworthy;
        sig = operation_to_signal(c, k, &noteworthy);
        if (noteworthy)
                log_func = log_kill;

        /* SIGHUP follow-up only applies to the terminate operations, and is pointless if the kill
         * signal already is SIGHUP. */
        send_sighup =
                c->send_sighup &&
                IN_SET(k, KILL_TERMINATE, KILL_TERMINATE_AND_LOG) &&
                sig != SIGHUP;

        if (main_pid > 0) {
                if (log_func)
                        log_func(main_pid, sig, u);

                r = kill_and_sigcont(main_pid, sig);
                if (r < 0 && r != -ESRCH) {
                        _cleanup_free_ char *comm = NULL;
                        (void) get_process_comm(main_pid, &comm);

                        log_unit_warning_errno(u, r, "Failed to kill main process " PID_FMT " (%s), ignoring: %m", main_pid, strna(comm));
                } else {
                        /* An "alien" main process (not forked by us) is not waited for. */
                        if (!main_pid_alien)
                                wait_for_exit = true;

                        if (r != -ESRCH && send_sighup)
                                (void) kill(main_pid, SIGHUP);
                }
        }

        if (control_pid > 0) {
                if (log_func)
                        log_func(control_pid, sig, u);

                r = kill_and_sigcont(control_pid, sig);
                if (r < 0 && r != -ESRCH) {
                        _cleanup_free_ char *comm = NULL;
                        (void) get_process_comm(control_pid, &comm);

                        log_unit_warning_errno(u, r, "Failed to kill control process " PID_FMT " (%s), ignoring: %m", control_pid, strna(comm));
                } else {
                        wait_for_exit = true;

                        if (r != -ESRCH && send_sighup)
                                (void) kill(control_pid, SIGHUP);
                }
        }

        if (u->cgroup_path &&
            (c->kill_mode == KILL_CONTROL_GROUP || (c->kill_mode == KILL_MIXED && k == KILL_KILL))) {
                _cleanup_set_free_ Set *pid_set = NULL;

                /* Exclude the main/control pids from being killed via the cgroup */
                pid_set = unit_pid_set(main_pid, control_pid);
                if (!pid_set)
                        return -ENOMEM;

                r = cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path,
                                      sig,
                                      CGROUP_SIGCONT|CGROUP_IGNORE_SELF,
                                      pid_set,
                                      log_func, u);
                if (r < 0) {
                        if (!IN_SET(r, -EAGAIN, -ESRCH, -ENOENT))
                                log_unit_warning_errno(u, r, "Failed to kill control group %s, ignoring: %m", u->cgroup_path);

                } else if (r > 0) {

                        /* FIXME: For now, on the legacy hierarchy, we will not wait for the cgroup members to die if
                         * we are running in a container or if this is a delegation unit, simply because cgroup
                         * notification is unreliable in these cases. It doesn't work at all in containers, and outside
                         * of containers it can be confused easily by left-over directories in the cgroup — which
                         * however should not exist in non-delegated units. On the unified hierarchy that's different,
                         * there we get proper events. Hence rely on them. */

                        if (cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER) > 0 ||
                            (detect_container() == 0 && !unit_cgroup_delegate(u)))
                                wait_for_exit = true;

                        if (send_sighup) {
                                /* Rebuild the exclusion set and sweep the cgroup once more with SIGHUP. */
                                set_free(pid_set);

                                pid_set = unit_pid_set(main_pid, control_pid);
                                if (!pid_set)
                                        return -ENOMEM;

                                (void) cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path,
                                                         SIGHUP,
                                                         CGROUP_IGNORE_SELF,
                                                         pid_set,
                                                         NULL, NULL);
                        }
                }
        }

        return wait_for_exit;
}
4340
int unit_require_mounts_for(Unit *u, const char *path, UnitDependencyMask mask) {
        _cleanup_free_ char *p = NULL;
        UnitDependencyInfo di;
        int r;

        assert(u);
        assert(path);

        /* Registers a unit for requiring a certain path and all its prefixes. We keep a hashtable of these paths in
         * the unit (from the path to the UnitDependencyInfo structure indicating how to the dependency came to
         * be). However, we build a prefix table for all possible prefixes so that new appearing mount units can easily
         * determine which units to make themselves a dependency of. */

        if (!path_is_absolute(path))
                return -EINVAL;

        r = hashmap_ensure_allocated(&u->requires_mounts_for, &path_hash_ops);
        if (r < 0)
                return r;

        /* Work on a private, simplified copy of the path. */
        p = strdup(path);
        if (!p)
                return -ENOMEM;

        path = path_simplify(p, true);

        if (!path_is_normalized(path))
                return -EPERM;

        if (hashmap_contains(u->requires_mounts_for, path))
                return 0;

        di = (UnitDependencyInfo) {
                .origin_mask = mask
        };

        r = hashmap_put(u->requires_mounts_for, path, di.data);
        if (r < 0)
                return r;
        /* Ownership of the path string was transferred into the hashmap. */
        p = NULL;

        /* Register the unit under every prefix of the path, so that new mount units can find it. */
        char prefix[strlen(path) + 1];
        PATH_FOREACH_PREFIX_MORE(prefix, path) {
                Set *x;

                x = hashmap_get(u->manager->units_requiring_mounts_for, prefix);
                if (!x) {
                        _cleanup_free_ char *q = NULL;

                        r = hashmap_ensure_allocated(&u->manager->units_requiring_mounts_for, &path_hash_ops);
                        if (r < 0)
                                return r;

                        q = strdup(prefix);
                        if (!q)
                                return -ENOMEM;

                        x = set_new(NULL);
                        if (!x)
                                return -ENOMEM;

                        r = hashmap_put(u->manager->units_requiring_mounts_for, q, x);
                        if (r < 0) {
                                set_free(x);
                                return r;
                        }
                        /* Key ownership moved into the hashmap. */
                        q = NULL;
                }

                r = set_put(x, u);
                if (r < 0)
                        return r;
        }

        return 0;
}
4417
4418 int unit_setup_exec_runtime(Unit *u) {
4419 ExecRuntime **rt;
4420 size_t offset;
4421 Unit *other;
4422 void *v;
4423 int r;
4424
4425 offset = UNIT_VTABLE(u)->exec_runtime_offset;
4426 assert(offset > 0);
4427
4428 /* Check if there already is an ExecRuntime for this unit? */
4429 rt = (ExecRuntime**) ((uint8_t*) u + offset);
4430 if (*rt)
4431 return 0;
4432
4433 /* Try to get it from somebody else */
4434 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_JOINS_NAMESPACE_OF]) {
4435 r = exec_runtime_acquire(u->manager, NULL, other->id, false, rt);
4436 if (r == 1)
4437 return 1;
4438 }
4439
4440 return exec_runtime_acquire(u->manager, unit_get_exec_context(u), u->id, true, rt);
4441 }
4442
4443 int unit_setup_dynamic_creds(Unit *u) {
4444 ExecContext *ec;
4445 DynamicCreds *dcreds;
4446 size_t offset;
4447
4448 assert(u);
4449
4450 offset = UNIT_VTABLE(u)->dynamic_creds_offset;
4451 assert(offset > 0);
4452 dcreds = (DynamicCreds*) ((uint8_t*) u + offset);
4453
4454 ec = unit_get_exec_context(u);
4455 assert(ec);
4456
4457 if (!ec->dynamic_user)
4458 return 0;
4459
4460 return dynamic_creds_acquire(dcreds, u->manager, ec->user, ec->group);
4461 }
4462
4463 bool unit_type_supported(UnitType t) {
4464 if (_unlikely_(t < 0))
4465 return false;
4466 if (_unlikely_(t >= _UNIT_TYPE_MAX))
4467 return false;
4468
4469 if (!unit_vtable[t]->supported)
4470 return true;
4471
4472 return unit_vtable[t]->supported();
4473 }
4474
4475 void unit_warn_if_dir_nonempty(Unit *u, const char* where) {
4476 int r;
4477
4478 assert(u);
4479 assert(where);
4480
4481 r = dir_is_empty(where);
4482 if (r > 0 || r == -ENOTDIR)
4483 return;
4484 if (r < 0) {
4485 log_unit_warning_errno(u, r, "Failed to check directory %s: %m", where);
4486 return;
4487 }
4488
4489 log_struct(LOG_NOTICE,
4490 "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR,
4491 LOG_UNIT_ID(u),
4492 LOG_UNIT_INVOCATION_ID(u),
4493 LOG_UNIT_MESSAGE(u, "Directory %s to mount over is not empty, mounting anyway.", where),
4494 "WHERE=%s", where);
4495 }
4496
4497 int unit_fail_if_noncanonical(Unit *u, const char* where) {
4498 _cleanup_free_ char *canonical_where = NULL;
4499 int r;
4500
4501 assert(u);
4502 assert(where);
4503
4504 r = chase_symlinks(where, NULL, CHASE_NONEXISTENT, &canonical_where, NULL);
4505 if (r < 0) {
4506 log_unit_debug_errno(u, r, "Failed to check %s for symlinks, ignoring: %m", where);
4507 return 0;
4508 }
4509
4510 /* We will happily ignore a trailing slash (or any redundant slashes) */
4511 if (path_equal(where, canonical_where))
4512 return 0;
4513
4514 /* No need to mention "." or "..", they would already have been rejected by unit_name_from_path() */
4515 log_struct(LOG_ERR,
4516 "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR,
4517 LOG_UNIT_ID(u),
4518 LOG_UNIT_INVOCATION_ID(u),
4519 LOG_UNIT_MESSAGE(u, "Mount path %s is not canonical (contains a symlink).", where),
4520 "WHERE=%s", where);
4521
4522 return -ELOOP;
4523 }
4524
4525 bool unit_is_pristine(Unit *u) {
4526 assert(u);
4527
4528 /* Check if the unit already exists or is already around,
4529 * in a number of different ways. Note that to cater for unit
4530 * types such as slice, we are generally fine with units that
4531 * are marked UNIT_LOADED even though nothing was actually
4532 * loaded, as those unit types don't require a file on disk. */
4533
4534 return !(!IN_SET(u->load_state, UNIT_NOT_FOUND, UNIT_LOADED) ||
4535 u->fragment_path ||
4536 u->source_path ||
4537 !strv_isempty(u->dropin_paths) ||
4538 u->job ||
4539 u->merged_into);
4540 }
4541
4542 pid_t unit_control_pid(Unit *u) {
4543 assert(u);
4544
4545 if (UNIT_VTABLE(u)->control_pid)
4546 return UNIT_VTABLE(u)->control_pid(u);
4547
4548 return 0;
4549 }
4550
4551 pid_t unit_main_pid(Unit *u) {
4552 assert(u);
4553
4554 if (UNIT_VTABLE(u)->main_pid)
4555 return UNIT_VTABLE(u)->main_pid(u);
4556
4557 return 0;
4558 }
4559
static void unit_unref_uid_internal(
                Unit *u,
                uid_t *ref_uid,
                bool destroy_now,
                void (*_manager_unref_uid)(Manager *m, uid_t uid, bool destroy_now)) {

        assert(u);
        assert(ref_uid);
        assert(_manager_unref_uid);

        /* Generic implementation of both unit_unref_uid() and unit_unref_gid(), under the assumption that uid_t and
         * gid_t are actually the same time, with the same validity rules.
         *
         * Drops a reference to UID/GID from a unit. */

        /* Compile-time guarantees that the uid/gid aliasing below is sound. */
        assert_cc(sizeof(uid_t) == sizeof(gid_t));
        assert_cc(UID_INVALID == (uid_t) GID_INVALID);

        if (!uid_is_valid(*ref_uid))
                return; /* No reference held — nothing to drop. */

        _manager_unref_uid(u->manager, *ref_uid, destroy_now);
        *ref_uid = UID_INVALID; /* Mark the reference as released. */
}

static void unit_unref_uid(Unit *u, bool destroy_now) {
        unit_unref_uid_internal(u, &u->ref_uid, destroy_now, manager_unref_uid);
}

static void unit_unref_gid(Unit *u, bool destroy_now) {
        /* Cast is safe per the assert_cc() checks in unit_unref_uid_internal(). */
        unit_unref_uid_internal(u, (uid_t*) &u->ref_gid, destroy_now, manager_unref_gid);
}

/* Drops both the UID and the GID reference of the unit, if any. */
void unit_unref_uid_gid(Unit *u, bool destroy_now) {
        assert(u);

        unit_unref_uid(u, destroy_now);
        unit_unref_gid(u, destroy_now);
}
4599
static int unit_ref_uid_internal(
                Unit *u,
                uid_t *ref_uid,
                uid_t uid,
                bool clean_ipc,
                int (*_manager_ref_uid)(Manager *m, uid_t uid, bool clean_ipc)) {

        int r;

        assert(u);
        assert(ref_uid);
        assert(uid_is_valid(uid));
        assert(_manager_ref_uid);

        /* Generic implementation of both unit_ref_uid() and unit_ref_guid(), under the assumption that uid_t and gid_t
         * are actually the same type, and have the same validity rules.
         *
         * Adds a reference on a specific UID/GID to this unit. Each unit referencing the same UID/GID maintains a
         * reference so that we can destroy the UID/GID's IPC resources as soon as this is requested and the counter
         * drops to zero. */

        /* Compile-time guarantees that the uid/gid aliasing below is sound. */
        assert_cc(sizeof(uid_t) == sizeof(gid_t));
        assert_cc(UID_INVALID == (uid_t) GID_INVALID);

        if (*ref_uid == uid)
                return 0; /* Same reference already held — idempotent. */

        if (uid_is_valid(*ref_uid)) /* Already set? */
                return -EBUSY;

        r = _manager_ref_uid(u->manager, uid, clean_ipc);
        if (r < 0)
                return r;

        *ref_uid = uid;
        return 1; /* 1 = new reference taken */
}

static int unit_ref_uid(Unit *u, uid_t uid, bool clean_ipc) {
        return unit_ref_uid_internal(u, &u->ref_uid, uid, clean_ipc, manager_ref_uid);
}

static int unit_ref_gid(Unit *u, gid_t gid, bool clean_ipc) {
        /* Casts are safe per the assert_cc() checks in unit_ref_uid_internal(). */
        return unit_ref_uid_internal(u, (uid_t*) &u->ref_gid, (uid_t) gid, clean_ipc, manager_ref_gid);
}
4645
static int unit_ref_uid_gid_internal(Unit *u, uid_t uid, gid_t gid, bool clean_ipc) {
        int r = 0, q = 0;

        assert(u);

        /* Reference both a UID and a GID in one go. Either references both, or neither. */

        if (uid_is_valid(uid)) {
                r = unit_ref_uid(u, uid, clean_ipc);
                if (r < 0)
                        return r;
        }

        if (gid_is_valid(gid)) {
                q = unit_ref_gid(u, gid, clean_ipc);
                if (q < 0) {
                        /* GID ref failed — roll back a freshly taken UID ref so we fail atomically. */
                        if (r > 0)
                                unit_unref_uid(u, false);

                        return q;
                }
        }

        /* > 0 if at least one new reference was actually taken. */
        return r > 0 || q > 0;
}
4671
4672 int unit_ref_uid_gid(Unit *u, uid_t uid, gid_t gid) {
4673 ExecContext *c;
4674 int r;
4675
4676 assert(u);
4677
4678 c = unit_get_exec_context(u);
4679
4680 r = unit_ref_uid_gid_internal(u, uid, gid, c ? c->remove_ipc : false);
4681 if (r < 0)
4682 return log_unit_warning_errno(u, r, "Couldn't add UID/GID reference to unit, proceeding without: %m");
4683
4684 return r;
4685 }
4686
4687 void unit_notify_user_lookup(Unit *u, uid_t uid, gid_t gid) {
4688 int r;
4689
4690 assert(u);
4691
4692 /* This is invoked whenever one of the forked off processes let's us know the UID/GID its user name/group names
4693 * resolved to. We keep track of which UID/GID is currently assigned in order to be able to destroy its IPC
4694 * objects when no service references the UID/GID anymore. */
4695
4696 r = unit_ref_uid_gid(u, uid, gid);
4697 if (r > 0)
4698 unit_add_to_dbus_queue(u);
4699 }
4700
4701 int unit_acquire_invocation_id(Unit *u) {
4702 sd_id128_t id;
4703 int r;
4704
4705 assert(u);
4706
4707 r = sd_id128_randomize(&id);
4708 if (r < 0)
4709 return log_unit_error_errno(u, r, "Failed to generate invocation ID for unit: %m");
4710
4711 r = unit_set_invocation_id(u, id);
4712 if (r < 0)
4713 return log_unit_error_errno(u, r, "Failed to set invocation ID for unit: %m");
4714
4715 unit_add_to_dbus_queue(u);
4716 return 0;
4717 }
4718
/* Fills in an ExecParameters structure from manager-wide and unit-specific settings, for use when
 * spawning processes for this unit. Returns 0 on success, negative errno on failure. */
int unit_set_exec_params(Unit *u, ExecParameters *p) {
        int r;

        assert(u);
        assert(p);

        /* Copy parameters from manager */
        r = manager_get_effective_environment(u->manager, &p->environment);
        if (r < 0)
                return r;

        p->confirm_spawn = manager_get_confirm_spawn(u->manager);
        p->cgroup_supported = u->manager->cgroup_supported;
        p->prefix = u->manager->prefix;
        SET_FLAG(p->flags, EXEC_PASS_LOG_UNIT|EXEC_CHOWN_DIRECTORIES, MANAGER_IS_SYSTEM(u->manager));

        /* Copy parameters from unit */
        p->cgroup_path = u->cgroup_path; /* borrowed, not copied — lifetime tied to the unit */
        SET_FLAG(p->flags, EXEC_CGROUP_DELEGATE, unit_cgroup_delegate(u));

        p->received_credentials = u->manager->received_credentials;

        return 0;
}
4743
int unit_fork_helper_process(Unit *u, const char *name, pid_t *ret) {
        int r;

        assert(u);
        assert(ret);

        /* Forks off a helper process and makes sure it is a member of the unit's cgroup. Returns == 0 in the child,
         * and > 0 in the parent. The pid parameter is always filled in with the child's PID. */

        (void) unit_realize_cgroup(u);

        r = safe_fork(name, FORK_REOPEN_LOG, ret);
        if (r != 0)
                return r; /* parent (> 0) or error (< 0) */

        /* Child: reset signal dispositions inherited from PID 1. */
        (void) default_signals(SIGNALS_CRASH_HANDLER, SIGNALS_IGNORE);
        (void) ignore_signals(SIGPIPE);

        /* Die if the manager goes away, so helpers don't outlive it. */
        (void) prctl(PR_SET_PDEATHSIG, SIGTERM);

        if (u->cgroup_path) {
                r = cg_attach_everywhere(u->manager->cgroup_supported, u->cgroup_path, 0, NULL, NULL);
                if (r < 0) {
                        log_unit_error_errno(u, r, "Failed to join unit cgroup %s: %m", u->cgroup_path);
                        _exit(EXIT_CGROUP);
                }
        }

        return 0;
}
4774
/* Forks a helper (inside the unit's cgroup) that recursively removes the given paths, and registers the
 * child PID with the unit so its exit is tracked. Returns 0 in the parent with *ret_pid set; never
 * returns in the child (it _exit()s). */
int unit_fork_and_watch_rm_rf(Unit *u, char **paths, pid_t *ret_pid) {
        pid_t pid;
        int r;

        assert(u);
        assert(ret_pid);

        r = unit_fork_helper_process(u, "(sd-rmrf)", &pid);
        if (r < 0)
                return r;
        if (r == 0) {
                /* Child: remove all paths, remembering whether anything failed. */
                int ret = EXIT_SUCCESS;
                char **i;

                STRV_FOREACH(i, paths) {
                        r = rm_rf(*i, REMOVE_ROOT|REMOVE_PHYSICAL|REMOVE_MISSING_OK);
                        if (r < 0) {
                                log_error_errno(r, "Failed to remove '%s': %m", *i);
                                ret = EXIT_FAILURE;
                        }
                }

                _exit(ret);
        }

        r = unit_watch_pid(u, pid, true);
        if (r < 0)
                return r;

        *ret_pid = pid;
        return 0;
}
4807
/* Applies an updated UnitDependencyInfo for the dependency u → other of type d: removes the hashmap
 * entry entirely when no mask bit remains, otherwise stores the reduced mask. */
static void unit_update_dependency_mask(Unit *u, UnitDependency d, Unit *other, UnitDependencyInfo di) {
        assert(u);
        assert(d >= 0);
        assert(d < _UNIT_DEPENDENCY_MAX);
        assert(other);

        if (di.origin_mask == 0 && di.destination_mask == 0) {
                /* No bit set anymore, let's drop the whole entry */
                assert_se(hashmap_remove(u->dependencies[d], other));
                log_unit_debug(u, "lost dependency %s=%s", unit_dependency_to_string(d), other->id);
        } else
                /* Mask was reduced, let's update the entry */
                assert_se(hashmap_update(u->dependencies[d], other, di.data) == 0);
}
4822
void unit_remove_dependencies(Unit *u, UnitDependencyMask mask) {
        assert(u);

        /* Removes all dependencies u has on other units marked for ownership by 'mask'. */

        if (mask == 0)
                return;

        for (UnitDependency d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
                bool done;

                /* unit_update_dependency_mask() may remove entries from the hashmap we are iterating, which
                 * would invalidate the iterator. Hence: after each removal, break out and restart the
                 * iteration from scratch until a full pass makes no changes. */
                do {
                        UnitDependencyInfo di;
                        Unit *other;

                        done = true;

                        HASHMAP_FOREACH_KEY(di.data, other, u->dependencies[d]) {
                                /* Skip entries whose origin bits lie entirely outside 'mask'. */
                                if (FLAGS_SET(~mask, di.origin_mask))
                                        continue;
                                di.origin_mask &= ~mask;
                                unit_update_dependency_mask(u, d, other, di);

                                /* We updated the dependency from our unit to the other unit now. But most dependencies
                                 * imply a reverse dependency. Hence, let's delete that one too. For that we go through
                                 * all dependency types on the other unit and delete all those which point to us and
                                 * have the right mask set. */

                                for (UnitDependency q = 0; q < _UNIT_DEPENDENCY_MAX; q++) {
                                        UnitDependencyInfo dj;

                                        dj.data = hashmap_get(other->dependencies[q], u);
                                        if (FLAGS_SET(~mask, dj.destination_mask))
                                                continue;
                                        dj.destination_mask &= ~mask;

                                        unit_update_dependency_mask(other, q, u, dj);
                                }

                                /* The other unit may have become eligible for collection now. */
                                unit_add_to_gc_queue(other);

                                done = false;
                                break;
                        }

                } while (!done);
        }
}
4871
4872 static int unit_get_invocation_path(Unit *u, char **ret) {
4873 char *p;
4874 int r;
4875
4876 assert(u);
4877 assert(ret);
4878
4879 if (MANAGER_IS_SYSTEM(u->manager))
4880 p = strjoin("/run/systemd/units/invocation:", u->id);
4881 else {
4882 _cleanup_free_ char *user_path = NULL;
4883 r = xdg_user_runtime_dir(&user_path, "/systemd/units/invocation:");
4884 if (r < 0)
4885 return r;
4886 p = strjoin(user_path, u->id);
4887 }
4888
4889 if (!p)
4890 return -ENOMEM;
4891
4892 *ret = p;
4893 return 0;
4894 }
4895
4896 static int unit_export_invocation_id(Unit *u) {
4897 _cleanup_free_ char *p = NULL;
4898 int r;
4899
4900 assert(u);
4901
4902 if (u->exported_invocation_id)
4903 return 0;
4904
4905 if (sd_id128_is_null(u->invocation_id))
4906 return 0;
4907
4908 r = unit_get_invocation_path(u, &p);
4909 if (r < 0)
4910 return log_unit_debug_errno(u, r, "Failed to get invocation path: %m");
4911
4912 r = symlink_atomic_label(u->invocation_id_string, p);
4913 if (r < 0)
4914 return log_unit_debug_errno(u, r, "Failed to create invocation ID symlink %s: %m", p);
4915
4916 u->exported_invocation_id = true;
4917 return 0;
4918 }
4919
4920 static int unit_export_log_level_max(Unit *u, const ExecContext *c) {
4921 const char *p;
4922 char buf[2];
4923 int r;
4924
4925 assert(u);
4926 assert(c);
4927
4928 if (u->exported_log_level_max)
4929 return 0;
4930
4931 if (c->log_level_max < 0)
4932 return 0;
4933
4934 assert(c->log_level_max <= 7);
4935
4936 buf[0] = '0' + c->log_level_max;
4937 buf[1] = 0;
4938
4939 p = strjoina("/run/systemd/units/log-level-max:", u->id);
4940 r = symlink_atomic(buf, p);
4941 if (r < 0)
4942 return log_unit_debug_errno(u, r, "Failed to create maximum log level symlink %s: %m", p);
4943
4944 u->exported_log_level_max = true;
4945 return 0;
4946 }
4947
4948 static int unit_export_log_extra_fields(Unit *u, const ExecContext *c) {
4949 _cleanup_close_ int fd = -1;
4950 struct iovec *iovec;
4951 const char *p;
4952 char *pattern;
4953 le64_t *sizes;
4954 ssize_t n;
4955 int r;
4956
4957 if (u->exported_log_extra_fields)
4958 return 0;
4959
4960 if (c->n_log_extra_fields <= 0)
4961 return 0;
4962
4963 sizes = newa(le64_t, c->n_log_extra_fields);
4964 iovec = newa(struct iovec, c->n_log_extra_fields * 2);
4965
4966 for (size_t i = 0; i < c->n_log_extra_fields; i++) {
4967 sizes[i] = htole64(c->log_extra_fields[i].iov_len);
4968
4969 iovec[i*2] = IOVEC_MAKE(sizes + i, sizeof(le64_t));
4970 iovec[i*2+1] = c->log_extra_fields[i];
4971 }
4972
4973 p = strjoina("/run/systemd/units/log-extra-fields:", u->id);
4974 pattern = strjoina(p, ".XXXXXX");
4975
4976 fd = mkostemp_safe(pattern);
4977 if (fd < 0)
4978 return log_unit_debug_errno(u, fd, "Failed to create extra fields file %s: %m", p);
4979
4980 n = writev(fd, iovec, c->n_log_extra_fields*2);
4981 if (n < 0) {
4982 r = log_unit_debug_errno(u, errno, "Failed to write extra fields: %m");
4983 goto fail;
4984 }
4985
4986 (void) fchmod(fd, 0644);
4987
4988 if (rename(pattern, p) < 0) {
4989 r = log_unit_debug_errno(u, errno, "Failed to rename extra fields file: %m");
4990 goto fail;
4991 }
4992
4993 u->exported_log_extra_fields = true;
4994 return 0;
4995
4996 fail:
4997 (void) unlink(pattern);
4998 return r;
4999 }
5000
5001 static int unit_export_log_ratelimit_interval(Unit *u, const ExecContext *c) {
5002 _cleanup_free_ char *buf = NULL;
5003 const char *p;
5004 int r;
5005
5006 assert(u);
5007 assert(c);
5008
5009 if (u->exported_log_ratelimit_interval)
5010 return 0;
5011
5012 if (c->log_ratelimit_interval_usec == 0)
5013 return 0;
5014
5015 p = strjoina("/run/systemd/units/log-rate-limit-interval:", u->id);
5016
5017 if (asprintf(&buf, "%" PRIu64, c->log_ratelimit_interval_usec) < 0)
5018 return log_oom();
5019
5020 r = symlink_atomic(buf, p);
5021 if (r < 0)
5022 return log_unit_debug_errno(u, r, "Failed to create log rate limit interval symlink %s: %m", p);
5023
5024 u->exported_log_ratelimit_interval = true;
5025 return 0;
5026 }
5027
5028 static int unit_export_log_ratelimit_burst(Unit *u, const ExecContext *c) {
5029 _cleanup_free_ char *buf = NULL;
5030 const char *p;
5031 int r;
5032
5033 assert(u);
5034 assert(c);
5035
5036 if (u->exported_log_ratelimit_burst)
5037 return 0;
5038
5039 if (c->log_ratelimit_burst == 0)
5040 return 0;
5041
5042 p = strjoina("/run/systemd/units/log-rate-limit-burst:", u->id);
5043
5044 if (asprintf(&buf, "%u", c->log_ratelimit_burst) < 0)
5045 return log_oom();
5046
5047 r = symlink_atomic(buf, p);
5048 if (r < 0)
5049 return log_unit_debug_errno(u, r, "Failed to create log rate limit burst symlink %s: %m", p);
5050
5051 u->exported_log_ratelimit_burst = true;
5052 return 0;
5053 }
5054
void unit_export_state_files(Unit *u) {
        const ExecContext *c;

        assert(u);

        if (!u->id)
                return;

        if (MANAGER_IS_TEST_RUN(u->manager))
                return;

        /* Exports a couple of unit properties to /run/systemd/units/, so that journald can quickly query this data
         * from there. Ideally, journald would use IPC to query this, like everybody else, but that's hard, as long as
         * the IPC system itself and PID 1 also log to the journal.
         *
         * Note that these files really shouldn't be considered API for anyone else, as use a runtime file system as
         * IPC replacement is not compatible with today's world of file system namespaces. However, this doesn't really
         * apply to communication between the journal and systemd, as we assume that these two daemons live in the same
         * namespace at least.
         *
         * Note that some of the "files" exported here are actually symlinks and not regular files. Symlinks work
         * better for storing small bits of data, in particular as we can write them with two system calls, and read
         * them with one. */

        (void) unit_export_invocation_id(u);

        /* The remaining exports are only meaningful for the system manager. */
        if (!MANAGER_IS_SYSTEM(u->manager))
                return;

        c = unit_get_exec_context(u);
        if (c) {
                (void) unit_export_log_level_max(u, c);
                (void) unit_export_log_extra_fields(u, c);
                (void) unit_export_log_ratelimit_interval(u, c);
                (void) unit_export_log_ratelimit_burst(u, c);
        }
}
5092
5093 void unit_unlink_state_files(Unit *u) {
5094 const char *p;
5095
5096 assert(u);
5097
5098 if (!u->id)
5099 return;
5100
5101 /* Undoes the effect of unit_export_state() */
5102
5103 if (u->exported_invocation_id) {
5104 _cleanup_free_ char *invocation_path = NULL;
5105 int r = unit_get_invocation_path(u, &invocation_path);
5106 if (r >= 0) {
5107 (void) unlink(invocation_path);
5108 u->exported_invocation_id = false;
5109 }
5110 }
5111
5112 if (!MANAGER_IS_SYSTEM(u->manager))
5113 return;
5114
5115 if (u->exported_log_level_max) {
5116 p = strjoina("/run/systemd/units/log-level-max:", u->id);
5117 (void) unlink(p);
5118
5119 u->exported_log_level_max = false;
5120 }
5121
5122 if (u->exported_log_extra_fields) {
5123 p = strjoina("/run/systemd/units/extra-fields:", u->id);
5124 (void) unlink(p);
5125
5126 u->exported_log_extra_fields = false;
5127 }
5128
5129 if (u->exported_log_ratelimit_interval) {
5130 p = strjoina("/run/systemd/units/log-rate-limit-interval:", u->id);
5131 (void) unlink(p);
5132
5133 u->exported_log_ratelimit_interval = false;
5134 }
5135
5136 if (u->exported_log_ratelimit_burst) {
5137 p = strjoina("/run/systemd/units/log-rate-limit-burst:", u->id);
5138 (void) unlink(p);
5139
5140 u->exported_log_ratelimit_burst = false;
5141 }
5142 }
5143
int unit_prepare_exec(Unit *u) {
        int r;

        assert(u);

        /* Load any custom firewall BPF programs here once to test if they are existing and actually loadable.
         * Fail here early since later errors in the call chain unit_realize_cgroup to cgroup_context_apply are ignored. */
        r = bpf_firewall_load_custom(u);
        if (r < 0)
                return r;

        /* Prepares everything so that we can fork of a process for this unit */

        (void) unit_realize_cgroup(u);

        /* Reset accounting once per start cycle, if requested. */
        if (u->reset_accounting) {
                (void) unit_reset_accounting(u);
                u->reset_accounting = false;
        }

        unit_export_state_files(u);

        r = unit_setup_exec_runtime(u);
        if (r < 0)
                return r;

        r = unit_setup_dynamic_creds(u);
        if (r < 0)
                return r;

        return 0;
}
5176
/* Processes whose comm name starts with '(' are most likely our own helper processes (PAM?) — skip them
 * when reporting left-over processes. */
static bool ignore_leftover_process(const char *comm) {
        if (!comm)
                return false;

        return comm[0] == '(';
}
5180
/* cg_kill_log_func_t callback: warns about processes found in the cgroup when a unit starts.
 * Returns 0 for ignored helpers, 1 when a warning was logged. */
int unit_log_leftover_process_start(pid_t pid, int sig, void *userdata) {
        _cleanup_free_ char *comm = NULL;

        (void) get_process_comm(pid, &comm);

        if (ignore_leftover_process(comm))
                return 0;

        /* During start we print a warning */

        log_unit_warning(userdata,
                         "Found left-over process " PID_FMT " (%s) in control group while starting unit. Ignoring.\n"
                         "This usually indicates unclean termination of a previous run, or service implementation deficiencies.",
                         pid, strna(comm));

        return 1;
}

/* cg_kill_log_func_t callback: informs about processes still in the cgroup after a unit stopped. */
int unit_log_leftover_process_stop(pid_t pid, int sig, void *userdata) {
        _cleanup_free_ char *comm = NULL;

        (void) get_process_comm(pid, &comm);

        if (ignore_leftover_process(comm))
                return 0;

        /* During stop we only print an informational message */

        log_unit_info(userdata,
                      "Unit process " PID_FMT " (%s) remains running after unit stopped.",
                      pid, strna(comm));

        return 1;
}
5215
/* Walks the unit's cgroup and invokes log_func (one of the unit_log_leftover_process_* callbacks above)
 * for every process found; kills nothing (signal 0). Returns 0 if the unit has no cgroup. */
int unit_warn_leftover_processes(Unit *u, cg_kill_log_func_t log_func) {
        assert(u);

        (void) unit_pick_cgroup_path(u);

        if (!u->cgroup_path)
                return 0;

        return cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, 0, 0, NULL, log_func, u);
}
5226
5227 bool unit_needs_console(Unit *u) {
5228 ExecContext *ec;
5229 UnitActiveState state;
5230
5231 assert(u);
5232
5233 state = unit_active_state(u);
5234
5235 if (UNIT_IS_INACTIVE_OR_FAILED(state))
5236 return false;
5237
5238 if (UNIT_VTABLE(u)->needs_console)
5239 return UNIT_VTABLE(u)->needs_console(u);
5240
5241 /* If this unit type doesn't implement this call, let's use a generic fallback implementation: */
5242 ec = unit_get_exec_context(u);
5243 if (!ec)
5244 return false;
5245
5246 return exec_context_may_touch_console(ec);
5247 }
5248
5249 const char *unit_label_path(const Unit *u) {
5250 const char *p;
5251
5252 assert(u);
5253
5254 /* Returns the file system path to use for MAC access decisions, i.e. the file to read the SELinux label off
5255 * when validating access checks. */
5256
5257 p = u->source_path ?: u->fragment_path;
5258 if (!p)
5259 return NULL;
5260
5261 /* If a unit is masked, then don't read the SELinux label of /dev/null, as that really makes no sense */
5262 if (null_or_empty_path(p) > 0)
5263 return NULL;
5264
5265 return p;
5266 }
5267
int unit_pid_attachable(Unit *u, pid_t pid, sd_bus_error *error) {
        int r;

        assert(u);

        /* Checks whether the specified PID is generally good for attaching, i.e. a valid PID, not our manager itself,
         * and not a kernel thread either */

        /* First, a simple range check */
        if (!pid_is_valid(pid))
                return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process identifier " PID_FMT " is not valid.", pid);

        /* Some extra safety check */
        if (pid == 1 || pid == getpid_cached())
                return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process " PID_FMT " is a manager process, refusing.", pid);

        /* Don't even begin to bother with kernel threads */
        r = is_kernel_thread(pid);
        if (r == -ESRCH)
                return sd_bus_error_setf(error, SD_BUS_ERROR_UNIX_PROCESS_ID_UNKNOWN, "Process with ID " PID_FMT " does not exist.", pid);
        if (r < 0)
                return sd_bus_error_set_errnof(error, r, "Failed to determine whether process " PID_FMT " is a kernel thread: %m", pid);
        if (r > 0)
                return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process " PID_FMT " is a kernel thread, refusing.", pid);

        return 0;
}
5295
/* Logs the standard "deactivated successfully" journal record for the unit. */
void unit_log_success(Unit *u) {
        assert(u);

        log_struct(LOG_INFO,
                   "MESSAGE_ID=" SD_MESSAGE_UNIT_SUCCESS_STR,
                   LOG_UNIT_ID(u),
                   LOG_UNIT_INVOCATION_ID(u),
                   LOG_UNIT_MESSAGE(u, "Deactivated successfully."));
}

/* Logs the standard failure journal record, including the machine-readable UNIT_RESULT= field. */
void unit_log_failure(Unit *u, const char *result) {
        assert(u);
        assert(result);

        log_struct(LOG_WARNING,
                   "MESSAGE_ID=" SD_MESSAGE_UNIT_FAILURE_RESULT_STR,
                   LOG_UNIT_ID(u),
                   LOG_UNIT_INVOCATION_ID(u),
                   LOG_UNIT_MESSAGE(u, "Failed with result '%s'.", result),
                   "UNIT_RESULT=%s", result);
}

/* Logs the standard "skipped" journal record (e.g. when a condition failed). */
void unit_log_skip(Unit *u, const char *result) {
        assert(u);
        assert(result);

        log_struct(LOG_INFO,
                   "MESSAGE_ID=" SD_MESSAGE_UNIT_SKIPPED_STR,
                   LOG_UNIT_ID(u),
                   LOG_UNIT_INVOCATION_ID(u),
                   LOG_UNIT_MESSAGE(u, "Skipped due to '%s'.", result),
                   "UNIT_RESULT=%s", result);
}
5329
/* Logs the exit of one of the unit's processes, with a log level chosen by how the process ended. */
void unit_log_process_exit(
                Unit *u,
                const char *kind,     /* human-readable process role, e.g. "Main process" */
                const char *command,  /* command line, may be NULL */
                bool success,
                int code,             /* SIGCHLD code: CLD_EXITED, CLD_KILLED, ... */
                int status) {         /* exit status or signal number, depending on 'code' */

        int level;

        assert(u);
        assert(kind);

        /* If this is a successful exit, let's log about the exit code on DEBUG level. If this is a failure
         * and the process exited on its own via exit(), then let's make this a NOTICE, under the assumption
         * that the service already logged the reason at a higher log level on its own. Otherwise, make it a
         * WARNING. */
        if (success)
                level = LOG_DEBUG;
        else if (code == CLD_EXITED)
                level = LOG_NOTICE;
        else
                level = LOG_WARNING;

        log_struct(level,
                   "MESSAGE_ID=" SD_MESSAGE_UNIT_PROCESS_EXIT_STR,
                   LOG_UNIT_MESSAGE(u, "%s exited, code=%s, status=%i/%s",
                                    kind,
                                    sigchld_code_to_string(code), status,
                                    strna(code == CLD_EXITED
                                          ? exit_status_to_string(status, EXIT_STATUS_FULL)
                                          : signal_to_string(status))),
                   "EXIT_CODE=%s", sigchld_code_to_string(code),
                   "EXIT_STATUS=%i", status,
                   "COMMAND=%s", strna(command),
                   LOG_UNIT_ID(u),
                   LOG_UNIT_INVOCATION_ID(u));
}
5368
5369 int unit_exit_status(Unit *u) {
5370 assert(u);
5371
5372 /* Returns the exit status to propagate for the most recent cycle of this unit. Returns a value in the range
5373 * 0…255 if there's something to propagate. EOPNOTSUPP if the concept does not apply to this unit type, ENODATA
5374 * if no data is currently known (for example because the unit hasn't deactivated yet) and EBADE if the main
5375 * service process has exited abnormally (signal/coredump). */
5376
5377 if (!UNIT_VTABLE(u)->exit_status)
5378 return -EOPNOTSUPP;
5379
5380 return UNIT_VTABLE(u)->exit_status(u);
5381 }
5382
5383 int unit_failure_action_exit_status(Unit *u) {
5384 int r;
5385
5386 assert(u);
5387
5388 /* Returns the exit status to propagate on failure, or an error if there's nothing to propagate */
5389
5390 if (u->failure_action_exit_status >= 0)
5391 return u->failure_action_exit_status;
5392
5393 r = unit_exit_status(u);
5394 if (r == -EBADE) /* Exited, but not cleanly (i.e. by signal or such) */
5395 return 255;
5396
5397 return r;
5398 }
5399
5400 int unit_success_action_exit_status(Unit *u) {
5401 int r;
5402
5403 assert(u);
5404
5405 /* Returns the exit status to propagate on success, or an error if there's nothing to propagate */
5406
5407 if (u->success_action_exit_status >= 0)
5408 return u->success_action_exit_status;
5409
5410 r = unit_exit_status(u);
5411 if (r == -EBADE) /* Exited, but not cleanly (i.e. by signal or such) */
5412 return 255;
5413
5414 return r;
5415 }
5416
5417 int unit_test_trigger_loaded(Unit *u) {
5418 Unit *trigger;
5419
5420 /* Tests whether the unit to trigger is loaded */
5421
5422 trigger = UNIT_TRIGGER(u);
5423 if (!trigger)
5424 return log_unit_error_errno(u, SYNTHETIC_ERRNO(ENOENT),
5425 "Refusing to start, no unit to trigger.");
5426 if (trigger->load_state != UNIT_LOADED)
5427 return log_unit_error_errno(u, SYNTHETIC_ERRNO(ENOENT),
5428 "Refusing to start, unit %s to trigger not loaded.", trigger->id);
5429
5430 return 0;
5431 }
5432
5433 void unit_destroy_runtime_data(Unit *u, const ExecContext *context) {
5434 assert(u);
5435 assert(context);
5436
5437 if (context->runtime_directory_preserve_mode == EXEC_PRESERVE_NO ||
5438 (context->runtime_directory_preserve_mode == EXEC_PRESERVE_RESTART && !unit_will_restart(u)))
5439 exec_context_destroy_runtime_directory(context, u->manager->prefix[EXEC_DIRECTORY_RUNTIME]);
5440
5441 exec_context_destroy_credentials(context, u->manager->prefix[EXEC_DIRECTORY_RUNTIME], u->id);
5442 }
5443
5444 int unit_clean(Unit *u, ExecCleanMask mask) {
5445 UnitActiveState state;
5446
5447 assert(u);
5448
5449 /* Special return values:
5450 *
5451 * -EOPNOTSUPP → cleaning not supported for this unit type
5452 * -EUNATCH → cleaning not defined for this resource type
5453 * -EBUSY → unit currently can't be cleaned since it's running or not properly loaded, or has
5454 * a job queued or similar
5455 */
5456
5457 if (!UNIT_VTABLE(u)->clean)
5458 return -EOPNOTSUPP;
5459
5460 if (mask == 0)
5461 return -EUNATCH;
5462
5463 if (u->load_state != UNIT_LOADED)
5464 return -EBUSY;
5465
5466 if (u->job)
5467 return -EBUSY;
5468
5469 state = unit_active_state(u);
5470 if (!IN_SET(state, UNIT_INACTIVE))
5471 return -EBUSY;
5472
5473 return UNIT_VTABLE(u)->clean(u, mask);
5474 }
5475
5476 int unit_can_clean(Unit *u, ExecCleanMask *ret) {
5477 assert(u);
5478
5479 if (!UNIT_VTABLE(u)->clean ||
5480 u->load_state != UNIT_LOADED) {
5481 *ret = 0;
5482 return 0;
5483 }
5484
5485 /* When the clean() method is set, can_clean() really should be set too */
5486 assert(UNIT_VTABLE(u)->can_clean);
5487
5488 return UNIT_VTABLE(u)->can_clean(u, ret);
5489 }
5490
/* Returns whether the unit type supports freezing; prefers the type's can_freeze() hook, otherwise falls
 * back to whether a freeze() implementation exists at all. */
bool unit_can_freeze(Unit *u) {
        assert(u);

        if (UNIT_VTABLE(u)->can_freeze)
                return UNIT_VTABLE(u)->can_freeze(u);

        return UNIT_VTABLE(u)->freeze;
}

/* Called when the cgroup freezer reports the unit as fully frozen; records the state and flushes any
 * pending bus reply for the freeze request. */
void unit_frozen(Unit *u) {
        assert(u);

        u->freezer_state = FREEZER_FROZEN;

        bus_unit_send_pending_freezer_message(u);
}

/* Counterpart of unit_frozen(): records that the unit is running again after a thaw. */
void unit_thawed(Unit *u) {
        assert(u);

        u->freezer_state = FREEZER_RUNNING;

        bus_unit_send_pending_freezer_message(u);
}
5515
/* Common implementation of unit_freeze()/unit_thaw(): validates that the operation is currently possible
 * and dispatches to the unit type's freeze/thaw method. Returns 1 when the operation was started,
 * 0/negative per the method, or a special error (-EOPNOTSUPP, -EBUSY, -EHOSTDOWN, -EALREADY). */
static int unit_freezer_action(Unit *u, FreezerAction action) {
        UnitActiveState s;
        int (*method)(Unit*);
        int r;

        assert(u);
        assert(IN_SET(action, FREEZER_FREEZE, FREEZER_THAW));

        method = action == FREEZER_FREEZE ? UNIT_VTABLE(u)->freeze : UNIT_VTABLE(u)->thaw;
        if (!method || !cg_freezer_supported())
                return -EOPNOTSUPP;

        if (u->job)
                return -EBUSY;

        if (u->load_state != UNIT_LOADED)
                return -EHOSTDOWN;

        s = unit_active_state(u);
        if (s != UNIT_ACTIVE)
                return -EHOSTDOWN;

        /* A freeze or thaw is already in flight. */
        if (IN_SET(u->freezer_state, FREEZER_FREEZING, FREEZER_THAWING))
                return -EALREADY;

        r = method(u);
        if (r <= 0)
                return r;

        return 1;
}
5547
/* Public entry points for freezing/thawing a unit; see unit_freezer_action() for return values. */
int unit_freeze(Unit *u) {
        return unit_freezer_action(u, FREEZER_FREEZE);
}

int unit_thaw(Unit *u) {
        return unit_freezer_action(u, FREEZER_THAW);
}

/* Wrappers around low-level cgroup freezer operations common for service and scope units */
int unit_freeze_vtable_common(Unit *u) {
        return unit_cgroup_freezer_action(u, FREEZER_FREEZE);
}

int unit_thaw_vtable_common(Unit *u) {
        return unit_cgroup_freezer_action(u, FREEZER_THAW);
}
5564
/* String mappings for CollectMode= (garbage collection policy of unit state). */
static const char* const collect_mode_table[_COLLECT_MODE_MAX] = {
        [COLLECT_INACTIVE] = "inactive",
        [COLLECT_INACTIVE_OR_FAILED] = "inactive-or-failed",
};

DEFINE_STRING_TABLE_LOOKUP(collect_mode, CollectMode);