]> git.proxmox.com Git - systemd.git/blob - src/core/unit.c
Update upstream source from tag 'upstream/245.7'
[systemd.git] / src / core / unit.c
1 /* SPDX-License-Identifier: LGPL-2.1+ */
2
3 #include <errno.h>
4 #include <stdlib.h>
5 #include <sys/prctl.h>
6 #include <unistd.h>
7
8 #include "sd-id128.h"
9 #include "sd-messages.h"
10
11 #include "all-units.h"
12 #include "alloc-util.h"
13 #include "bpf-firewall.h"
14 #include "bus-common-errors.h"
15 #include "bus-util.h"
16 #include "cgroup-setup.h"
17 #include "cgroup-util.h"
18 #include "dbus-unit.h"
19 #include "dbus.h"
20 #include "dropin.h"
21 #include "escape.h"
22 #include "execute.h"
23 #include "fd-util.h"
24 #include "fileio-label.h"
25 #include "fileio.h"
26 #include "format-util.h"
27 #include "fs-util.h"
28 #include "id128-util.h"
29 #include "io-util.h"
30 #include "install.h"
31 #include "load-dropin.h"
32 #include "load-fragment.h"
33 #include "log.h"
34 #include "macro.h"
35 #include "missing_audit.h"
36 #include "mkdir.h"
37 #include "parse-util.h"
38 #include "path-util.h"
39 #include "process-util.h"
40 #include "rm-rf.h"
41 #include "serialize.h"
42 #include "set.h"
43 #include "signal-util.h"
44 #include "sparse-endian.h"
45 #include "special.h"
46 #include "specifier.h"
47 #include "stat-util.h"
48 #include "stdio-util.h"
49 #include "string-table.h"
50 #include "string-util.h"
51 #include "strv.h"
52 #include "terminal-util.h"
53 #include "tmpfile-util.h"
54 #include "umask-util.h"
55 #include "unit-name.h"
56 #include "unit.h"
57 #include "user-util.h"
58 #include "virt.h"
59
60 /* Thresholds for logging at INFO level about resource consumption */
61 #define MENTIONWORTHY_CPU_NSEC (1 * NSEC_PER_SEC)
62 #define MENTIONWORTHY_IO_BYTES (1024 * 1024ULL)
63 #define MENTIONWORTHY_IP_BYTES (0ULL)
64
65 /* Thresholds for logging at INFO level about resource consumption */
66 #define NOTICEWORTHY_CPU_NSEC (10*60 * NSEC_PER_SEC) /* 10 minutes */
67 #define NOTICEWORTHY_IO_BYTES (10 * 1024 * 1024ULL) /* 10 MB */
68 #define NOTICEWORTHY_IP_BYTES (128 * 1024 * 1024ULL) /* 128 MB */
69
70 const UnitVTable * const unit_vtable[_UNIT_TYPE_MAX] = {
71 [UNIT_SERVICE] = &service_vtable,
72 [UNIT_SOCKET] = &socket_vtable,
73 [UNIT_TARGET] = &target_vtable,
74 [UNIT_DEVICE] = &device_vtable,
75 [UNIT_MOUNT] = &mount_vtable,
76 [UNIT_AUTOMOUNT] = &automount_vtable,
77 [UNIT_SWAP] = &swap_vtable,
78 [UNIT_TIMER] = &timer_vtable,
79 [UNIT_PATH] = &path_vtable,
80 [UNIT_SLICE] = &slice_vtable,
81 [UNIT_SCOPE] = &scope_vtable,
82 };
83
84 static void maybe_warn_about_dependency(Unit *u, const char *other, UnitDependency dependency);
85
86 Unit *unit_new(Manager *m, size_t size) {
87 Unit *u;
88
89 assert(m);
90 assert(size >= sizeof(Unit));
91
92 u = malloc0(size);
93 if (!u)
94 return NULL;
95
96 u->names = set_new(&string_hash_ops);
97 if (!u->names)
98 return mfree(u);
99
100 u->manager = m;
101 u->type = _UNIT_TYPE_INVALID;
102 u->default_dependencies = true;
103 u->unit_file_state = _UNIT_FILE_STATE_INVALID;
104 u->unit_file_preset = -1;
105 u->on_failure_job_mode = JOB_REPLACE;
106 u->cgroup_control_inotify_wd = -1;
107 u->cgroup_memory_inotify_wd = -1;
108 u->job_timeout = USEC_INFINITY;
109 u->job_running_timeout = USEC_INFINITY;
110 u->ref_uid = UID_INVALID;
111 u->ref_gid = GID_INVALID;
112 u->cpu_usage_last = NSEC_INFINITY;
113 u->cgroup_invalidated_mask |= CGROUP_MASK_BPF_FIREWALL;
114 u->failure_action_exit_status = u->success_action_exit_status = -1;
115
116 u->ip_accounting_ingress_map_fd = -1;
117 u->ip_accounting_egress_map_fd = -1;
118 u->ipv4_allow_map_fd = -1;
119 u->ipv6_allow_map_fd = -1;
120 u->ipv4_deny_map_fd = -1;
121 u->ipv6_deny_map_fd = -1;
122
123 u->last_section_private = -1;
124
125 u->start_ratelimit = (RateLimit) { m->default_start_limit_interval, m->default_start_limit_burst };
126 u->auto_stop_ratelimit = (RateLimit) { 10 * USEC_PER_SEC, 16 };
127
128 for (CGroupIOAccountingMetric i = 0; i < _CGROUP_IO_ACCOUNTING_METRIC_MAX; i++)
129 u->io_accounting_last[i] = UINT64_MAX;
130
131 return u;
132 }
133
134 int unit_new_for_name(Manager *m, size_t size, const char *name, Unit **ret) {
135 _cleanup_(unit_freep) Unit *u = NULL;
136 int r;
137
138 u = unit_new(m, size);
139 if (!u)
140 return -ENOMEM;
141
142 r = unit_add_name(u, name);
143 if (r < 0)
144 return r;
145
146 *ret = TAKE_PTR(u);
147
148 return r;
149 }
150
151 bool unit_has_name(const Unit *u, const char *name) {
152 assert(u);
153 assert(name);
154
155 return set_contains(u->names, (char*) name);
156 }
157
158 static void unit_init(Unit *u) {
159 CGroupContext *cc;
160 ExecContext *ec;
161 KillContext *kc;
162
163 assert(u);
164 assert(u->manager);
165 assert(u->type >= 0);
166
167 cc = unit_get_cgroup_context(u);
168 if (cc) {
169 cgroup_context_init(cc);
170
171 /* Copy in the manager defaults into the cgroup
172 * context, _before_ the rest of the settings have
173 * been initialized */
174
175 cc->cpu_accounting = u->manager->default_cpu_accounting;
176 cc->io_accounting = u->manager->default_io_accounting;
177 cc->blockio_accounting = u->manager->default_blockio_accounting;
178 cc->memory_accounting = u->manager->default_memory_accounting;
179 cc->tasks_accounting = u->manager->default_tasks_accounting;
180 cc->ip_accounting = u->manager->default_ip_accounting;
181
182 if (u->type != UNIT_SLICE)
183 cc->tasks_max = u->manager->default_tasks_max;
184 }
185
186 ec = unit_get_exec_context(u);
187 if (ec) {
188 exec_context_init(ec);
189
190 ec->keyring_mode = MANAGER_IS_SYSTEM(u->manager) ?
191 EXEC_KEYRING_SHARED : EXEC_KEYRING_INHERIT;
192 }
193
194 kc = unit_get_kill_context(u);
195 if (kc)
196 kill_context_init(kc);
197
198 if (UNIT_VTABLE(u)->init)
199 UNIT_VTABLE(u)->init(u);
200 }
201
202 int unit_add_name(Unit *u, const char *text) {
203 _cleanup_free_ char *s = NULL, *i = NULL;
204 UnitType t;
205 int r;
206
207 assert(u);
208 assert(text);
209
210 if (unit_name_is_valid(text, UNIT_NAME_TEMPLATE)) {
211
212 if (!u->instance)
213 return -EINVAL;
214
215 r = unit_name_replace_instance(text, u->instance, &s);
216 if (r < 0)
217 return r;
218 } else {
219 s = strdup(text);
220 if (!s)
221 return -ENOMEM;
222 }
223
224 if (set_contains(u->names, s))
225 return 0;
226 if (hashmap_contains(u->manager->units, s))
227 return -EEXIST;
228
229 if (!unit_name_is_valid(s, UNIT_NAME_PLAIN|UNIT_NAME_INSTANCE))
230 return -EINVAL;
231
232 t = unit_name_to_type(s);
233 if (t < 0)
234 return -EINVAL;
235
236 if (u->type != _UNIT_TYPE_INVALID && t != u->type)
237 return -EINVAL;
238
239 r = unit_name_to_instance(s, &i);
240 if (r < 0)
241 return r;
242
243 if (i && !unit_type_may_template(t))
244 return -EINVAL;
245
246 /* Ensure that this unit is either instanced or not instanced,
247 * but not both. Note that we do allow names with different
248 * instance names however! */
249 if (u->type != _UNIT_TYPE_INVALID && !u->instance != !i)
250 return -EINVAL;
251
252 if (!unit_type_may_alias(t) && !set_isempty(u->names))
253 return -EEXIST;
254
255 if (hashmap_size(u->manager->units) >= MANAGER_MAX_NAMES)
256 return -E2BIG;
257
258 r = set_put(u->names, s);
259 if (r < 0)
260 return r;
261 assert(r > 0);
262
263 r = hashmap_put(u->manager->units, s, u);
264 if (r < 0) {
265 (void) set_remove(u->names, s);
266 return r;
267 }
268
269 if (u->type == _UNIT_TYPE_INVALID) {
270 u->type = t;
271 u->id = s;
272 u->instance = TAKE_PTR(i);
273
274 LIST_PREPEND(units_by_type, u->manager->units_by_type[t], u);
275
276 unit_init(u);
277 }
278
279 s = NULL;
280
281 unit_add_to_dbus_queue(u);
282 return 0;
283 }
284
285 int unit_choose_id(Unit *u, const char *name) {
286 _cleanup_free_ char *t = NULL;
287 char *s, *i;
288 int r;
289
290 assert(u);
291 assert(name);
292
293 if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
294
295 if (!u->instance)
296 return -EINVAL;
297
298 r = unit_name_replace_instance(name, u->instance, &t);
299 if (r < 0)
300 return r;
301
302 name = t;
303 }
304
305 /* Selects one of the names of this unit as the id */
306 s = set_get(u->names, (char*) name);
307 if (!s)
308 return -ENOENT;
309
310 /* Determine the new instance from the new id */
311 r = unit_name_to_instance(s, &i);
312 if (r < 0)
313 return r;
314
315 u->id = s;
316
317 free(u->instance);
318 u->instance = i;
319
320 unit_add_to_dbus_queue(u);
321
322 return 0;
323 }
324
325 int unit_set_description(Unit *u, const char *description) {
326 int r;
327
328 assert(u);
329
330 r = free_and_strdup(&u->description, empty_to_null(description));
331 if (r < 0)
332 return r;
333 if (r > 0)
334 unit_add_to_dbus_queue(u);
335
336 return 0;
337 }
338
339 bool unit_may_gc(Unit *u) {
340 UnitActiveState state;
341 int r;
342
343 assert(u);
344
345 /* Checks whether the unit is ready to be unloaded for garbage collection.
346 * Returns true when the unit may be collected, and false if there's some
347 * reason to keep it loaded.
348 *
349 * References from other units are *not* checked here. Instead, this is done
350 * in unit_gc_sweep(), but using markers to properly collect dependency loops.
351 */
352
353 if (u->job)
354 return false;
355
356 if (u->nop_job)
357 return false;
358
359 state = unit_active_state(u);
360
361 /* If the unit is inactive and failed and no job is queued for it, then release its runtime resources */
362 if (UNIT_IS_INACTIVE_OR_FAILED(state) &&
363 UNIT_VTABLE(u)->release_resources)
364 UNIT_VTABLE(u)->release_resources(u);
365
366 if (u->perpetual)
367 return false;
368
369 if (sd_bus_track_count(u->bus_track) > 0)
370 return false;
371
372 /* But we keep the unit object around for longer when it is referenced or configured to not be gc'ed */
373 switch (u->collect_mode) {
374
375 case COLLECT_INACTIVE:
376 if (state != UNIT_INACTIVE)
377 return false;
378
379 break;
380
381 case COLLECT_INACTIVE_OR_FAILED:
382 if (!IN_SET(state, UNIT_INACTIVE, UNIT_FAILED))
383 return false;
384
385 break;
386
387 default:
388 assert_not_reached("Unknown garbage collection mode");
389 }
390
391 if (u->cgroup_path) {
392 /* If the unit has a cgroup, then check whether there's anything in it. If so, we should stay
393 * around. Units with active processes should never be collected. */
394
395 r = cg_is_empty_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path);
396 if (r < 0)
397 log_unit_debug_errno(u, r, "Failed to determine whether cgroup %s is empty: %m", u->cgroup_path);
398 if (r <= 0)
399 return false;
400 }
401
402 if (UNIT_VTABLE(u)->may_gc && !UNIT_VTABLE(u)->may_gc(u))
403 return false;
404
405 return true;
406 }
407
408 void unit_add_to_load_queue(Unit *u) {
409 assert(u);
410 assert(u->type != _UNIT_TYPE_INVALID);
411
412 if (u->load_state != UNIT_STUB || u->in_load_queue)
413 return;
414
415 LIST_PREPEND(load_queue, u->manager->load_queue, u);
416 u->in_load_queue = true;
417 }
418
419 void unit_add_to_cleanup_queue(Unit *u) {
420 assert(u);
421
422 if (u->in_cleanup_queue)
423 return;
424
425 LIST_PREPEND(cleanup_queue, u->manager->cleanup_queue, u);
426 u->in_cleanup_queue = true;
427 }
428
429 void unit_add_to_gc_queue(Unit *u) {
430 assert(u);
431
432 if (u->in_gc_queue || u->in_cleanup_queue)
433 return;
434
435 if (!unit_may_gc(u))
436 return;
437
438 LIST_PREPEND(gc_queue, u->manager->gc_unit_queue, u);
439 u->in_gc_queue = true;
440 }
441
442 void unit_add_to_dbus_queue(Unit *u) {
443 assert(u);
444 assert(u->type != _UNIT_TYPE_INVALID);
445
446 if (u->load_state == UNIT_STUB || u->in_dbus_queue)
447 return;
448
449 /* Shortcut things if nobody cares */
450 if (sd_bus_track_count(u->manager->subscribed) <= 0 &&
451 sd_bus_track_count(u->bus_track) <= 0 &&
452 set_isempty(u->manager->private_buses)) {
453 u->sent_dbus_new_signal = true;
454 return;
455 }
456
457 LIST_PREPEND(dbus_queue, u->manager->dbus_unit_queue, u);
458 u->in_dbus_queue = true;
459 }
460
461 void unit_submit_to_stop_when_unneeded_queue(Unit *u) {
462 assert(u);
463
464 if (u->in_stop_when_unneeded_queue)
465 return;
466
467 if (!u->stop_when_unneeded)
468 return;
469
470 if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u)))
471 return;
472
473 LIST_PREPEND(stop_when_unneeded_queue, u->manager->stop_when_unneeded_queue, u);
474 u->in_stop_when_unneeded_queue = true;
475 }
476
477 static void bidi_set_free(Unit *u, Hashmap *h) {
478 Unit *other;
479 Iterator i;
480 void *v;
481
482 assert(u);
483
484 /* Frees the hashmap and makes sure we are dropped from the inverse pointers */
485
486 HASHMAP_FOREACH_KEY(v, other, h, i) {
487 UnitDependency d;
488
489 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
490 hashmap_remove(other->dependencies[d], u);
491
492 unit_add_to_gc_queue(other);
493 }
494
495 hashmap_free(h);
496 }
497
498 static void unit_remove_transient(Unit *u) {
499 char **i;
500
501 assert(u);
502
503 if (!u->transient)
504 return;
505
506 if (u->fragment_path)
507 (void) unlink(u->fragment_path);
508
509 STRV_FOREACH(i, u->dropin_paths) {
510 _cleanup_free_ char *p = NULL, *pp = NULL;
511
512 p = dirname_malloc(*i); /* Get the drop-in directory from the drop-in file */
513 if (!p)
514 continue;
515
516 pp = dirname_malloc(p); /* Get the config directory from the drop-in directory */
517 if (!pp)
518 continue;
519
520 /* Only drop transient drop-ins */
521 if (!path_equal(u->manager->lookup_paths.transient, pp))
522 continue;
523
524 (void) unlink(*i);
525 (void) rmdir(p);
526 }
527 }
528
529 static void unit_free_requires_mounts_for(Unit *u) {
530 assert(u);
531
532 for (;;) {
533 _cleanup_free_ char *path;
534
535 path = hashmap_steal_first_key(u->requires_mounts_for);
536 if (!path)
537 break;
538 else {
539 char s[strlen(path) + 1];
540
541 PATH_FOREACH_PREFIX_MORE(s, path) {
542 char *y;
543 Set *x;
544
545 x = hashmap_get2(u->manager->units_requiring_mounts_for, s, (void**) &y);
546 if (!x)
547 continue;
548
549 (void) set_remove(x, u);
550
551 if (set_isempty(x)) {
552 (void) hashmap_remove(u->manager->units_requiring_mounts_for, y);
553 free(y);
554 set_free(x);
555 }
556 }
557 }
558 }
559
560 u->requires_mounts_for = hashmap_free(u->requires_mounts_for);
561 }
562
563 static void unit_done(Unit *u) {
564 ExecContext *ec;
565 CGroupContext *cc;
566
567 assert(u);
568
569 if (u->type < 0)
570 return;
571
572 if (UNIT_VTABLE(u)->done)
573 UNIT_VTABLE(u)->done(u);
574
575 ec = unit_get_exec_context(u);
576 if (ec)
577 exec_context_done(ec);
578
579 cc = unit_get_cgroup_context(u);
580 if (cc)
581 cgroup_context_done(cc);
582 }
583
584 void unit_free(Unit *u) {
585 UnitDependency d;
586 Iterator i;
587 char *t;
588
589 if (!u)
590 return;
591
592 if (UNIT_ISSET(u->slice)) {
593 /* A unit is being dropped from the tree, make sure our parent slice recalculates the member mask */
594 unit_invalidate_cgroup_members_masks(UNIT_DEREF(u->slice));
595
596 /* And make sure the parent is realized again, updating cgroup memberships */
597 unit_add_to_cgroup_realize_queue(UNIT_DEREF(u->slice));
598 }
599
600 u->transient_file = safe_fclose(u->transient_file);
601
602 if (!MANAGER_IS_RELOADING(u->manager))
603 unit_remove_transient(u);
604
605 bus_unit_send_removed_signal(u);
606
607 unit_done(u);
608
609 unit_dequeue_rewatch_pids(u);
610
611 sd_bus_slot_unref(u->match_bus_slot);
612 sd_bus_track_unref(u->bus_track);
613 u->deserialized_refs = strv_free(u->deserialized_refs);
614
615 unit_free_requires_mounts_for(u);
616
617 SET_FOREACH(t, u->names, i)
618 hashmap_remove_value(u->manager->units, t, u);
619
620 if (!sd_id128_is_null(u->invocation_id))
621 hashmap_remove_value(u->manager->units_by_invocation_id, &u->invocation_id, u);
622
623 if (u->job) {
624 Job *j = u->job;
625 job_uninstall(j);
626 job_free(j);
627 }
628
629 if (u->nop_job) {
630 Job *j = u->nop_job;
631 job_uninstall(j);
632 job_free(j);
633 }
634
635 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
636 bidi_set_free(u, u->dependencies[d]);
637
638 if (u->on_console)
639 manager_unref_console(u->manager);
640
641 unit_release_cgroup(u);
642
643 if (!MANAGER_IS_RELOADING(u->manager))
644 unit_unlink_state_files(u);
645
646 unit_unref_uid_gid(u, false);
647
648 (void) manager_update_failed_units(u->manager, u, false);
649 set_remove(u->manager->startup_units, u);
650
651 unit_unwatch_all_pids(u);
652
653 unit_ref_unset(&u->slice);
654 while (u->refs_by_target)
655 unit_ref_unset(u->refs_by_target);
656
657 if (u->type != _UNIT_TYPE_INVALID)
658 LIST_REMOVE(units_by_type, u->manager->units_by_type[u->type], u);
659
660 if (u->in_load_queue)
661 LIST_REMOVE(load_queue, u->manager->load_queue, u);
662
663 if (u->in_dbus_queue)
664 LIST_REMOVE(dbus_queue, u->manager->dbus_unit_queue, u);
665
666 if (u->in_gc_queue)
667 LIST_REMOVE(gc_queue, u->manager->gc_unit_queue, u);
668
669 if (u->in_cgroup_realize_queue)
670 LIST_REMOVE(cgroup_realize_queue, u->manager->cgroup_realize_queue, u);
671
672 if (u->in_cgroup_empty_queue)
673 LIST_REMOVE(cgroup_empty_queue, u->manager->cgroup_empty_queue, u);
674
675 if (u->in_cleanup_queue)
676 LIST_REMOVE(cleanup_queue, u->manager->cleanup_queue, u);
677
678 if (u->in_target_deps_queue)
679 LIST_REMOVE(target_deps_queue, u->manager->target_deps_queue, u);
680
681 if (u->in_stop_when_unneeded_queue)
682 LIST_REMOVE(stop_when_unneeded_queue, u->manager->stop_when_unneeded_queue, u);
683
684 safe_close(u->ip_accounting_ingress_map_fd);
685 safe_close(u->ip_accounting_egress_map_fd);
686
687 safe_close(u->ipv4_allow_map_fd);
688 safe_close(u->ipv6_allow_map_fd);
689 safe_close(u->ipv4_deny_map_fd);
690 safe_close(u->ipv6_deny_map_fd);
691
692 bpf_program_unref(u->ip_bpf_ingress);
693 bpf_program_unref(u->ip_bpf_ingress_installed);
694 bpf_program_unref(u->ip_bpf_egress);
695 bpf_program_unref(u->ip_bpf_egress_installed);
696
697 set_free(u->ip_bpf_custom_ingress);
698 set_free(u->ip_bpf_custom_egress);
699 set_free(u->ip_bpf_custom_ingress_installed);
700 set_free(u->ip_bpf_custom_egress_installed);
701
702 bpf_program_unref(u->bpf_device_control_installed);
703
704 condition_free_list(u->conditions);
705 condition_free_list(u->asserts);
706
707 free(u->description);
708 strv_free(u->documentation);
709 free(u->fragment_path);
710 free(u->source_path);
711 strv_free(u->dropin_paths);
712 free(u->instance);
713
714 free(u->job_timeout_reboot_arg);
715
716 set_free_free(u->names);
717
718 free(u->reboot_arg);
719
720 free(u);
721 }
722
723 UnitActiveState unit_active_state(Unit *u) {
724 assert(u);
725
726 if (u->load_state == UNIT_MERGED)
727 return unit_active_state(unit_follow_merge(u));
728
729 /* After a reload it might happen that a unit is not correctly
730 * loaded but still has a process around. That's why we won't
731 * shortcut failed loading to UNIT_INACTIVE_FAILED. */
732
733 return UNIT_VTABLE(u)->active_state(u);
734 }
735
736 const char* unit_sub_state_to_string(Unit *u) {
737 assert(u);
738
739 return UNIT_VTABLE(u)->sub_state_to_string(u);
740 }
741
742 static int set_complete_move(Set **s, Set **other) {
743 assert(s);
744 assert(other);
745
746 if (!other)
747 return 0;
748
749 if (*s)
750 return set_move(*s, *other);
751 else
752 *s = TAKE_PTR(*other);
753
754 return 0;
755 }
756
757 static int hashmap_complete_move(Hashmap **s, Hashmap **other) {
758 assert(s);
759 assert(other);
760
761 if (!*other)
762 return 0;
763
764 if (*s)
765 return hashmap_move(*s, *other);
766 else
767 *s = TAKE_PTR(*other);
768
769 return 0;
770 }
771
772 static int merge_names(Unit *u, Unit *other) {
773 char *t;
774 Iterator i;
775 int r;
776
777 assert(u);
778 assert(other);
779
780 r = set_complete_move(&u->names, &other->names);
781 if (r < 0)
782 return r;
783
784 set_free_free(other->names);
785 other->names = NULL;
786 other->id = NULL;
787
788 SET_FOREACH(t, u->names, i)
789 assert_se(hashmap_replace(u->manager->units, t, u) == 0);
790
791 return 0;
792 }
793
794 static int reserve_dependencies(Unit *u, Unit *other, UnitDependency d) {
795 unsigned n_reserve;
796
797 assert(u);
798 assert(other);
799 assert(d < _UNIT_DEPENDENCY_MAX);
800
801 /*
802 * If u does not have this dependency set allocated, there is no need
803 * to reserve anything. In that case other's set will be transferred
804 * as a whole to u by complete_move().
805 */
806 if (!u->dependencies[d])
807 return 0;
808
809 /* merge_dependencies() will skip a u-on-u dependency */
810 n_reserve = hashmap_size(other->dependencies[d]) - !!hashmap_get(other->dependencies[d], u);
811
812 return hashmap_reserve(u->dependencies[d], n_reserve);
813 }
814
815 static void merge_dependencies(Unit *u, Unit *other, const char *other_id, UnitDependency d) {
816 Iterator i;
817 Unit *back;
818 void *v;
819 int r;
820
821 /* Merges all dependencies of type 'd' of the unit 'other' into the deps of the unit 'u' */
822
823 assert(u);
824 assert(other);
825 assert(d < _UNIT_DEPENDENCY_MAX);
826
827 /* Fix backwards pointers. Let's iterate through all dependent units of the other unit. */
828 HASHMAP_FOREACH_KEY(v, back, other->dependencies[d], i) {
829 UnitDependency k;
830
831 /* Let's now iterate through the dependencies of that dependencies of the other units, looking for
832 * pointers back, and let's fix them up, to instead point to 'u'. */
833
834 for (k = 0; k < _UNIT_DEPENDENCY_MAX; k++) {
835 if (back == u) {
836 /* Do not add dependencies between u and itself. */
837 if (hashmap_remove(back->dependencies[k], other))
838 maybe_warn_about_dependency(u, other_id, k);
839 } else {
840 UnitDependencyInfo di_u, di_other, di_merged;
841
842 /* Let's drop this dependency between "back" and "other", and let's create it between
843 * "back" and "u" instead. Let's merge the bit masks of the dependency we are moving,
844 * and any such dependency which might already exist */
845
846 di_other.data = hashmap_get(back->dependencies[k], other);
847 if (!di_other.data)
848 continue; /* dependency isn't set, let's try the next one */
849
850 di_u.data = hashmap_get(back->dependencies[k], u);
851
852 di_merged = (UnitDependencyInfo) {
853 .origin_mask = di_u.origin_mask | di_other.origin_mask,
854 .destination_mask = di_u.destination_mask | di_other.destination_mask,
855 };
856
857 r = hashmap_remove_and_replace(back->dependencies[k], other, u, di_merged.data);
858 if (r < 0)
859 log_warning_errno(r, "Failed to remove/replace: back=%s other=%s u=%s: %m", back->id, other_id, u->id);
860 assert(r >= 0);
861
862 /* assert_se(hashmap_remove_and_replace(back->dependencies[k], other, u, di_merged.data) >= 0); */
863 }
864 }
865
866 }
867
868 /* Also do not move dependencies on u to itself */
869 back = hashmap_remove(other->dependencies[d], u);
870 if (back)
871 maybe_warn_about_dependency(u, other_id, d);
872
873 /* The move cannot fail. The caller must have performed a reservation. */
874 assert_se(hashmap_complete_move(&u->dependencies[d], &other->dependencies[d]) == 0);
875
876 other->dependencies[d] = hashmap_free(other->dependencies[d]);
877 }
878
879 int unit_merge(Unit *u, Unit *other) {
880 UnitDependency d;
881 const char *other_id = NULL;
882 int r;
883
884 assert(u);
885 assert(other);
886 assert(u->manager == other->manager);
887 assert(u->type != _UNIT_TYPE_INVALID);
888
889 other = unit_follow_merge(other);
890
891 if (other == u)
892 return 0;
893
894 if (u->type != other->type)
895 return -EINVAL;
896
897 if (!u->instance != !other->instance)
898 return -EINVAL;
899
900 if (!unit_type_may_alias(u->type)) /* Merging only applies to unit names that support aliases */
901 return -EEXIST;
902
903 if (!IN_SET(other->load_state, UNIT_STUB, UNIT_NOT_FOUND))
904 return -EEXIST;
905
906 if (other->job)
907 return -EEXIST;
908
909 if (other->nop_job)
910 return -EEXIST;
911
912 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
913 return -EEXIST;
914
915 if (other->id)
916 other_id = strdupa(other->id);
917
918 /* Make reservations to ensure merge_dependencies() won't fail */
919 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
920 r = reserve_dependencies(u, other, d);
921 /*
922 * We don't rollback reservations if we fail. We don't have
923 * a way to undo reservations. A reservation is not a leak.
924 */
925 if (r < 0)
926 return r;
927 }
928
929 /* Merge names */
930 r = merge_names(u, other);
931 if (r < 0)
932 return r;
933
934 /* Redirect all references */
935 while (other->refs_by_target)
936 unit_ref_set(other->refs_by_target, other->refs_by_target->source, u);
937
938 /* Merge dependencies */
939 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
940 merge_dependencies(u, other, other_id, d);
941
942 other->load_state = UNIT_MERGED;
943 other->merged_into = u;
944
945 /* If there is still some data attached to the other node, we
946 * don't need it anymore, and can free it. */
947 if (other->load_state != UNIT_STUB)
948 if (UNIT_VTABLE(other)->done)
949 UNIT_VTABLE(other)->done(other);
950
951 unit_add_to_dbus_queue(u);
952 unit_add_to_cleanup_queue(other);
953
954 return 0;
955 }
956
957 int unit_merge_by_name(Unit *u, const char *name) {
958 _cleanup_free_ char *s = NULL;
959 Unit *other;
960 int r;
961
962 /* Either add name to u, or if a unit with name already exists, merge it with u.
963 * If name is a template, do the same for name@instance, where instance is u's instance. */
964
965 assert(u);
966 assert(name);
967
968 if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
969 if (!u->instance)
970 return -EINVAL;
971
972 r = unit_name_replace_instance(name, u->instance, &s);
973 if (r < 0)
974 return r;
975
976 name = s;
977 }
978
979 other = manager_get_unit(u->manager, name);
980 if (other)
981 return unit_merge(u, other);
982
983 return unit_add_name(u, name);
984 }
985
986 Unit* unit_follow_merge(Unit *u) {
987 assert(u);
988
989 while (u->load_state == UNIT_MERGED)
990 assert_se(u = u->merged_into);
991
992 return u;
993 }
994
995 int unit_add_exec_dependencies(Unit *u, ExecContext *c) {
996 ExecDirectoryType dt;
997 char **dp;
998 int r;
999
1000 assert(u);
1001 assert(c);
1002
1003 if (c->working_directory && !c->working_directory_missing_ok) {
1004 r = unit_require_mounts_for(u, c->working_directory, UNIT_DEPENDENCY_FILE);
1005 if (r < 0)
1006 return r;
1007 }
1008
1009 if (c->root_directory) {
1010 r = unit_require_mounts_for(u, c->root_directory, UNIT_DEPENDENCY_FILE);
1011 if (r < 0)
1012 return r;
1013 }
1014
1015 if (c->root_image) {
1016 r = unit_require_mounts_for(u, c->root_image, UNIT_DEPENDENCY_FILE);
1017 if (r < 0)
1018 return r;
1019 }
1020
1021 for (dt = 0; dt < _EXEC_DIRECTORY_TYPE_MAX; dt++) {
1022 if (!u->manager->prefix[dt])
1023 continue;
1024
1025 STRV_FOREACH(dp, c->directories[dt].paths) {
1026 _cleanup_free_ char *p;
1027
1028 p = path_join(u->manager->prefix[dt], *dp);
1029 if (!p)
1030 return -ENOMEM;
1031
1032 r = unit_require_mounts_for(u, p, UNIT_DEPENDENCY_FILE);
1033 if (r < 0)
1034 return r;
1035 }
1036 }
1037
1038 if (!MANAGER_IS_SYSTEM(u->manager))
1039 return 0;
1040
1041 if (c->private_tmp) {
1042 const char *p;
1043
1044 FOREACH_STRING(p, "/tmp", "/var/tmp") {
1045 r = unit_require_mounts_for(u, p, UNIT_DEPENDENCY_FILE);
1046 if (r < 0)
1047 return r;
1048 }
1049
1050 r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_TMPFILES_SETUP_SERVICE, true, UNIT_DEPENDENCY_FILE);
1051 if (r < 0)
1052 return r;
1053 }
1054
1055 if (!IN_SET(c->std_output,
1056 EXEC_OUTPUT_JOURNAL, EXEC_OUTPUT_JOURNAL_AND_CONSOLE,
1057 EXEC_OUTPUT_KMSG, EXEC_OUTPUT_KMSG_AND_CONSOLE,
1058 EXEC_OUTPUT_SYSLOG, EXEC_OUTPUT_SYSLOG_AND_CONSOLE) &&
1059 !IN_SET(c->std_error,
1060 EXEC_OUTPUT_JOURNAL, EXEC_OUTPUT_JOURNAL_AND_CONSOLE,
1061 EXEC_OUTPUT_KMSG, EXEC_OUTPUT_KMSG_AND_CONSOLE,
1062 EXEC_OUTPUT_SYSLOG, EXEC_OUTPUT_SYSLOG_AND_CONSOLE) &&
1063 !c->log_namespace)
1064 return 0;
1065
1066 /* If syslog or kernel logging is requested (or log namespacing is), make sure our own logging daemon
1067 * is run first. */
1068
1069 if (c->log_namespace) {
1070 _cleanup_free_ char *socket_unit = NULL, *varlink_socket_unit = NULL;
1071
1072 r = unit_name_build_from_type("systemd-journald", c->log_namespace, UNIT_SOCKET, &socket_unit);
1073 if (r < 0)
1074 return r;
1075
1076 r = unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_REQUIRES, socket_unit, true, UNIT_DEPENDENCY_FILE);
1077 if (r < 0)
1078 return r;
1079
1080 r = unit_name_build_from_type("systemd-journald-varlink", c->log_namespace, UNIT_SOCKET, &varlink_socket_unit);
1081 if (r < 0)
1082 return r;
1083
1084 r = unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_REQUIRES, varlink_socket_unit, true, UNIT_DEPENDENCY_FILE);
1085 if (r < 0)
1086 return r;
1087 } else
1088 r = unit_add_dependency_by_name(u, UNIT_AFTER, SPECIAL_JOURNALD_SOCKET, true, UNIT_DEPENDENCY_FILE);
1089 if (r < 0)
1090 return r;
1091
1092 return 0;
1093 }
1094
1095 const char *unit_description(Unit *u) {
1096 assert(u);
1097
1098 if (u->description)
1099 return u->description;
1100
1101 return strna(u->id);
1102 }
1103
1104 const char *unit_status_string(Unit *u) {
1105 assert(u);
1106
1107 if (u->manager->status_unit_format == STATUS_UNIT_FORMAT_NAME && u->id)
1108 return u->id;
1109
1110 return unit_description(u);
1111 }
1112
1113 static void print_unit_dependency_mask(FILE *f, const char *kind, UnitDependencyMask mask, bool *space) {
1114 const struct {
1115 UnitDependencyMask mask;
1116 const char *name;
1117 } table[] = {
1118 { UNIT_DEPENDENCY_FILE, "file" },
1119 { UNIT_DEPENDENCY_IMPLICIT, "implicit" },
1120 { UNIT_DEPENDENCY_DEFAULT, "default" },
1121 { UNIT_DEPENDENCY_UDEV, "udev" },
1122 { UNIT_DEPENDENCY_PATH, "path" },
1123 { UNIT_DEPENDENCY_MOUNTINFO_IMPLICIT, "mountinfo-implicit" },
1124 { UNIT_DEPENDENCY_MOUNTINFO_DEFAULT, "mountinfo-default" },
1125 { UNIT_DEPENDENCY_PROC_SWAP, "proc-swap" },
1126 };
1127 size_t i;
1128
1129 assert(f);
1130 assert(kind);
1131 assert(space);
1132
1133 for (i = 0; i < ELEMENTSOF(table); i++) {
1134
1135 if (mask == 0)
1136 break;
1137
1138 if (FLAGS_SET(mask, table[i].mask)) {
1139 if (*space)
1140 fputc(' ', f);
1141 else
1142 *space = true;
1143
1144 fputs(kind, f);
1145 fputs("-", f);
1146 fputs(table[i].name, f);
1147
1148 mask &= ~table[i].mask;
1149 }
1150 }
1151
1152 assert(mask == 0);
1153 }
1154
1155 void unit_dump(Unit *u, FILE *f, const char *prefix) {
1156 char *t, **j;
1157 UnitDependency d;
1158 Iterator i;
1159 const char *prefix2;
1160 char timestamp[5][FORMAT_TIMESTAMP_MAX], timespan[FORMAT_TIMESPAN_MAX];
1161 Unit *following;
1162 _cleanup_set_free_ Set *following_set = NULL;
1163 const char *n;
1164 CGroupMask m;
1165 int r;
1166
1167 assert(u);
1168 assert(u->type >= 0);
1169
1170 prefix = strempty(prefix);
1171 prefix2 = strjoina(prefix, "\t");
1172
1173 fprintf(f,
1174 "%s-> Unit %s:\n",
1175 prefix, u->id);
1176
1177 SET_FOREACH(t, u->names, i)
1178 if (!streq(t, u->id))
1179 fprintf(f, "%s\tAlias: %s\n", prefix, t);
1180
1181 fprintf(f,
1182 "%s\tDescription: %s\n"
1183 "%s\tInstance: %s\n"
1184 "%s\tUnit Load State: %s\n"
1185 "%s\tUnit Active State: %s\n"
1186 "%s\tState Change Timestamp: %s\n"
1187 "%s\tInactive Exit Timestamp: %s\n"
1188 "%s\tActive Enter Timestamp: %s\n"
1189 "%s\tActive Exit Timestamp: %s\n"
1190 "%s\tInactive Enter Timestamp: %s\n"
1191 "%s\tMay GC: %s\n"
1192 "%s\tNeed Daemon Reload: %s\n"
1193 "%s\tTransient: %s\n"
1194 "%s\tPerpetual: %s\n"
1195 "%s\tGarbage Collection Mode: %s\n"
1196 "%s\tSlice: %s\n"
1197 "%s\tCGroup: %s\n"
1198 "%s\tCGroup realized: %s\n",
1199 prefix, unit_description(u),
1200 prefix, strna(u->instance),
1201 prefix, unit_load_state_to_string(u->load_state),
1202 prefix, unit_active_state_to_string(unit_active_state(u)),
1203 prefix, strna(format_timestamp(timestamp[0], sizeof(timestamp[0]), u->state_change_timestamp.realtime)),
1204 prefix, strna(format_timestamp(timestamp[1], sizeof(timestamp[1]), u->inactive_exit_timestamp.realtime)),
1205 prefix, strna(format_timestamp(timestamp[2], sizeof(timestamp[2]), u->active_enter_timestamp.realtime)),
1206 prefix, strna(format_timestamp(timestamp[3], sizeof(timestamp[3]), u->active_exit_timestamp.realtime)),
1207 prefix, strna(format_timestamp(timestamp[4], sizeof(timestamp[4]), u->inactive_enter_timestamp.realtime)),
1208 prefix, yes_no(unit_may_gc(u)),
1209 prefix, yes_no(unit_need_daemon_reload(u)),
1210 prefix, yes_no(u->transient),
1211 prefix, yes_no(u->perpetual),
1212 prefix, collect_mode_to_string(u->collect_mode),
1213 prefix, strna(unit_slice_name(u)),
1214 prefix, strna(u->cgroup_path),
1215 prefix, yes_no(u->cgroup_realized));
1216
1217 if (u->cgroup_realized_mask != 0) {
1218 _cleanup_free_ char *s = NULL;
1219 (void) cg_mask_to_string(u->cgroup_realized_mask, &s);
1220 fprintf(f, "%s\tCGroup realized mask: %s\n", prefix, strnull(s));
1221 }
1222
1223 if (u->cgroup_enabled_mask != 0) {
1224 _cleanup_free_ char *s = NULL;
1225 (void) cg_mask_to_string(u->cgroup_enabled_mask, &s);
1226 fprintf(f, "%s\tCGroup enabled mask: %s\n", prefix, strnull(s));
1227 }
1228
1229 m = unit_get_own_mask(u);
1230 if (m != 0) {
1231 _cleanup_free_ char *s = NULL;
1232 (void) cg_mask_to_string(m, &s);
1233 fprintf(f, "%s\tCGroup own mask: %s\n", prefix, strnull(s));
1234 }
1235
1236 m = unit_get_members_mask(u);
1237 if (m != 0) {
1238 _cleanup_free_ char *s = NULL;
1239 (void) cg_mask_to_string(m, &s);
1240 fprintf(f, "%s\tCGroup members mask: %s\n", prefix, strnull(s));
1241 }
1242
1243 m = unit_get_delegate_mask(u);
1244 if (m != 0) {
1245 _cleanup_free_ char *s = NULL;
1246 (void) cg_mask_to_string(m, &s);
1247 fprintf(f, "%s\tCGroup delegate mask: %s\n", prefix, strnull(s));
1248 }
1249
1250 if (!sd_id128_is_null(u->invocation_id))
1251 fprintf(f, "%s\tInvocation ID: " SD_ID128_FORMAT_STR "\n",
1252 prefix, SD_ID128_FORMAT_VAL(u->invocation_id));
1253
1254 STRV_FOREACH(j, u->documentation)
1255 fprintf(f, "%s\tDocumentation: %s\n", prefix, *j);
1256
1257 following = unit_following(u);
1258 if (following)
1259 fprintf(f, "%s\tFollowing: %s\n", prefix, following->id);
1260
1261 r = unit_following_set(u, &following_set);
1262 if (r >= 0) {
1263 Unit *other;
1264
1265 SET_FOREACH(other, following_set, i)
1266 fprintf(f, "%s\tFollowing Set Member: %s\n", prefix, other->id);
1267 }
1268
1269 if (u->fragment_path)
1270 fprintf(f, "%s\tFragment Path: %s\n", prefix, u->fragment_path);
1271
1272 if (u->source_path)
1273 fprintf(f, "%s\tSource Path: %s\n", prefix, u->source_path);
1274
1275 STRV_FOREACH(j, u->dropin_paths)
1276 fprintf(f, "%s\tDropIn Path: %s\n", prefix, *j);
1277
1278 if (u->failure_action != EMERGENCY_ACTION_NONE)
1279 fprintf(f, "%s\tFailure Action: %s\n", prefix, emergency_action_to_string(u->failure_action));
1280 if (u->failure_action_exit_status >= 0)
1281 fprintf(f, "%s\tFailure Action Exit Status: %i\n", prefix, u->failure_action_exit_status);
1282 if (u->success_action != EMERGENCY_ACTION_NONE)
1283 fprintf(f, "%s\tSuccess Action: %s\n", prefix, emergency_action_to_string(u->success_action));
1284 if (u->success_action_exit_status >= 0)
1285 fprintf(f, "%s\tSuccess Action Exit Status: %i\n", prefix, u->success_action_exit_status);
1286
1287 if (u->job_timeout != USEC_INFINITY)
1288 fprintf(f, "%s\tJob Timeout: %s\n", prefix, format_timespan(timespan, sizeof(timespan), u->job_timeout, 0));
1289
1290 if (u->job_timeout_action != EMERGENCY_ACTION_NONE)
1291 fprintf(f, "%s\tJob Timeout Action: %s\n", prefix, emergency_action_to_string(u->job_timeout_action));
1292
1293 if (u->job_timeout_reboot_arg)
1294 fprintf(f, "%s\tJob Timeout Reboot Argument: %s\n", prefix, u->job_timeout_reboot_arg);
1295
1296 condition_dump_list(u->conditions, f, prefix, condition_type_to_string);
1297 condition_dump_list(u->asserts, f, prefix, assert_type_to_string);
1298
1299 if (dual_timestamp_is_set(&u->condition_timestamp))
1300 fprintf(f,
1301 "%s\tCondition Timestamp: %s\n"
1302 "%s\tCondition Result: %s\n",
1303 prefix, strna(format_timestamp(timestamp[0], sizeof(timestamp[0]), u->condition_timestamp.realtime)),
1304 prefix, yes_no(u->condition_result));
1305
1306 if (dual_timestamp_is_set(&u->assert_timestamp))
1307 fprintf(f,
1308 "%s\tAssert Timestamp: %s\n"
1309 "%s\tAssert Result: %s\n",
1310 prefix, strna(format_timestamp(timestamp[0], sizeof(timestamp[0]), u->assert_timestamp.realtime)),
1311 prefix, yes_no(u->assert_result));
1312
1313 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
1314 UnitDependencyInfo di;
1315 Unit *other;
1316
1317 HASHMAP_FOREACH_KEY(di.data, other, u->dependencies[d], i) {
1318 bool space = false;
1319
1320 fprintf(f, "%s\t%s: %s (", prefix, unit_dependency_to_string(d), other->id);
1321
1322 print_unit_dependency_mask(f, "origin", di.origin_mask, &space);
1323 print_unit_dependency_mask(f, "destination", di.destination_mask, &space);
1324
1325 fputs(")\n", f);
1326 }
1327 }
1328
1329 if (!hashmap_isempty(u->requires_mounts_for)) {
1330 UnitDependencyInfo di;
1331 const char *path;
1332
1333 HASHMAP_FOREACH_KEY(di.data, path, u->requires_mounts_for, i) {
1334 bool space = false;
1335
1336 fprintf(f, "%s\tRequiresMountsFor: %s (", prefix, path);
1337
1338 print_unit_dependency_mask(f, "origin", di.origin_mask, &space);
1339 print_unit_dependency_mask(f, "destination", di.destination_mask, &space);
1340
1341 fputs(")\n", f);
1342 }
1343 }
1344
1345 if (u->load_state == UNIT_LOADED) {
1346
1347 fprintf(f,
1348 "%s\tStopWhenUnneeded: %s\n"
1349 "%s\tRefuseManualStart: %s\n"
1350 "%s\tRefuseManualStop: %s\n"
1351 "%s\tDefaultDependencies: %s\n"
1352 "%s\tOnFailureJobMode: %s\n"
1353 "%s\tIgnoreOnIsolate: %s\n",
1354 prefix, yes_no(u->stop_when_unneeded),
1355 prefix, yes_no(u->refuse_manual_start),
1356 prefix, yes_no(u->refuse_manual_stop),
1357 prefix, yes_no(u->default_dependencies),
1358 prefix, job_mode_to_string(u->on_failure_job_mode),
1359 prefix, yes_no(u->ignore_on_isolate));
1360
1361 if (UNIT_VTABLE(u)->dump)
1362 UNIT_VTABLE(u)->dump(u, f, prefix2);
1363
1364 } else if (u->load_state == UNIT_MERGED)
1365 fprintf(f,
1366 "%s\tMerged into: %s\n",
1367 prefix, u->merged_into->id);
1368 else if (u->load_state == UNIT_ERROR)
1369 fprintf(f, "%s\tLoad Error Code: %s\n", prefix, strerror_safe(u->load_error));
1370
1371 for (n = sd_bus_track_first(u->bus_track); n; n = sd_bus_track_next(u->bus_track))
1372 fprintf(f, "%s\tBus Ref: %s\n", prefix, n);
1373
1374 if (u->job)
1375 job_dump(u->job, f, prefix2);
1376
1377 if (u->nop_job)
1378 job_dump(u->nop_job, f, prefix2);
1379 }
1380
1381 /* Common implementation for multiple backends */
1382 int unit_load_fragment_and_dropin(Unit *u, bool fragment_required) {
1383 int r;
1384
1385 assert(u);
1386
1387 /* Load a .{service,socket,...} file */
1388 r = unit_load_fragment(u);
1389 if (r < 0)
1390 return r;
1391
1392 if (u->load_state == UNIT_STUB) {
1393 if (fragment_required)
1394 return -ENOENT;
1395
1396 u->load_state = UNIT_LOADED;
1397 }
1398
1399 /* Load drop-in directory data. If u is an alias, we might be reloading the
1400 * target unit needlessly. But we cannot be sure which drops-ins have already
1401 * been loaded and which not, at least without doing complicated book-keeping,
1402 * so let's always reread all drop-ins. */
1403 r = unit_load_dropin(unit_follow_merge(u));
1404 if (r < 0)
1405 return r;
1406
1407 if (u->source_path) {
1408 struct stat st;
1409
1410 if (stat(u->source_path, &st) >= 0)
1411 u->source_mtime = timespec_load(&st.st_mtim);
1412 else
1413 u->source_mtime = 0;
1414 }
1415
1416 return 0;
1417 }
1418
1419 void unit_add_to_target_deps_queue(Unit *u) {
1420 Manager *m = u->manager;
1421
1422 assert(u);
1423
1424 if (u->in_target_deps_queue)
1425 return;
1426
1427 LIST_PREPEND(target_deps_queue, m->target_deps_queue, u);
1428 u->in_target_deps_queue = true;
1429 }
1430
1431 int unit_add_default_target_dependency(Unit *u, Unit *target) {
1432 assert(u);
1433 assert(target);
1434
1435 if (target->type != UNIT_TARGET)
1436 return 0;
1437
1438 /* Only add the dependency if both units are loaded, so that
1439 * that loop check below is reliable */
1440 if (u->load_state != UNIT_LOADED ||
1441 target->load_state != UNIT_LOADED)
1442 return 0;
1443
1444 /* If either side wants no automatic dependencies, then let's
1445 * skip this */
1446 if (!u->default_dependencies ||
1447 !target->default_dependencies)
1448 return 0;
1449
1450 /* Don't create loops */
1451 if (hashmap_get(target->dependencies[UNIT_BEFORE], u))
1452 return 0;
1453
1454 return unit_add_dependency(target, UNIT_AFTER, u, true, UNIT_DEPENDENCY_DEFAULT);
1455 }
1456
1457 static int unit_add_slice_dependencies(Unit *u) {
1458 UnitDependencyMask mask;
1459 assert(u);
1460
1461 if (!UNIT_HAS_CGROUP_CONTEXT(u))
1462 return 0;
1463
1464 /* Slice units are implicitly ordered against their parent slices (as this relationship is encoded in the
1465 name), while all other units are ordered based on configuration (as in their case Slice= configures the
1466 relationship). */
1467 mask = u->type == UNIT_SLICE ? UNIT_DEPENDENCY_IMPLICIT : UNIT_DEPENDENCY_FILE;
1468
1469 if (UNIT_ISSET(u->slice))
1470 return unit_add_two_dependencies(u, UNIT_AFTER, UNIT_REQUIRES, UNIT_DEREF(u->slice), true, mask);
1471
1472 if (unit_has_name(u, SPECIAL_ROOT_SLICE))
1473 return 0;
1474
1475 return unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_REQUIRES, SPECIAL_ROOT_SLICE, true, mask);
1476 }
1477
1478 static int unit_add_mount_dependencies(Unit *u) {
1479 UnitDependencyInfo di;
1480 const char *path;
1481 Iterator i;
1482 int r;
1483
1484 assert(u);
1485
1486 HASHMAP_FOREACH_KEY(di.data, path, u->requires_mounts_for, i) {
1487 char prefix[strlen(path) + 1];
1488
1489 PATH_FOREACH_PREFIX_MORE(prefix, path) {
1490 _cleanup_free_ char *p = NULL;
1491 Unit *m;
1492
1493 r = unit_name_from_path(prefix, ".mount", &p);
1494 if (r < 0)
1495 return r;
1496
1497 m = manager_get_unit(u->manager, p);
1498 if (!m) {
1499 /* Make sure to load the mount unit if
1500 * it exists. If so the dependencies
1501 * on this unit will be added later
1502 * during the loading of the mount
1503 * unit. */
1504 (void) manager_load_unit_prepare(u->manager, p, NULL, NULL, &m);
1505 continue;
1506 }
1507 if (m == u)
1508 continue;
1509
1510 if (m->load_state != UNIT_LOADED)
1511 continue;
1512
1513 r = unit_add_dependency(u, UNIT_AFTER, m, true, di.origin_mask);
1514 if (r < 0)
1515 return r;
1516
1517 if (m->fragment_path) {
1518 r = unit_add_dependency(u, UNIT_REQUIRES, m, true, di.origin_mask);
1519 if (r < 0)
1520 return r;
1521 }
1522 }
1523 }
1524
1525 return 0;
1526 }
1527
1528 static int unit_add_startup_units(Unit *u) {
1529 CGroupContext *c;
1530 int r;
1531
1532 c = unit_get_cgroup_context(u);
1533 if (!c)
1534 return 0;
1535
1536 if (c->startup_cpu_shares == CGROUP_CPU_SHARES_INVALID &&
1537 c->startup_io_weight == CGROUP_WEIGHT_INVALID &&
1538 c->startup_blockio_weight == CGROUP_BLKIO_WEIGHT_INVALID)
1539 return 0;
1540
1541 r = set_ensure_allocated(&u->manager->startup_units, NULL);
1542 if (r < 0)
1543 return r;
1544
1545 return set_put(u->manager->startup_units, u);
1546 }
1547
1548 int unit_load(Unit *u) {
1549 int r;
1550
1551 assert(u);
1552
1553 if (u->in_load_queue) {
1554 LIST_REMOVE(load_queue, u->manager->load_queue, u);
1555 u->in_load_queue = false;
1556 }
1557
1558 if (u->type == _UNIT_TYPE_INVALID)
1559 return -EINVAL;
1560
1561 if (u->load_state != UNIT_STUB)
1562 return 0;
1563
1564 if (u->transient_file) {
1565 /* Finalize transient file: if this is a transient unit file, as soon as we reach unit_load() the setup
1566 * is complete, hence let's synchronize the unit file we just wrote to disk. */
1567
1568 r = fflush_and_check(u->transient_file);
1569 if (r < 0)
1570 goto fail;
1571
1572 u->transient_file = safe_fclose(u->transient_file);
1573 u->fragment_mtime = now(CLOCK_REALTIME);
1574 }
1575
1576 r = UNIT_VTABLE(u)->load(u);
1577 if (r < 0)
1578 goto fail;
1579
1580 assert(u->load_state != UNIT_STUB);
1581
1582 if (u->load_state == UNIT_LOADED) {
1583 unit_add_to_target_deps_queue(u);
1584
1585 r = unit_add_slice_dependencies(u);
1586 if (r < 0)
1587 goto fail;
1588
1589 r = unit_add_mount_dependencies(u);
1590 if (r < 0)
1591 goto fail;
1592
1593 r = unit_add_startup_units(u);
1594 if (r < 0)
1595 goto fail;
1596
1597 if (u->on_failure_job_mode == JOB_ISOLATE && hashmap_size(u->dependencies[UNIT_ON_FAILURE]) > 1) {
1598 log_unit_error(u, "More than one OnFailure= dependencies specified but OnFailureJobMode=isolate set. Refusing.");
1599 r = -ENOEXEC;
1600 goto fail;
1601 }
1602
1603 if (u->job_running_timeout != USEC_INFINITY && u->job_running_timeout > u->job_timeout)
1604 log_unit_warning(u, "JobRunningTimeoutSec= is greater than JobTimeoutSec=, it has no effect.");
1605
1606 /* We finished loading, let's ensure our parents recalculate the members mask */
1607 unit_invalidate_cgroup_members_masks(u);
1608 }
1609
1610 assert((u->load_state != UNIT_MERGED) == !u->merged_into);
1611
1612 unit_add_to_dbus_queue(unit_follow_merge(u));
1613 unit_add_to_gc_queue(u);
1614
1615 return 0;
1616
1617 fail:
1618 /* We convert ENOEXEC errors to the UNIT_BAD_SETTING load state here. Configuration parsing code should hence
1619 * return ENOEXEC to ensure units are placed in this state after loading */
1620
1621 u->load_state = u->load_state == UNIT_STUB ? UNIT_NOT_FOUND :
1622 r == -ENOEXEC ? UNIT_BAD_SETTING :
1623 UNIT_ERROR;
1624 u->load_error = r;
1625
1626 unit_add_to_dbus_queue(u);
1627 unit_add_to_gc_queue(u);
1628
1629 return log_unit_debug_errno(u, r, "Failed to load configuration: %m");
1630 }
1631
1632 _printf_(7, 8)
1633 static int log_unit_internal(void *userdata, int level, int error, const char *file, int line, const char *func, const char *format, ...) {
1634 Unit *u = userdata;
1635 va_list ap;
1636 int r;
1637
1638 va_start(ap, format);
1639 if (u)
1640 r = log_object_internalv(level, error, file, line, func,
1641 u->manager->unit_log_field,
1642 u->id,
1643 u->manager->invocation_log_field,
1644 u->invocation_id_string,
1645 format, ap);
1646 else
1647 r = log_internalv(level, error, file, line, func, format, ap);
1648 va_end(ap);
1649
1650 return r;
1651 }
1652
1653 static bool unit_test_condition(Unit *u) {
1654 assert(u);
1655
1656 dual_timestamp_get(&u->condition_timestamp);
1657 u->condition_result = condition_test_list(u->conditions, condition_type_to_string, log_unit_internal, u);
1658
1659 unit_add_to_dbus_queue(u);
1660
1661 return u->condition_result;
1662 }
1663
1664 static bool unit_test_assert(Unit *u) {
1665 assert(u);
1666
1667 dual_timestamp_get(&u->assert_timestamp);
1668 u->assert_result = condition_test_list(u->asserts, assert_type_to_string, log_unit_internal, u);
1669
1670 unit_add_to_dbus_queue(u);
1671
1672 return u->assert_result;
1673 }
1674
1675 void unit_status_printf(Unit *u, StatusType status_type, const char *status, const char *unit_status_msg_format) {
1676 const char *d;
1677
1678 d = unit_status_string(u);
1679 if (log_get_show_color())
1680 d = strjoina(ANSI_HIGHLIGHT, d, ANSI_NORMAL);
1681
1682 DISABLE_WARNING_FORMAT_NONLITERAL;
1683 manager_status_printf(u->manager, status_type, status, unit_status_msg_format, d);
1684 REENABLE_WARNING;
1685 }
1686
1687 int unit_test_start_limit(Unit *u) {
1688 const char *reason;
1689
1690 assert(u);
1691
1692 if (ratelimit_below(&u->start_ratelimit)) {
1693 u->start_limit_hit = false;
1694 return 0;
1695 }
1696
1697 log_unit_warning(u, "Start request repeated too quickly.");
1698 u->start_limit_hit = true;
1699
1700 reason = strjoina("unit ", u->id, " failed");
1701
1702 emergency_action(u->manager, u->start_limit_action,
1703 EMERGENCY_ACTION_IS_WATCHDOG|EMERGENCY_ACTION_WARN,
1704 u->reboot_arg, -1, reason);
1705
1706 return -ECANCELED;
1707 }
1708
1709 bool unit_shall_confirm_spawn(Unit *u) {
1710 assert(u);
1711
1712 if (manager_is_confirm_spawn_disabled(u->manager))
1713 return false;
1714
1715 /* For some reasons units remaining in the same process group
1716 * as PID 1 fail to acquire the console even if it's not used
1717 * by any process. So skip the confirmation question for them. */
1718 return !unit_get_exec_context(u)->same_pgrp;
1719 }
1720
1721 static bool unit_verify_deps(Unit *u) {
1722 Unit *other;
1723 Iterator j;
1724 void *v;
1725
1726 assert(u);
1727
1728 /* Checks whether all BindsTo= dependencies of this unit are fulfilled — if they are also combined with
1729 * After=. We do not check Requires= or Requisite= here as they only should have an effect on the job
1730 * processing, but do not have any effect afterwards. We don't check BindsTo= dependencies that are not used in
1731 * conjunction with After= as for them any such check would make things entirely racy. */
1732
1733 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BINDS_TO], j) {
1734
1735 if (!hashmap_contains(u->dependencies[UNIT_AFTER], other))
1736 continue;
1737
1738 if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(other))) {
1739 log_unit_notice(u, "Bound to unit %s, but unit isn't active.", other->id);
1740 return false;
1741 }
1742 }
1743
1744 return true;
1745 }
1746
1747 /* Errors that aren't really errors:
1748 * -EALREADY: Unit is already started.
1749 * -ECOMM: Condition failed
1750 * -EAGAIN: An operation is already in progress. Retry later.
1751 *
1752 * Errors that are real errors:
1753 * -EBADR: This unit type does not support starting.
1754 * -ECANCELED: Start limit hit, too many requests for now
1755 * -EPROTO: Assert failed
1756 * -EINVAL: Unit not loaded
1757 * -EOPNOTSUPP: Unit type not supported
1758 * -ENOLINK: The necessary dependencies are not fulfilled.
1759 * -ESTALE: This unit has been started before and can't be started a second time
1760 * -ENOENT: This is a triggering unit and unit to trigger is not loaded
1761 */
1762 int unit_start(Unit *u) {
1763 UnitActiveState state;
1764 Unit *following;
1765
1766 assert(u);
1767
1768 /* If this is already started, then this will succeed. Note that this will even succeed if this unit
1769 * is not startable by the user. This is relied on to detect when we need to wait for units and when
1770 * waiting is finished. */
1771 state = unit_active_state(u);
1772 if (UNIT_IS_ACTIVE_OR_RELOADING(state))
1773 return -EALREADY;
1774 if (state == UNIT_MAINTENANCE)
1775 return -EAGAIN;
1776
1777 /* Units that aren't loaded cannot be started */
1778 if (u->load_state != UNIT_LOADED)
1779 return -EINVAL;
1780
1781 /* Refuse starting scope units more than once */
1782 if (UNIT_VTABLE(u)->once_only && dual_timestamp_is_set(&u->inactive_enter_timestamp))
1783 return -ESTALE;
1784
1785 /* If the conditions failed, don't do anything at all. If we already are activating this call might
1786 * still be useful to speed up activation in case there is some hold-off time, but we don't want to
1787 * recheck the condition in that case. */
1788 if (state != UNIT_ACTIVATING &&
1789 !unit_test_condition(u))
1790 return log_unit_debug_errno(u, SYNTHETIC_ERRNO(ECOMM), "Starting requested but condition failed. Not starting unit.");
1791
1792 /* If the asserts failed, fail the entire job */
1793 if (state != UNIT_ACTIVATING &&
1794 !unit_test_assert(u))
1795 return log_unit_notice_errno(u, SYNTHETIC_ERRNO(EPROTO), "Starting requested but asserts failed.");
1796
1797 /* Units of types that aren't supported cannot be started. Note that we do this test only after the
1798 * condition checks, so that we rather return condition check errors (which are usually not
1799 * considered a true failure) than "not supported" errors (which are considered a failure).
1800 */
1801 if (!unit_type_supported(u->type))
1802 return -EOPNOTSUPP;
1803
1804 /* Let's make sure that the deps really are in order before we start this. Normally the job engine
1805 * should have taken care of this already, but let's check this here again. After all, our
1806 * dependencies might not be in effect anymore, due to a reload or due to a failed condition. */
1807 if (!unit_verify_deps(u))
1808 return -ENOLINK;
1809
1810 /* Forward to the main object, if we aren't it. */
1811 following = unit_following(u);
1812 if (following) {
1813 log_unit_debug(u, "Redirecting start request from %s to %s.", u->id, following->id);
1814 return unit_start(following);
1815 }
1816
1817 /* If it is stopped, but we cannot start it, then fail */
1818 if (!UNIT_VTABLE(u)->start)
1819 return -EBADR;
1820
1821 /* We don't suppress calls to ->start() here when we are already starting, to allow this request to
1822 * be used as a "hurry up" call, for example when the unit is in some "auto restart" state where it
1823 * waits for a holdoff timer to elapse before it will start again. */
1824
1825 unit_add_to_dbus_queue(u);
1826
1827 return UNIT_VTABLE(u)->start(u);
1828 }
1829
1830 bool unit_can_start(Unit *u) {
1831 assert(u);
1832
1833 if (u->load_state != UNIT_LOADED)
1834 return false;
1835
1836 if (!unit_type_supported(u->type))
1837 return false;
1838
1839 /* Scope units may be started only once */
1840 if (UNIT_VTABLE(u)->once_only && dual_timestamp_is_set(&u->inactive_exit_timestamp))
1841 return false;
1842
1843 return !!UNIT_VTABLE(u)->start;
1844 }
1845
1846 bool unit_can_isolate(Unit *u) {
1847 assert(u);
1848
1849 return unit_can_start(u) &&
1850 u->allow_isolate;
1851 }
1852
1853 /* Errors:
1854 * -EBADR: This unit type does not support stopping.
1855 * -EALREADY: Unit is already stopped.
1856 * -EAGAIN: An operation is already in progress. Retry later.
1857 */
1858 int unit_stop(Unit *u) {
1859 UnitActiveState state;
1860 Unit *following;
1861
1862 assert(u);
1863
1864 state = unit_active_state(u);
1865 if (UNIT_IS_INACTIVE_OR_FAILED(state))
1866 return -EALREADY;
1867
1868 following = unit_following(u);
1869 if (following) {
1870 log_unit_debug(u, "Redirecting stop request from %s to %s.", u->id, following->id);
1871 return unit_stop(following);
1872 }
1873
1874 if (!UNIT_VTABLE(u)->stop)
1875 return -EBADR;
1876
1877 unit_add_to_dbus_queue(u);
1878
1879 return UNIT_VTABLE(u)->stop(u);
1880 }
1881
1882 bool unit_can_stop(Unit *u) {
1883 assert(u);
1884
1885 if (!unit_type_supported(u->type))
1886 return false;
1887
1888 if (u->perpetual)
1889 return false;
1890
1891 return !!UNIT_VTABLE(u)->stop;
1892 }
1893
1894 /* Errors:
1895 * -EBADR: This unit type does not support reloading.
1896 * -ENOEXEC: Unit is not started.
1897 * -EAGAIN: An operation is already in progress. Retry later.
1898 */
1899 int unit_reload(Unit *u) {
1900 UnitActiveState state;
1901 Unit *following;
1902
1903 assert(u);
1904
1905 if (u->load_state != UNIT_LOADED)
1906 return -EINVAL;
1907
1908 if (!unit_can_reload(u))
1909 return -EBADR;
1910
1911 state = unit_active_state(u);
1912 if (state == UNIT_RELOADING)
1913 return -EAGAIN;
1914
1915 if (state != UNIT_ACTIVE) {
1916 log_unit_warning(u, "Unit cannot be reloaded because it is inactive.");
1917 return -ENOEXEC;
1918 }
1919
1920 following = unit_following(u);
1921 if (following) {
1922 log_unit_debug(u, "Redirecting reload request from %s to %s.", u->id, following->id);
1923 return unit_reload(following);
1924 }
1925
1926 unit_add_to_dbus_queue(u);
1927
1928 if (!UNIT_VTABLE(u)->reload) {
1929 /* Unit doesn't have a reload function, but we need to propagate the reload anyway */
1930 unit_notify(u, unit_active_state(u), unit_active_state(u), 0);
1931 return 0;
1932 }
1933
1934 return UNIT_VTABLE(u)->reload(u);
1935 }
1936
1937 bool unit_can_reload(Unit *u) {
1938 assert(u);
1939
1940 if (UNIT_VTABLE(u)->can_reload)
1941 return UNIT_VTABLE(u)->can_reload(u);
1942
1943 if (!hashmap_isempty(u->dependencies[UNIT_PROPAGATES_RELOAD_TO]))
1944 return true;
1945
1946 return UNIT_VTABLE(u)->reload;
1947 }
1948
1949 bool unit_is_unneeded(Unit *u) {
1950 static const UnitDependency deps[] = {
1951 UNIT_REQUIRED_BY,
1952 UNIT_REQUISITE_OF,
1953 UNIT_WANTED_BY,
1954 UNIT_BOUND_BY,
1955 };
1956 size_t j;
1957
1958 assert(u);
1959
1960 if (!u->stop_when_unneeded)
1961 return false;
1962
1963 /* Don't clean up while the unit is transitioning or is even inactive. */
1964 if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u)))
1965 return false;
1966 if (u->job)
1967 return false;
1968
1969 for (j = 0; j < ELEMENTSOF(deps); j++) {
1970 Unit *other;
1971 Iterator i;
1972 void *v;
1973
1974 /* If a dependent unit has a job queued, is active or transitioning, or is marked for
1975 * restart, then don't clean this one up. */
1976
1977 HASHMAP_FOREACH_KEY(v, other, u->dependencies[deps[j]], i) {
1978 if (other->job)
1979 return false;
1980
1981 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
1982 return false;
1983
1984 if (unit_will_restart(other))
1985 return false;
1986 }
1987 }
1988
1989 return true;
1990 }
1991
1992 static void check_unneeded_dependencies(Unit *u) {
1993
1994 static const UnitDependency deps[] = {
1995 UNIT_REQUIRES,
1996 UNIT_REQUISITE,
1997 UNIT_WANTS,
1998 UNIT_BINDS_TO,
1999 };
2000 size_t j;
2001
2002 assert(u);
2003
2004 /* Add all units this unit depends on to the queue that processes StopWhenUnneeded= behaviour. */
2005
2006 for (j = 0; j < ELEMENTSOF(deps); j++) {
2007 Unit *other;
2008 Iterator i;
2009 void *v;
2010
2011 HASHMAP_FOREACH_KEY(v, other, u->dependencies[deps[j]], i)
2012 unit_submit_to_stop_when_unneeded_queue(other);
2013 }
2014 }
2015
2016 static void unit_check_binds_to(Unit *u) {
2017 _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
2018 bool stop = false;
2019 Unit *other;
2020 Iterator i;
2021 void *v;
2022 int r;
2023
2024 assert(u);
2025
2026 if (u->job)
2027 return;
2028
2029 if (unit_active_state(u) != UNIT_ACTIVE)
2030 return;
2031
2032 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BINDS_TO], i) {
2033 if (other->job)
2034 continue;
2035
2036 if (!other->coldplugged)
2037 /* We might yet create a job for the other unit… */
2038 continue;
2039
2040 if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
2041 continue;
2042
2043 stop = true;
2044 break;
2045 }
2046
2047 if (!stop)
2048 return;
2049
2050 /* If stopping a unit fails continuously we might enter a stop
2051 * loop here, hence stop acting on the service being
2052 * unnecessary after a while. */
2053 if (!ratelimit_below(&u->auto_stop_ratelimit)) {
2054 log_unit_warning(u, "Unit is bound to inactive unit %s, but not stopping since we tried this too often recently.", other->id);
2055 return;
2056 }
2057
2058 assert(other);
2059 log_unit_info(u, "Unit is bound to inactive unit %s. Stopping, too.", other->id);
2060
2061 /* A unit we need to run is gone. Sniff. Let's stop this. */
2062 r = manager_add_job(u->manager, JOB_STOP, u, JOB_FAIL, NULL, &error, NULL);
2063 if (r < 0)
2064 log_unit_warning_errno(u, r, "Failed to enqueue stop job, ignoring: %s", bus_error_message(&error, r));
2065 }
2066
2067 static void retroactively_start_dependencies(Unit *u) {
2068 Iterator i;
2069 Unit *other;
2070 void *v;
2071
2072 assert(u);
2073 assert(UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)));
2074
2075 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_REQUIRES], i)
2076 if (!hashmap_get(u->dependencies[UNIT_AFTER], other) &&
2077 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
2078 manager_add_job(u->manager, JOB_START, other, JOB_REPLACE, NULL, NULL, NULL);
2079
2080 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BINDS_TO], i)
2081 if (!hashmap_get(u->dependencies[UNIT_AFTER], other) &&
2082 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
2083 manager_add_job(u->manager, JOB_START, other, JOB_REPLACE, NULL, NULL, NULL);
2084
2085 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_WANTS], i)
2086 if (!hashmap_get(u->dependencies[UNIT_AFTER], other) &&
2087 !UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(other)))
2088 manager_add_job(u->manager, JOB_START, other, JOB_FAIL, NULL, NULL, NULL);
2089
2090 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_CONFLICTS], i)
2091 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2092 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL, NULL);
2093
2094 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_CONFLICTED_BY], i)
2095 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2096 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL, NULL);
2097 }
2098
2099 static void retroactively_stop_dependencies(Unit *u) {
2100 Unit *other;
2101 Iterator i;
2102 void *v;
2103
2104 assert(u);
2105 assert(UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)));
2106
2107 /* Pull down units which are bound to us recursively if enabled */
2108 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BOUND_BY], i)
2109 if (!UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other)))
2110 manager_add_job(u->manager, JOB_STOP, other, JOB_REPLACE, NULL, NULL, NULL);
2111 }
2112
2113 void unit_start_on_failure(Unit *u) {
2114 Unit *other;
2115 Iterator i;
2116 void *v;
2117 int r;
2118
2119 assert(u);
2120
2121 if (hashmap_size(u->dependencies[UNIT_ON_FAILURE]) <= 0)
2122 return;
2123
2124 log_unit_info(u, "Triggering OnFailure= dependencies.");
2125
2126 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_ON_FAILURE], i) {
2127 _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
2128
2129 r = manager_add_job(u->manager, JOB_START, other, u->on_failure_job_mode, NULL, &error, NULL);
2130 if (r < 0)
2131 log_unit_warning_errno(u, r, "Failed to enqueue OnFailure= job, ignoring: %s", bus_error_message(&error, r));
2132 }
2133 }
2134
2135 void unit_trigger_notify(Unit *u) {
2136 Unit *other;
2137 Iterator i;
2138 void *v;
2139
2140 assert(u);
2141
2142 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_TRIGGERED_BY], i)
2143 if (UNIT_VTABLE(other)->trigger_notify)
2144 UNIT_VTABLE(other)->trigger_notify(other, u);
2145 }
2146
2147 static int raise_level(int log_level, bool condition_info, bool condition_notice) {
2148 if (condition_notice && log_level > LOG_NOTICE)
2149 return LOG_NOTICE;
2150 if (condition_info && log_level > LOG_INFO)
2151 return LOG_INFO;
2152 return log_level;
2153 }
2154
2155 static int unit_log_resources(Unit *u) {
2156 struct iovec iovec[1 + _CGROUP_IP_ACCOUNTING_METRIC_MAX + _CGROUP_IO_ACCOUNTING_METRIC_MAX + 4];
2157 bool any_traffic = false, have_ip_accounting = false, any_io = false, have_io_accounting = false;
2158 _cleanup_free_ char *igress = NULL, *egress = NULL, *rr = NULL, *wr = NULL;
2159 int log_level = LOG_DEBUG; /* May be raised if resources consumed over a treshold */
2160 size_t n_message_parts = 0, n_iovec = 0;
2161 char* message_parts[1 + 2 + 2 + 1], *t;
2162 nsec_t nsec = NSEC_INFINITY;
2163 CGroupIPAccountingMetric m;
2164 size_t i;
2165 int r;
2166 const char* const ip_fields[_CGROUP_IP_ACCOUNTING_METRIC_MAX] = {
2167 [CGROUP_IP_INGRESS_BYTES] = "IP_METRIC_INGRESS_BYTES",
2168 [CGROUP_IP_INGRESS_PACKETS] = "IP_METRIC_INGRESS_PACKETS",
2169 [CGROUP_IP_EGRESS_BYTES] = "IP_METRIC_EGRESS_BYTES",
2170 [CGROUP_IP_EGRESS_PACKETS] = "IP_METRIC_EGRESS_PACKETS",
2171 };
2172 const char* const io_fields[_CGROUP_IO_ACCOUNTING_METRIC_MAX] = {
2173 [CGROUP_IO_READ_BYTES] = "IO_METRIC_READ_BYTES",
2174 [CGROUP_IO_WRITE_BYTES] = "IO_METRIC_WRITE_BYTES",
2175 [CGROUP_IO_READ_OPERATIONS] = "IO_METRIC_READ_OPERATIONS",
2176 [CGROUP_IO_WRITE_OPERATIONS] = "IO_METRIC_WRITE_OPERATIONS",
2177 };
2178
2179 assert(u);
2180
2181 /* Invoked whenever a unit enters failed or dead state. Logs information about consumed resources if resource
2182 * accounting was enabled for a unit. It does this in two ways: a friendly human readable string with reduced
2183 * information and the complete data in structured fields. */
2184
2185 (void) unit_get_cpu_usage(u, &nsec);
2186 if (nsec != NSEC_INFINITY) {
2187 char buf[FORMAT_TIMESPAN_MAX] = "";
2188
2189 /* Format the CPU time for inclusion in the structured log message */
2190 if (asprintf(&t, "CPU_USAGE_NSEC=%" PRIu64, nsec) < 0) {
2191 r = log_oom();
2192 goto finish;
2193 }
2194 iovec[n_iovec++] = IOVEC_MAKE_STRING(t);
2195
2196 /* Format the CPU time for inclusion in the human language message string */
2197 format_timespan(buf, sizeof(buf), nsec / NSEC_PER_USEC, USEC_PER_MSEC);
2198 t = strjoin("consumed ", buf, " CPU time");
2199 if (!t) {
2200 r = log_oom();
2201 goto finish;
2202 }
2203
2204 message_parts[n_message_parts++] = t;
2205
2206 log_level = raise_level(log_level,
2207 nsec > NOTICEWORTHY_CPU_NSEC,
2208 nsec > MENTIONWORTHY_CPU_NSEC);
2209 }
2210
2211 for (CGroupIOAccountingMetric k = 0; k < _CGROUP_IO_ACCOUNTING_METRIC_MAX; k++) {
2212 char buf[FORMAT_BYTES_MAX] = "";
2213 uint64_t value = UINT64_MAX;
2214
2215 assert(io_fields[k]);
2216
2217 (void) unit_get_io_accounting(u, k, k > 0, &value);
2218 if (value == UINT64_MAX)
2219 continue;
2220
2221 have_io_accounting = true;
2222 if (value > 0)
2223 any_io = true;
2224
2225 /* Format IO accounting data for inclusion in the structured log message */
2226 if (asprintf(&t, "%s=%" PRIu64, io_fields[k], value) < 0) {
2227 r = log_oom();
2228 goto finish;
2229 }
2230 iovec[n_iovec++] = IOVEC_MAKE_STRING(t);
2231
2232 /* Format the IO accounting data for inclusion in the human language message string, but only
2233 * for the bytes counters (and not for the operations counters) */
2234 if (k == CGROUP_IO_READ_BYTES) {
2235 assert(!rr);
2236 rr = strjoin("read ", format_bytes(buf, sizeof(buf), value), " from disk");
2237 if (!rr) {
2238 r = log_oom();
2239 goto finish;
2240 }
2241 } else if (k == CGROUP_IO_WRITE_BYTES) {
2242 assert(!wr);
2243 wr = strjoin("written ", format_bytes(buf, sizeof(buf), value), " to disk");
2244 if (!wr) {
2245 r = log_oom();
2246 goto finish;
2247 }
2248 }
2249
2250 if (IN_SET(k, CGROUP_IO_READ_BYTES, CGROUP_IO_WRITE_BYTES))
2251 log_level = raise_level(log_level,
2252 value > MENTIONWORTHY_IO_BYTES,
2253 value > NOTICEWORTHY_IO_BYTES);
2254 }
2255
2256 if (have_io_accounting) {
2257 if (any_io) {
2258 if (rr)
2259 message_parts[n_message_parts++] = TAKE_PTR(rr);
2260 if (wr)
2261 message_parts[n_message_parts++] = TAKE_PTR(wr);
2262
2263 } else {
2264 char *k;
2265
2266 k = strdup("no IO");
2267 if (!k) {
2268 r = log_oom();
2269 goto finish;
2270 }
2271
2272 message_parts[n_message_parts++] = k;
2273 }
2274 }
2275
2276 for (m = 0; m < _CGROUP_IP_ACCOUNTING_METRIC_MAX; m++) {
2277 char buf[FORMAT_BYTES_MAX] = "";
2278 uint64_t value = UINT64_MAX;
2279
2280 assert(ip_fields[m]);
2281
2282 (void) unit_get_ip_accounting(u, m, &value);
2283 if (value == UINT64_MAX)
2284 continue;
2285
2286 have_ip_accounting = true;
2287 if (value > 0)
2288 any_traffic = true;
2289
2290 /* Format IP accounting data for inclusion in the structured log message */
2291 if (asprintf(&t, "%s=%" PRIu64, ip_fields[m], value) < 0) {
2292 r = log_oom();
2293 goto finish;
2294 }
2295 iovec[n_iovec++] = IOVEC_MAKE_STRING(t);
2296
2297 /* Format the IP accounting data for inclusion in the human language message string, but only for the
2298 * bytes counters (and not for the packets counters) */
2299 if (m == CGROUP_IP_INGRESS_BYTES) {
2300 assert(!igress);
2301 igress = strjoin("received ", format_bytes(buf, sizeof(buf), value), " IP traffic");
2302 if (!igress) {
2303 r = log_oom();
2304 goto finish;
2305 }
2306 } else if (m == CGROUP_IP_EGRESS_BYTES) {
2307 assert(!egress);
2308 egress = strjoin("sent ", format_bytes(buf, sizeof(buf), value), " IP traffic");
2309 if (!egress) {
2310 r = log_oom();
2311 goto finish;
2312 }
2313 }
2314
2315 if (IN_SET(m, CGROUP_IP_INGRESS_BYTES, CGROUP_IP_EGRESS_BYTES))
2316 log_level = raise_level(log_level,
2317 value > MENTIONWORTHY_IP_BYTES,
2318 value > NOTICEWORTHY_IP_BYTES);
2319 }
2320
2321 if (have_ip_accounting) {
2322 if (any_traffic) {
2323 if (igress)
2324 message_parts[n_message_parts++] = TAKE_PTR(igress);
2325 if (egress)
2326 message_parts[n_message_parts++] = TAKE_PTR(egress);
2327
2328 } else {
2329 char *k;
2330
2331 k = strdup("no IP traffic");
2332 if (!k) {
2333 r = log_oom();
2334 goto finish;
2335 }
2336
2337 message_parts[n_message_parts++] = k;
2338 }
2339 }
2340
2341 /* Is there any accounting data available at all? */
2342 if (n_iovec == 0) {
2343 r = 0;
2344 goto finish;
2345 }
2346
2347 if (n_message_parts == 0)
2348 t = strjoina("MESSAGE=", u->id, ": Completed.");
2349 else {
2350 _cleanup_free_ char *joined;
2351
2352 message_parts[n_message_parts] = NULL;
2353
2354 joined = strv_join(message_parts, ", ");
2355 if (!joined) {
2356 r = log_oom();
2357 goto finish;
2358 }
2359
2360 joined[0] = ascii_toupper(joined[0]);
2361 t = strjoina("MESSAGE=", u->id, ": ", joined, ".");
2362 }
2363
2364 /* The following four fields we allocate on the stack or are static strings, we hence don't want to free them,
2365 * and hence don't increase n_iovec for them */
2366 iovec[n_iovec] = IOVEC_MAKE_STRING(t);
2367 iovec[n_iovec + 1] = IOVEC_MAKE_STRING("MESSAGE_ID=" SD_MESSAGE_UNIT_RESOURCES_STR);
2368
2369 t = strjoina(u->manager->unit_log_field, u->id);
2370 iovec[n_iovec + 2] = IOVEC_MAKE_STRING(t);
2371
2372 t = strjoina(u->manager->invocation_log_field, u->invocation_id_string);
2373 iovec[n_iovec + 3] = IOVEC_MAKE_STRING(t);
2374
2375 log_struct_iovec(log_level, iovec, n_iovec + 4);
2376 r = 0;
2377
2378 finish:
2379 for (i = 0; i < n_message_parts; i++)
2380 free(message_parts[i]);
2381
2382 for (i = 0; i < n_iovec; i++)
2383 free(iovec[i].iov_base);
2384
2385 return r;
2386
2387 }
2388
2389 static void unit_update_on_console(Unit *u) {
2390 bool b;
2391
2392 assert(u);
2393
2394 b = unit_needs_console(u);
2395 if (u->on_console == b)
2396 return;
2397
2398 u->on_console = b;
2399 if (b)
2400 manager_ref_console(u->manager);
2401 else
2402 manager_unref_console(u->manager);
2403 }
2404
2405 static void unit_emit_audit_start(Unit *u) {
2406 assert(u);
2407
2408 if (u->type != UNIT_SERVICE)
2409 return;
2410
2411 /* Write audit record if we have just finished starting up */
2412 manager_send_unit_audit(u->manager, u, AUDIT_SERVICE_START, true);
2413 u->in_audit = true;
2414 }
2415
2416 static void unit_emit_audit_stop(Unit *u, UnitActiveState state) {
2417 assert(u);
2418
2419 if (u->type != UNIT_SERVICE)
2420 return;
2421
2422 if (u->in_audit) {
2423 /* Write audit record if we have just finished shutting down */
2424 manager_send_unit_audit(u->manager, u, AUDIT_SERVICE_STOP, state == UNIT_INACTIVE);
2425 u->in_audit = false;
2426 } else {
2427 /* Hmm, if there was no start record written write it now, so that we always have a nice pair */
2428 manager_send_unit_audit(u->manager, u, AUDIT_SERVICE_START, state == UNIT_INACTIVE);
2429
2430 if (state == UNIT_INACTIVE)
2431 manager_send_unit_audit(u->manager, u, AUDIT_SERVICE_STOP, true);
2432 }
2433 }
2434
2435 static bool unit_process_job(Job *j, UnitActiveState ns, UnitNotifyFlags flags) {
2436 bool unexpected = false;
2437 JobResult result;
2438
2439 assert(j);
2440
2441 if (j->state == JOB_WAITING)
2442
2443 /* So we reached a different state for this job. Let's see if we can run it now if it failed previously
2444 * due to EAGAIN. */
2445 job_add_to_run_queue(j);
2446
2447 /* Let's check whether the unit's new state constitutes a finished job, or maybe contradicts a running job and
2448 * hence needs to invalidate jobs. */
2449
2450 switch (j->type) {
2451
2452 case JOB_START:
2453 case JOB_VERIFY_ACTIVE:
2454
2455 if (UNIT_IS_ACTIVE_OR_RELOADING(ns))
2456 job_finish_and_invalidate(j, JOB_DONE, true, false);
2457 else if (j->state == JOB_RUNNING && ns != UNIT_ACTIVATING) {
2458 unexpected = true;
2459
2460 if (UNIT_IS_INACTIVE_OR_FAILED(ns)) {
2461 if (ns == UNIT_FAILED)
2462 result = JOB_FAILED;
2463 else if (FLAGS_SET(flags, UNIT_NOTIFY_SKIP_CONDITION))
2464 result = JOB_SKIPPED;
2465 else
2466 result = JOB_DONE;
2467
2468 job_finish_and_invalidate(j, result, true, false);
2469 }
2470 }
2471
2472 break;
2473
2474 case JOB_RELOAD:
2475 case JOB_RELOAD_OR_START:
2476 case JOB_TRY_RELOAD:
2477
2478 if (j->state == JOB_RUNNING) {
2479 if (ns == UNIT_ACTIVE)
2480 job_finish_and_invalidate(j, (flags & UNIT_NOTIFY_RELOAD_FAILURE) ? JOB_FAILED : JOB_DONE, true, false);
2481 else if (!IN_SET(ns, UNIT_ACTIVATING, UNIT_RELOADING)) {
2482 unexpected = true;
2483
2484 if (UNIT_IS_INACTIVE_OR_FAILED(ns))
2485 job_finish_and_invalidate(j, ns == UNIT_FAILED ? JOB_FAILED : JOB_DONE, true, false);
2486 }
2487 }
2488
2489 break;
2490
2491 case JOB_STOP:
2492 case JOB_RESTART:
2493 case JOB_TRY_RESTART:
2494
2495 if (UNIT_IS_INACTIVE_OR_FAILED(ns))
2496 job_finish_and_invalidate(j, JOB_DONE, true, false);
2497 else if (j->state == JOB_RUNNING && ns != UNIT_DEACTIVATING) {
2498 unexpected = true;
2499 job_finish_and_invalidate(j, JOB_FAILED, true, false);
2500 }
2501
2502 break;
2503
2504 default:
2505 assert_not_reached("Job type unknown");
2506 }
2507
2508 return unexpected;
2509 }
2510
2511 void unit_notify(Unit *u, UnitActiveState os, UnitActiveState ns, UnitNotifyFlags flags) {
2512 const char *reason;
2513 Manager *m;
2514
2515 assert(u);
2516 assert(os < _UNIT_ACTIVE_STATE_MAX);
2517 assert(ns < _UNIT_ACTIVE_STATE_MAX);
2518
2519 /* Note that this is called for all low-level state changes, even if they might map to the same high-level
2520 * UnitActiveState! That means that ns == os is an expected behavior here. For example: if a mount point is
2521 * remounted this function will be called too! */
2522
2523 m = u->manager;
2524
2525 /* Let's enqueue the change signal early. In case this unit has a job associated we want that this unit is in
2526 * the bus queue, so that any job change signal queued will force out the unit change signal first. */
2527 unit_add_to_dbus_queue(u);
2528
2529 /* Update timestamps for state changes */
2530 if (!MANAGER_IS_RELOADING(m)) {
2531 dual_timestamp_get(&u->state_change_timestamp);
2532
2533 if (UNIT_IS_INACTIVE_OR_FAILED(os) && !UNIT_IS_INACTIVE_OR_FAILED(ns))
2534 u->inactive_exit_timestamp = u->state_change_timestamp;
2535 else if (!UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_INACTIVE_OR_FAILED(ns))
2536 u->inactive_enter_timestamp = u->state_change_timestamp;
2537
2538 if (!UNIT_IS_ACTIVE_OR_RELOADING(os) && UNIT_IS_ACTIVE_OR_RELOADING(ns))
2539 u->active_enter_timestamp = u->state_change_timestamp;
2540 else if (UNIT_IS_ACTIVE_OR_RELOADING(os) && !UNIT_IS_ACTIVE_OR_RELOADING(ns))
2541 u->active_exit_timestamp = u->state_change_timestamp;
2542 }
2543
2544 /* Keep track of failed units */
2545 (void) manager_update_failed_units(m, u, ns == UNIT_FAILED);
2546
2547 /* Make sure the cgroup and state files are always removed when we become inactive */
2548 if (UNIT_IS_INACTIVE_OR_FAILED(ns)) {
2549 unit_prune_cgroup(u);
2550 unit_unlink_state_files(u);
2551 }
2552
2553 unit_update_on_console(u);
2554
2555 if (!MANAGER_IS_RELOADING(m)) {
2556 bool unexpected;
2557
2558 /* Let's propagate state changes to the job */
2559 if (u->job)
2560 unexpected = unit_process_job(u->job, ns, flags);
2561 else
2562 unexpected = true;
2563
2564 /* If this state change happened without being requested by a job, then let's retroactively start or
2565 * stop dependencies. We skip that step when deserializing, since we don't want to create any
2566 * additional jobs just because something is already activated. */
2567
2568 if (unexpected) {
2569 if (UNIT_IS_INACTIVE_OR_FAILED(os) && UNIT_IS_ACTIVE_OR_ACTIVATING(ns))
2570 retroactively_start_dependencies(u);
2571 else if (UNIT_IS_ACTIVE_OR_ACTIVATING(os) && UNIT_IS_INACTIVE_OR_DEACTIVATING(ns))
2572 retroactively_stop_dependencies(u);
2573 }
2574
2575 /* stop unneeded units regardless if going down was expected or not */
2576 if (UNIT_IS_INACTIVE_OR_FAILED(ns))
2577 check_unneeded_dependencies(u);
2578
2579 if (ns != os && ns == UNIT_FAILED) {
2580 log_unit_debug(u, "Unit entered failed state.");
2581
2582 if (!(flags & UNIT_NOTIFY_WILL_AUTO_RESTART))
2583 unit_start_on_failure(u);
2584 }
2585
2586 if (UNIT_IS_ACTIVE_OR_RELOADING(ns) && !UNIT_IS_ACTIVE_OR_RELOADING(os)) {
2587 /* This unit just finished starting up */
2588
2589 unit_emit_audit_start(u);
2590 manager_send_unit_plymouth(m, u);
2591 }
2592
2593 if (UNIT_IS_INACTIVE_OR_FAILED(ns) && !UNIT_IS_INACTIVE_OR_FAILED(os)) {
2594 /* This unit just stopped/failed. */
2595
2596 unit_emit_audit_stop(u, ns);
2597 unit_log_resources(u);
2598 }
2599 }
2600
2601 manager_recheck_journal(m);
2602 manager_recheck_dbus(m);
2603
2604 unit_trigger_notify(u);
2605
2606 if (!MANAGER_IS_RELOADING(m)) {
2607 /* Maybe we finished startup and are now ready for being stopped because unneeded? */
2608 unit_submit_to_stop_when_unneeded_queue(u);
2609
2610 /* Maybe we finished startup, but something we needed has vanished? Let's die then. (This happens when
2611 * something BindsTo= to a Type=oneshot unit, as these units go directly from starting to inactive,
2612 * without ever entering started.) */
2613 unit_check_binds_to(u);
2614
2615 if (os != UNIT_FAILED && ns == UNIT_FAILED) {
2616 reason = strjoina("unit ", u->id, " failed");
2617 emergency_action(m, u->failure_action, 0, u->reboot_arg, unit_failure_action_exit_status(u), reason);
2618 } else if (!UNIT_IS_INACTIVE_OR_FAILED(os) && ns == UNIT_INACTIVE) {
2619 reason = strjoina("unit ", u->id, " succeeded");
2620 emergency_action(m, u->success_action, 0, u->reboot_arg, unit_success_action_exit_status(u), reason);
2621 }
2622 }
2623
2624 unit_add_to_gc_queue(u);
2625 }
2626
2627 int unit_watch_pid(Unit *u, pid_t pid, bool exclusive) {
2628 int r;
2629
2630 assert(u);
2631 assert(pid_is_valid(pid));
2632
2633 /* Watch a specific PID */
2634
2635 /* Caller might be sure that this PID belongs to this unit only. Let's take this
2636 * opportunity to remove any stalled references to this PID as they can be created
2637 * easily (when watching a process which is not our direct child). */
2638 if (exclusive)
2639 manager_unwatch_pid(u->manager, pid);
2640
2641 r = set_ensure_allocated(&u->pids, NULL);
2642 if (r < 0)
2643 return r;
2644
2645 r = hashmap_ensure_allocated(&u->manager->watch_pids, NULL);
2646 if (r < 0)
2647 return r;
2648
2649 /* First try, let's add the unit keyed by "pid". */
2650 r = hashmap_put(u->manager->watch_pids, PID_TO_PTR(pid), u);
2651 if (r == -EEXIST) {
2652 Unit **array;
2653 bool found = false;
2654 size_t n = 0;
2655
2656 /* OK, the "pid" key is already assigned to a different unit. Let's see if the "-pid" key (which points
2657 * to an array of Units rather than just a Unit), lists us already. */
2658
2659 array = hashmap_get(u->manager->watch_pids, PID_TO_PTR(-pid));
2660 if (array)
2661 for (; array[n]; n++)
2662 if (array[n] == u)
2663 found = true;
2664
2665 if (found) /* Found it already? if so, do nothing */
2666 r = 0;
2667 else {
2668 Unit **new_array;
2669
2670 /* Allocate a new array */
2671 new_array = new(Unit*, n + 2);
2672 if (!new_array)
2673 return -ENOMEM;
2674
2675 memcpy_safe(new_array, array, sizeof(Unit*) * n);
2676 new_array[n] = u;
2677 new_array[n+1] = NULL;
2678
2679 /* Add or replace the old array */
2680 r = hashmap_replace(u->manager->watch_pids, PID_TO_PTR(-pid), new_array);
2681 if (r < 0) {
2682 free(new_array);
2683 return r;
2684 }
2685
2686 free(array);
2687 }
2688 } else if (r < 0)
2689 return r;
2690
2691 r = set_put(u->pids, PID_TO_PTR(pid));
2692 if (r < 0)
2693 return r;
2694
2695 return 0;
2696 }
2697
2698 void unit_unwatch_pid(Unit *u, pid_t pid) {
2699 Unit **array;
2700
2701 assert(u);
2702 assert(pid_is_valid(pid));
2703
2704 /* First let's drop the unit in case it's keyed as "pid". */
2705 (void) hashmap_remove_value(u->manager->watch_pids, PID_TO_PTR(pid), u);
2706
2707 /* Then, let's also drop the unit, in case it's in the array keyed by -pid */
2708 array = hashmap_get(u->manager->watch_pids, PID_TO_PTR(-pid));
2709 if (array) {
2710 size_t n, m = 0;
2711
2712 /* Let's iterate through the array, dropping our own entry */
2713 for (n = 0; array[n]; n++)
2714 if (array[n] != u)
2715 array[m++] = array[n];
2716 array[m] = NULL;
2717
2718 if (m == 0) {
2719 /* The array is now empty, remove the entire entry */
2720 assert_se(hashmap_remove(u->manager->watch_pids, PID_TO_PTR(-pid)) == array);
2721 free(array);
2722 }
2723 }
2724
2725 (void) set_remove(u->pids, PID_TO_PTR(pid));
2726 }
2727
2728 void unit_unwatch_all_pids(Unit *u) {
2729 assert(u);
2730
2731 while (!set_isempty(u->pids))
2732 unit_unwatch_pid(u, PTR_TO_PID(set_first(u->pids)));
2733
2734 u->pids = set_free(u->pids);
2735 }
2736
2737 static void unit_tidy_watch_pids(Unit *u) {
2738 pid_t except1, except2;
2739 Iterator i;
2740 void *e;
2741
2742 assert(u);
2743
2744 /* Cleans dead PIDs from our list */
2745
2746 except1 = unit_main_pid(u);
2747 except2 = unit_control_pid(u);
2748
2749 SET_FOREACH(e, u->pids, i) {
2750 pid_t pid = PTR_TO_PID(e);
2751
2752 if (pid == except1 || pid == except2)
2753 continue;
2754
2755 if (!pid_is_unwaited(pid))
2756 unit_unwatch_pid(u, pid);
2757 }
2758 }
2759
2760 static int on_rewatch_pids_event(sd_event_source *s, void *userdata) {
2761 Unit *u = userdata;
2762
2763 assert(s);
2764 assert(u);
2765
2766 unit_tidy_watch_pids(u);
2767 unit_watch_all_pids(u);
2768
2769 /* If the PID set is empty now, then let's finish this off. */
2770 unit_synthesize_cgroup_empty_event(u);
2771
2772 return 0;
2773 }
2774
2775 int unit_enqueue_rewatch_pids(Unit *u) {
2776 int r;
2777
2778 assert(u);
2779
2780 if (!u->cgroup_path)
2781 return -ENOENT;
2782
2783 r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
2784 if (r < 0)
2785 return r;
2786 if (r > 0) /* On unified we can use proper notifications */
2787 return 0;
2788
2789 /* Enqueues a low-priority job that will clean up dead PIDs from our list of PIDs to watch and subscribe to new
2790 * PIDs that might have appeared. We do this in a delayed job because the work might be quite slow, as it
2791 * involves issuing kill(pid, 0) on all processes we watch. */
2792
2793 if (!u->rewatch_pids_event_source) {
2794 _cleanup_(sd_event_source_unrefp) sd_event_source *s = NULL;
2795
2796 r = sd_event_add_defer(u->manager->event, &s, on_rewatch_pids_event, u);
2797 if (r < 0)
2798 return log_error_errno(r, "Failed to allocate event source for tidying watched PIDs: %m");
2799
2800 r = sd_event_source_set_priority(s, SD_EVENT_PRIORITY_IDLE);
2801 if (r < 0)
2802 return log_error_errno(r, "Failed to adjust priority of event source for tidying watched PIDs: %m");
2803
2804 (void) sd_event_source_set_description(s, "tidy-watch-pids");
2805
2806 u->rewatch_pids_event_source = TAKE_PTR(s);
2807 }
2808
2809 r = sd_event_source_set_enabled(u->rewatch_pids_event_source, SD_EVENT_ONESHOT);
2810 if (r < 0)
2811 return log_error_errno(r, "Failed to enable event source for tidying watched PIDs: %m");
2812
2813 return 0;
2814 }
2815
2816 void unit_dequeue_rewatch_pids(Unit *u) {
2817 int r;
2818 assert(u);
2819
2820 if (!u->rewatch_pids_event_source)
2821 return;
2822
2823 r = sd_event_source_set_enabled(u->rewatch_pids_event_source, SD_EVENT_OFF);
2824 if (r < 0)
2825 log_warning_errno(r, "Failed to disable event source for tidying watched PIDs, ignoring: %m");
2826
2827 u->rewatch_pids_event_source = sd_event_source_unref(u->rewatch_pids_event_source);
2828 }
2829
2830 bool unit_job_is_applicable(Unit *u, JobType j) {
2831 assert(u);
2832 assert(j >= 0 && j < _JOB_TYPE_MAX);
2833
2834 switch (j) {
2835
2836 case JOB_VERIFY_ACTIVE:
2837 case JOB_START:
2838 case JOB_NOP:
2839 /* Note that we don't check unit_can_start() here. That's because .device units and suchlike are not
2840 * startable by us but may appear due to external events, and it thus makes sense to permit enqueing
2841 * jobs for it. */
2842 return true;
2843
2844 case JOB_STOP:
2845 /* Similar as above. However, perpetual units can never be stopped (neither explicitly nor due to
2846 * external events), hence it makes no sense to permit enqueing such a request either. */
2847 return !u->perpetual;
2848
2849 case JOB_RESTART:
2850 case JOB_TRY_RESTART:
2851 return unit_can_stop(u) && unit_can_start(u);
2852
2853 case JOB_RELOAD:
2854 case JOB_TRY_RELOAD:
2855 return unit_can_reload(u);
2856
2857 case JOB_RELOAD_OR_START:
2858 return unit_can_reload(u) && unit_can_start(u);
2859
2860 default:
2861 assert_not_reached("Invalid job type");
2862 }
2863 }
2864
2865 static void maybe_warn_about_dependency(Unit *u, const char *other, UnitDependency dependency) {
2866 assert(u);
2867
2868 /* Only warn about some unit types */
2869 if (!IN_SET(dependency, UNIT_CONFLICTS, UNIT_CONFLICTED_BY, UNIT_BEFORE, UNIT_AFTER, UNIT_ON_FAILURE, UNIT_TRIGGERS, UNIT_TRIGGERED_BY))
2870 return;
2871
2872 if (streq_ptr(u->id, other))
2873 log_unit_warning(u, "Dependency %s=%s dropped", unit_dependency_to_string(dependency), u->id);
2874 else
2875 log_unit_warning(u, "Dependency %s=%s dropped, merged into %s", unit_dependency_to_string(dependency), strna(other), u->id);
2876 }
2877
2878 static int unit_add_dependency_hashmap(
2879 Hashmap **h,
2880 Unit *other,
2881 UnitDependencyMask origin_mask,
2882 UnitDependencyMask destination_mask) {
2883
2884 UnitDependencyInfo info;
2885 int r;
2886
2887 assert(h);
2888 assert(other);
2889 assert(origin_mask < _UNIT_DEPENDENCY_MASK_FULL);
2890 assert(destination_mask < _UNIT_DEPENDENCY_MASK_FULL);
2891 assert(origin_mask > 0 || destination_mask > 0);
2892
2893 r = hashmap_ensure_allocated(h, NULL);
2894 if (r < 0)
2895 return r;
2896
2897 assert_cc(sizeof(void*) == sizeof(info));
2898
2899 info.data = hashmap_get(*h, other);
2900 if (info.data) {
2901 /* Entry already exists. Add in our mask. */
2902
2903 if (FLAGS_SET(origin_mask, info.origin_mask) &&
2904 FLAGS_SET(destination_mask, info.destination_mask))
2905 return 0; /* NOP */
2906
2907 info.origin_mask |= origin_mask;
2908 info.destination_mask |= destination_mask;
2909
2910 r = hashmap_update(*h, other, info.data);
2911 } else {
2912 info = (UnitDependencyInfo) {
2913 .origin_mask = origin_mask,
2914 .destination_mask = destination_mask,
2915 };
2916
2917 r = hashmap_put(*h, other, info.data);
2918 }
2919 if (r < 0)
2920 return r;
2921
2922 return 1;
2923 }
2924
2925 int unit_add_dependency(
2926 Unit *u,
2927 UnitDependency d,
2928 Unit *other,
2929 bool add_reference,
2930 UnitDependencyMask mask) {
2931
2932 static const UnitDependency inverse_table[_UNIT_DEPENDENCY_MAX] = {
2933 [UNIT_REQUIRES] = UNIT_REQUIRED_BY,
2934 [UNIT_WANTS] = UNIT_WANTED_BY,
2935 [UNIT_REQUISITE] = UNIT_REQUISITE_OF,
2936 [UNIT_BINDS_TO] = UNIT_BOUND_BY,
2937 [UNIT_PART_OF] = UNIT_CONSISTS_OF,
2938 [UNIT_REQUIRED_BY] = UNIT_REQUIRES,
2939 [UNIT_REQUISITE_OF] = UNIT_REQUISITE,
2940 [UNIT_WANTED_BY] = UNIT_WANTS,
2941 [UNIT_BOUND_BY] = UNIT_BINDS_TO,
2942 [UNIT_CONSISTS_OF] = UNIT_PART_OF,
2943 [UNIT_CONFLICTS] = UNIT_CONFLICTED_BY,
2944 [UNIT_CONFLICTED_BY] = UNIT_CONFLICTS,
2945 [UNIT_BEFORE] = UNIT_AFTER,
2946 [UNIT_AFTER] = UNIT_BEFORE,
2947 [UNIT_ON_FAILURE] = _UNIT_DEPENDENCY_INVALID,
2948 [UNIT_REFERENCES] = UNIT_REFERENCED_BY,
2949 [UNIT_REFERENCED_BY] = UNIT_REFERENCES,
2950 [UNIT_TRIGGERS] = UNIT_TRIGGERED_BY,
2951 [UNIT_TRIGGERED_BY] = UNIT_TRIGGERS,
2952 [UNIT_PROPAGATES_RELOAD_TO] = UNIT_RELOAD_PROPAGATED_FROM,
2953 [UNIT_RELOAD_PROPAGATED_FROM] = UNIT_PROPAGATES_RELOAD_TO,
2954 [UNIT_JOINS_NAMESPACE_OF] = UNIT_JOINS_NAMESPACE_OF,
2955 };
2956 Unit *original_u = u, *original_other = other;
2957 int r;
2958
2959 assert(u);
2960 assert(d >= 0 && d < _UNIT_DEPENDENCY_MAX);
2961 assert(other);
2962
2963 u = unit_follow_merge(u);
2964 other = unit_follow_merge(other);
2965
2966 /* We won't allow dependencies on ourselves. We will not
2967 * consider them an error however. */
2968 if (u == other) {
2969 maybe_warn_about_dependency(original_u, original_other->id, d);
2970 return 0;
2971 }
2972
2973 /* Note that ordering a device unit after a unit is permitted since it
2974 * allows to start its job running timeout at a specific time. */
2975 if (d == UNIT_BEFORE && other->type == UNIT_DEVICE) {
2976 log_unit_warning(u, "Dependency Before=%s ignored (.device units cannot be delayed)", other->id);
2977 return 0;
2978 }
2979
2980 if (d == UNIT_ON_FAILURE && !UNIT_VTABLE(u)->can_fail) {
2981 log_unit_warning(u, "Requested dependency OnFailure=%s ignored (%s units cannot fail).", other->id, unit_type_to_string(u->type));
2982 return 0;
2983 }
2984
2985 if (d == UNIT_TRIGGERS && !UNIT_VTABLE(u)->can_trigger)
2986 return log_unit_error_errno(u, SYNTHETIC_ERRNO(EINVAL),
2987 "Requested dependency Triggers=%s refused (%s units cannot trigger other units).", other->id, unit_type_to_string(u->type));
2988 if (d == UNIT_TRIGGERED_BY && !UNIT_VTABLE(other)->can_trigger)
2989 return log_unit_error_errno(u, SYNTHETIC_ERRNO(EINVAL),
2990 "Requested dependency TriggeredBy=%s refused (%s units cannot trigger other units).", other->id, unit_type_to_string(other->type));
2991
2992 r = unit_add_dependency_hashmap(u->dependencies + d, other, mask, 0);
2993 if (r < 0)
2994 return r;
2995
2996 if (inverse_table[d] != _UNIT_DEPENDENCY_INVALID && inverse_table[d] != d) {
2997 r = unit_add_dependency_hashmap(other->dependencies + inverse_table[d], u, 0, mask);
2998 if (r < 0)
2999 return r;
3000 }
3001
3002 if (add_reference) {
3003 r = unit_add_dependency_hashmap(u->dependencies + UNIT_REFERENCES, other, mask, 0);
3004 if (r < 0)
3005 return r;
3006
3007 r = unit_add_dependency_hashmap(other->dependencies + UNIT_REFERENCED_BY, u, 0, mask);
3008 if (r < 0)
3009 return r;
3010 }
3011
3012 unit_add_to_dbus_queue(u);
3013 return 0;
3014 }
3015
3016 int unit_add_two_dependencies(Unit *u, UnitDependency d, UnitDependency e, Unit *other, bool add_reference, UnitDependencyMask mask) {
3017 int r;
3018
3019 assert(u);
3020
3021 r = unit_add_dependency(u, d, other, add_reference, mask);
3022 if (r < 0)
3023 return r;
3024
3025 return unit_add_dependency(u, e, other, add_reference, mask);
3026 }
3027
3028 static int resolve_template(Unit *u, const char *name, char **buf, const char **ret) {
3029 int r;
3030
3031 assert(u);
3032 assert(name);
3033 assert(buf);
3034 assert(ret);
3035
3036 if (!unit_name_is_valid(name, UNIT_NAME_TEMPLATE)) {
3037 *buf = NULL;
3038 *ret = name;
3039 return 0;
3040 }
3041
3042 if (u->instance)
3043 r = unit_name_replace_instance(name, u->instance, buf);
3044 else {
3045 _cleanup_free_ char *i = NULL;
3046
3047 r = unit_name_to_prefix(u->id, &i);
3048 if (r < 0)
3049 return r;
3050
3051 r = unit_name_replace_instance(name, i, buf);
3052 }
3053 if (r < 0)
3054 return r;
3055
3056 *ret = *buf;
3057 return 0;
3058 }
3059
3060 int unit_add_dependency_by_name(Unit *u, UnitDependency d, const char *name, bool add_reference, UnitDependencyMask mask) {
3061 _cleanup_free_ char *buf = NULL;
3062 Unit *other;
3063 int r;
3064
3065 assert(u);
3066 assert(name);
3067
3068 r = resolve_template(u, name, &buf, &name);
3069 if (r < 0)
3070 return r;
3071
3072 r = manager_load_unit(u->manager, name, NULL, NULL, &other);
3073 if (r < 0)
3074 return r;
3075
3076 return unit_add_dependency(u, d, other, add_reference, mask);
3077 }
3078
3079 int unit_add_two_dependencies_by_name(Unit *u, UnitDependency d, UnitDependency e, const char *name, bool add_reference, UnitDependencyMask mask) {
3080 _cleanup_free_ char *buf = NULL;
3081 Unit *other;
3082 int r;
3083
3084 assert(u);
3085 assert(name);
3086
3087 r = resolve_template(u, name, &buf, &name);
3088 if (r < 0)
3089 return r;
3090
3091 r = manager_load_unit(u->manager, name, NULL, NULL, &other);
3092 if (r < 0)
3093 return r;
3094
3095 return unit_add_two_dependencies(u, d, e, other, add_reference, mask);
3096 }
3097
3098 int set_unit_path(const char *p) {
3099 /* This is mostly for debug purposes */
3100 if (setenv("SYSTEMD_UNIT_PATH", p, 1) < 0)
3101 return -errno;
3102
3103 return 0;
3104 }
3105
3106 char *unit_dbus_path(Unit *u) {
3107 assert(u);
3108
3109 if (!u->id)
3110 return NULL;
3111
3112 return unit_dbus_path_from_name(u->id);
3113 }
3114
3115 char *unit_dbus_path_invocation_id(Unit *u) {
3116 assert(u);
3117
3118 if (sd_id128_is_null(u->invocation_id))
3119 return NULL;
3120
3121 return unit_dbus_path_from_name(u->invocation_id_string);
3122 }
3123
3124 int unit_set_slice(Unit *u, Unit *slice) {
3125 assert(u);
3126 assert(slice);
3127
3128 /* Sets the unit slice if it has not been set before. Is extra
3129 * careful, to only allow this for units that actually have a
3130 * cgroup context. Also, we don't allow to set this for slices
3131 * (since the parent slice is derived from the name). Make
3132 * sure the unit we set is actually a slice. */
3133
3134 if (!UNIT_HAS_CGROUP_CONTEXT(u))
3135 return -EOPNOTSUPP;
3136
3137 if (u->type == UNIT_SLICE)
3138 return -EINVAL;
3139
3140 if (unit_active_state(u) != UNIT_INACTIVE)
3141 return -EBUSY;
3142
3143 if (slice->type != UNIT_SLICE)
3144 return -EINVAL;
3145
3146 if (unit_has_name(u, SPECIAL_INIT_SCOPE) &&
3147 !unit_has_name(slice, SPECIAL_ROOT_SLICE))
3148 return -EPERM;
3149
3150 if (UNIT_DEREF(u->slice) == slice)
3151 return 0;
3152
3153 /* Disallow slice changes if @u is already bound to cgroups */
3154 if (UNIT_ISSET(u->slice) && u->cgroup_realized)
3155 return -EBUSY;
3156
3157 unit_ref_set(&u->slice, u, slice);
3158 return 1;
3159 }
3160
3161 int unit_set_default_slice(Unit *u) {
3162 const char *slice_name;
3163 Unit *slice;
3164 int r;
3165
3166 assert(u);
3167
3168 if (UNIT_ISSET(u->slice))
3169 return 0;
3170
3171 if (u->instance) {
3172 _cleanup_free_ char *prefix = NULL, *escaped = NULL;
3173
3174 /* Implicitly place all instantiated units in their
3175 * own per-template slice */
3176
3177 r = unit_name_to_prefix(u->id, &prefix);
3178 if (r < 0)
3179 return r;
3180
3181 /* The prefix is already escaped, but it might include
3182 * "-" which has a special meaning for slice units,
3183 * hence escape it here extra. */
3184 escaped = unit_name_escape(prefix);
3185 if (!escaped)
3186 return -ENOMEM;
3187
3188 if (MANAGER_IS_SYSTEM(u->manager))
3189 slice_name = strjoina("system-", escaped, ".slice");
3190 else
3191 slice_name = strjoina(escaped, ".slice");
3192 } else
3193 slice_name =
3194 MANAGER_IS_SYSTEM(u->manager) && !unit_has_name(u, SPECIAL_INIT_SCOPE)
3195 ? SPECIAL_SYSTEM_SLICE
3196 : SPECIAL_ROOT_SLICE;
3197
3198 r = manager_load_unit(u->manager, slice_name, NULL, NULL, &slice);
3199 if (r < 0)
3200 return r;
3201
3202 return unit_set_slice(u, slice);
3203 }
3204
3205 const char *unit_slice_name(Unit *u) {
3206 assert(u);
3207
3208 if (!UNIT_ISSET(u->slice))
3209 return NULL;
3210
3211 return UNIT_DEREF(u->slice)->id;
3212 }
3213
3214 int unit_load_related_unit(Unit *u, const char *type, Unit **_found) {
3215 _cleanup_free_ char *t = NULL;
3216 int r;
3217
3218 assert(u);
3219 assert(type);
3220 assert(_found);
3221
3222 r = unit_name_change_suffix(u->id, type, &t);
3223 if (r < 0)
3224 return r;
3225 if (unit_has_name(u, t))
3226 return -EINVAL;
3227
3228 r = manager_load_unit(u->manager, t, NULL, NULL, _found);
3229 assert(r < 0 || *_found != u);
3230 return r;
3231 }
3232
3233 static int signal_name_owner_changed(sd_bus_message *message, void *userdata, sd_bus_error *error) {
3234 const char *new_owner;
3235 Unit *u = userdata;
3236 int r;
3237
3238 assert(message);
3239 assert(u);
3240
3241 r = sd_bus_message_read(message, "sss", NULL, NULL, &new_owner);
3242 if (r < 0) {
3243 bus_log_parse_error(r);
3244 return 0;
3245 }
3246
3247 if (UNIT_VTABLE(u)->bus_name_owner_change)
3248 UNIT_VTABLE(u)->bus_name_owner_change(u, empty_to_null(new_owner));
3249
3250 return 0;
3251 }
3252
3253 static int get_name_owner_handler(sd_bus_message *message, void *userdata, sd_bus_error *error) {
3254 const sd_bus_error *e;
3255 const char *new_owner;
3256 Unit *u = userdata;
3257 int r;
3258
3259 assert(message);
3260 assert(u);
3261
3262 u->get_name_owner_slot = sd_bus_slot_unref(u->get_name_owner_slot);
3263
3264 e = sd_bus_message_get_error(message);
3265 if (e) {
3266 if (!sd_bus_error_has_name(e, "org.freedesktop.DBus.Error.NameHasNoOwner"))
3267 log_unit_error(u, "Unexpected error response from GetNameOwner(): %s", e->message);
3268
3269 new_owner = NULL;
3270 } else {
3271 r = sd_bus_message_read(message, "s", &new_owner);
3272 if (r < 0)
3273 return bus_log_parse_error(r);
3274
3275 assert(!isempty(new_owner));
3276 }
3277
3278 if (UNIT_VTABLE(u)->bus_name_owner_change)
3279 UNIT_VTABLE(u)->bus_name_owner_change(u, new_owner);
3280
3281 return 0;
3282 }
3283
3284 int unit_install_bus_match(Unit *u, sd_bus *bus, const char *name) {
3285 const char *match;
3286 int r;
3287
3288 assert(u);
3289 assert(bus);
3290 assert(name);
3291
3292 if (u->match_bus_slot || u->get_name_owner_slot)
3293 return -EBUSY;
3294
3295 match = strjoina("type='signal',"
3296 "sender='org.freedesktop.DBus',"
3297 "path='/org/freedesktop/DBus',"
3298 "interface='org.freedesktop.DBus',"
3299 "member='NameOwnerChanged',"
3300 "arg0='", name, "'");
3301
3302 r = sd_bus_add_match_async(bus, &u->match_bus_slot, match, signal_name_owner_changed, NULL, u);
3303 if (r < 0)
3304 return r;
3305
3306 r = sd_bus_call_method_async(
3307 bus,
3308 &u->get_name_owner_slot,
3309 "org.freedesktop.DBus",
3310 "/org/freedesktop/DBus",
3311 "org.freedesktop.DBus",
3312 "GetNameOwner",
3313 get_name_owner_handler,
3314 u,
3315 "s", name);
3316 if (r < 0) {
3317 u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
3318 return r;
3319 }
3320
3321 log_unit_debug(u, "Watching D-Bus name '%s'.", name);
3322 return 0;
3323 }
3324
3325 int unit_watch_bus_name(Unit *u, const char *name) {
3326 int r;
3327
3328 assert(u);
3329 assert(name);
3330
3331 /* Watch a specific name on the bus. We only support one unit
3332 * watching each name for now. */
3333
3334 if (u->manager->api_bus) {
3335 /* If the bus is already available, install the match directly.
3336 * Otherwise, just put the name in the list. bus_setup_api() will take care later. */
3337 r = unit_install_bus_match(u, u->manager->api_bus, name);
3338 if (r < 0)
3339 return log_warning_errno(r, "Failed to subscribe to NameOwnerChanged signal for '%s': %m", name);
3340 }
3341
3342 r = hashmap_put(u->manager->watch_bus, name, u);
3343 if (r < 0) {
3344 u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
3345 u->get_name_owner_slot = sd_bus_slot_unref(u->get_name_owner_slot);
3346 return log_warning_errno(r, "Failed to put bus name to hashmap: %m");
3347 }
3348
3349 return 0;
3350 }
3351
3352 void unit_unwatch_bus_name(Unit *u, const char *name) {
3353 assert(u);
3354 assert(name);
3355
3356 (void) hashmap_remove_value(u->manager->watch_bus, name, u);
3357 u->match_bus_slot = sd_bus_slot_unref(u->match_bus_slot);
3358 u->get_name_owner_slot = sd_bus_slot_unref(u->get_name_owner_slot);
3359 }
3360
3361 bool unit_can_serialize(Unit *u) {
3362 assert(u);
3363
3364 return UNIT_VTABLE(u)->serialize && UNIT_VTABLE(u)->deserialize_item;
3365 }
3366
3367 static int serialize_cgroup_mask(FILE *f, const char *key, CGroupMask mask) {
3368 _cleanup_free_ char *s = NULL;
3369 int r;
3370
3371 assert(f);
3372 assert(key);
3373
3374 if (mask == 0)
3375 return 0;
3376
3377 r = cg_mask_to_string(mask, &s);
3378 if (r < 0)
3379 return log_error_errno(r, "Failed to format cgroup mask: %m");
3380
3381 return serialize_item(f, key, s);
3382 }
3383
3384 static const char *const ip_accounting_metric_field[_CGROUP_IP_ACCOUNTING_METRIC_MAX] = {
3385 [CGROUP_IP_INGRESS_BYTES] = "ip-accounting-ingress-bytes",
3386 [CGROUP_IP_INGRESS_PACKETS] = "ip-accounting-ingress-packets",
3387 [CGROUP_IP_EGRESS_BYTES] = "ip-accounting-egress-bytes",
3388 [CGROUP_IP_EGRESS_PACKETS] = "ip-accounting-egress-packets",
3389 };
3390
3391 static const char *const io_accounting_metric_field_base[_CGROUP_IO_ACCOUNTING_METRIC_MAX] = {
3392 [CGROUP_IO_READ_BYTES] = "io-accounting-read-bytes-base",
3393 [CGROUP_IO_WRITE_BYTES] = "io-accounting-write-bytes-base",
3394 [CGROUP_IO_READ_OPERATIONS] = "io-accounting-read-operations-base",
3395 [CGROUP_IO_WRITE_OPERATIONS] = "io-accounting-write-operations-base",
3396 };
3397
3398 static const char *const io_accounting_metric_field_last[_CGROUP_IO_ACCOUNTING_METRIC_MAX] = {
3399 [CGROUP_IO_READ_BYTES] = "io-accounting-read-bytes-last",
3400 [CGROUP_IO_WRITE_BYTES] = "io-accounting-write-bytes-last",
3401 [CGROUP_IO_READ_OPERATIONS] = "io-accounting-read-operations-last",
3402 [CGROUP_IO_WRITE_OPERATIONS] = "io-accounting-write-operations-last",
3403 };
3404
3405 int unit_serialize(Unit *u, FILE *f, FDSet *fds, bool serialize_jobs) {
3406 CGroupIPAccountingMetric m;
3407 int r;
3408
3409 assert(u);
3410 assert(f);
3411 assert(fds);
3412
3413 if (unit_can_serialize(u)) {
3414 r = UNIT_VTABLE(u)->serialize(u, f, fds);
3415 if (r < 0)
3416 return r;
3417 }
3418
3419 (void) serialize_dual_timestamp(f, "state-change-timestamp", &u->state_change_timestamp);
3420
3421 (void) serialize_dual_timestamp(f, "inactive-exit-timestamp", &u->inactive_exit_timestamp);
3422 (void) serialize_dual_timestamp(f, "active-enter-timestamp", &u->active_enter_timestamp);
3423 (void) serialize_dual_timestamp(f, "active-exit-timestamp", &u->active_exit_timestamp);
3424 (void) serialize_dual_timestamp(f, "inactive-enter-timestamp", &u->inactive_enter_timestamp);
3425
3426 (void) serialize_dual_timestamp(f, "condition-timestamp", &u->condition_timestamp);
3427 (void) serialize_dual_timestamp(f, "assert-timestamp", &u->assert_timestamp);
3428
3429 if (dual_timestamp_is_set(&u->condition_timestamp))
3430 (void) serialize_bool(f, "condition-result", u->condition_result);
3431
3432 if (dual_timestamp_is_set(&u->assert_timestamp))
3433 (void) serialize_bool(f, "assert-result", u->assert_result);
3434
3435 (void) serialize_bool(f, "transient", u->transient);
3436 (void) serialize_bool(f, "in-audit", u->in_audit);
3437
3438 (void) serialize_bool(f, "exported-invocation-id", u->exported_invocation_id);
3439 (void) serialize_bool(f, "exported-log-level-max", u->exported_log_level_max);
3440 (void) serialize_bool(f, "exported-log-extra-fields", u->exported_log_extra_fields);
3441 (void) serialize_bool(f, "exported-log-rate-limit-interval", u->exported_log_ratelimit_interval);
3442 (void) serialize_bool(f, "exported-log-rate-limit-burst", u->exported_log_ratelimit_burst);
3443
3444 (void) serialize_item_format(f, "cpu-usage-base", "%" PRIu64, u->cpu_usage_base);
3445 if (u->cpu_usage_last != NSEC_INFINITY)
3446 (void) serialize_item_format(f, "cpu-usage-last", "%" PRIu64, u->cpu_usage_last);
3447
3448 if (u->oom_kill_last > 0)
3449 (void) serialize_item_format(f, "oom-kill-last", "%" PRIu64, u->oom_kill_last);
3450
3451 for (CGroupIOAccountingMetric im = 0; im < _CGROUP_IO_ACCOUNTING_METRIC_MAX; im++) {
3452 (void) serialize_item_format(f, io_accounting_metric_field_base[im], "%" PRIu64, u->io_accounting_base[im]);
3453
3454 if (u->io_accounting_last[im] != UINT64_MAX)
3455 (void) serialize_item_format(f, io_accounting_metric_field_last[im], "%" PRIu64, u->io_accounting_last[im]);
3456 }
3457
3458 if (u->cgroup_path)
3459 (void) serialize_item(f, "cgroup", u->cgroup_path);
3460
3461 (void) serialize_bool(f, "cgroup-realized", u->cgroup_realized);
3462 (void) serialize_cgroup_mask(f, "cgroup-realized-mask", u->cgroup_realized_mask);
3463 (void) serialize_cgroup_mask(f, "cgroup-enabled-mask", u->cgroup_enabled_mask);
3464 (void) serialize_cgroup_mask(f, "cgroup-invalidated-mask", u->cgroup_invalidated_mask);
3465
3466 if (uid_is_valid(u->ref_uid))
3467 (void) serialize_item_format(f, "ref-uid", UID_FMT, u->ref_uid);
3468 if (gid_is_valid(u->ref_gid))
3469 (void) serialize_item_format(f, "ref-gid", GID_FMT, u->ref_gid);
3470
3471 if (!sd_id128_is_null(u->invocation_id))
3472 (void) serialize_item_format(f, "invocation-id", SD_ID128_FORMAT_STR, SD_ID128_FORMAT_VAL(u->invocation_id));
3473
3474 bus_track_serialize(u->bus_track, f, "ref");
3475
3476 for (m = 0; m < _CGROUP_IP_ACCOUNTING_METRIC_MAX; m++) {
3477 uint64_t v;
3478
3479 r = unit_get_ip_accounting(u, m, &v);
3480 if (r >= 0)
3481 (void) serialize_item_format(f, ip_accounting_metric_field[m], "%" PRIu64, v);
3482 }
3483
3484 if (serialize_jobs) {
3485 if (u->job) {
3486 fputs("job\n", f);
3487 job_serialize(u->job, f);
3488 }
3489
3490 if (u->nop_job) {
3491 fputs("job\n", f);
3492 job_serialize(u->nop_job, f);
3493 }
3494 }
3495
3496 /* End marker */
3497 fputc('\n', f);
3498 return 0;
3499 }
3500
3501 static int unit_deserialize_job(Unit *u, FILE *f) {
3502 _cleanup_(job_freep) Job *j = NULL;
3503 int r;
3504
3505 assert(u);
3506 assert(f);
3507
3508 j = job_new_raw(u);
3509 if (!j)
3510 return log_oom();
3511
3512 r = job_deserialize(j, f);
3513 if (r < 0)
3514 return r;
3515
3516 r = job_install_deserialized(j);
3517 if (r < 0)
3518 return r;
3519
3520 TAKE_PTR(j);
3521 return 0;
3522 }
3523
3524 int unit_deserialize(Unit *u, FILE *f, FDSet *fds) {
3525 int r;
3526
3527 assert(u);
3528 assert(f);
3529 assert(fds);
3530
3531 for (;;) {
3532 _cleanup_free_ char *line = NULL;
3533 char *l, *v;
3534 ssize_t m;
3535 size_t k;
3536
3537 r = read_line(f, LONG_LINE_MAX, &line);
3538 if (r < 0)
3539 return log_error_errno(r, "Failed to read serialization line: %m");
3540 if (r == 0) /* eof */
3541 break;
3542
3543 l = strstrip(line);
3544 if (isempty(l)) /* End marker */
3545 break;
3546
3547 k = strcspn(l, "=");
3548
3549 if (l[k] == '=') {
3550 l[k] = 0;
3551 v = l+k+1;
3552 } else
3553 v = l+k;
3554
3555 if (streq(l, "job")) {
3556 if (v[0] == '\0') {
3557 /* New-style serialized job */
3558 r = unit_deserialize_job(u, f);
3559 if (r < 0)
3560 return r;
3561 } else /* Legacy for pre-44 */
3562 log_unit_warning(u, "Update from too old systemd versions are unsupported, cannot deserialize job: %s", v);
3563 continue;
3564 } else if (streq(l, "state-change-timestamp")) {
3565 (void) deserialize_dual_timestamp(v, &u->state_change_timestamp);
3566 continue;
3567 } else if (streq(l, "inactive-exit-timestamp")) {
3568 (void) deserialize_dual_timestamp(v, &u->inactive_exit_timestamp);
3569 continue;
3570 } else if (streq(l, "active-enter-timestamp")) {
3571 (void) deserialize_dual_timestamp(v, &u->active_enter_timestamp);
3572 continue;
3573 } else if (streq(l, "active-exit-timestamp")) {
3574 (void) deserialize_dual_timestamp(v, &u->active_exit_timestamp);
3575 continue;
3576 } else if (streq(l, "inactive-enter-timestamp")) {
3577 (void) deserialize_dual_timestamp(v, &u->inactive_enter_timestamp);
3578 continue;
3579 } else if (streq(l, "condition-timestamp")) {
3580 (void) deserialize_dual_timestamp(v, &u->condition_timestamp);
3581 continue;
3582 } else if (streq(l, "assert-timestamp")) {
3583 (void) deserialize_dual_timestamp(v, &u->assert_timestamp);
3584 continue;
3585 } else if (streq(l, "condition-result")) {
3586
3587 r = parse_boolean(v);
3588 if (r < 0)
3589 log_unit_debug(u, "Failed to parse condition result value %s, ignoring.", v);
3590 else
3591 u->condition_result = r;
3592
3593 continue;
3594
3595 } else if (streq(l, "assert-result")) {
3596
3597 r = parse_boolean(v);
3598 if (r < 0)
3599 log_unit_debug(u, "Failed to parse assert result value %s, ignoring.", v);
3600 else
3601 u->assert_result = r;
3602
3603 continue;
3604
3605 } else if (streq(l, "transient")) {
3606
3607 r = parse_boolean(v);
3608 if (r < 0)
3609 log_unit_debug(u, "Failed to parse transient bool %s, ignoring.", v);
3610 else
3611 u->transient = r;
3612
3613 continue;
3614
3615 } else if (streq(l, "in-audit")) {
3616
3617 r = parse_boolean(v);
3618 if (r < 0)
3619 log_unit_debug(u, "Failed to parse in-audit bool %s, ignoring.", v);
3620 else
3621 u->in_audit = r;
3622
3623 continue;
3624
3625 } else if (streq(l, "exported-invocation-id")) {
3626
3627 r = parse_boolean(v);
3628 if (r < 0)
3629 log_unit_debug(u, "Failed to parse exported invocation ID bool %s, ignoring.", v);
3630 else
3631 u->exported_invocation_id = r;
3632
3633 continue;
3634
3635 } else if (streq(l, "exported-log-level-max")) {
3636
3637 r = parse_boolean(v);
3638 if (r < 0)
3639 log_unit_debug(u, "Failed to parse exported log level max bool %s, ignoring.", v);
3640 else
3641 u->exported_log_level_max = r;
3642
3643 continue;
3644
3645 } else if (streq(l, "exported-log-extra-fields")) {
3646
3647 r = parse_boolean(v);
3648 if (r < 0)
3649 log_unit_debug(u, "Failed to parse exported log extra fields bool %s, ignoring.", v);
3650 else
3651 u->exported_log_extra_fields = r;
3652
3653 continue;
3654
3655 } else if (streq(l, "exported-log-rate-limit-interval")) {
3656
3657 r = parse_boolean(v);
3658 if (r < 0)
3659 log_unit_debug(u, "Failed to parse exported log rate limit interval %s, ignoring.", v);
3660 else
3661 u->exported_log_ratelimit_interval = r;
3662
3663 continue;
3664
3665 } else if (streq(l, "exported-log-rate-limit-burst")) {
3666
3667 r = parse_boolean(v);
3668 if (r < 0)
3669 log_unit_debug(u, "Failed to parse exported log rate limit burst %s, ignoring.", v);
3670 else
3671 u->exported_log_ratelimit_burst = r;
3672
3673 continue;
3674
3675 } else if (STR_IN_SET(l, "cpu-usage-base", "cpuacct-usage-base")) {
3676
3677 r = safe_atou64(v, &u->cpu_usage_base);
3678 if (r < 0)
3679 log_unit_debug(u, "Failed to parse CPU usage base %s, ignoring.", v);
3680
3681 continue;
3682
3683 } else if (streq(l, "cpu-usage-last")) {
3684
3685 r = safe_atou64(v, &u->cpu_usage_last);
3686 if (r < 0)
3687 log_unit_debug(u, "Failed to read CPU usage last %s, ignoring.", v);
3688
3689 continue;
3690
3691 } else if (streq(l, "oom-kill-last")) {
3692
3693 r = safe_atou64(v, &u->oom_kill_last);
3694 if (r < 0)
3695 log_unit_debug(u, "Failed to read OOM kill last %s, ignoring.", v);
3696
3697 continue;
3698
3699 } else if (streq(l, "cgroup")) {
3700
3701 r = unit_set_cgroup_path(u, v);
3702 if (r < 0)
3703 log_unit_debug_errno(u, r, "Failed to set cgroup path %s, ignoring: %m", v);
3704
3705 (void) unit_watch_cgroup(u);
3706 (void) unit_watch_cgroup_memory(u);
3707
3708 continue;
3709 } else if (streq(l, "cgroup-realized")) {
3710 int b;
3711
3712 b = parse_boolean(v);
3713 if (b < 0)
3714 log_unit_debug(u, "Failed to parse cgroup-realized bool %s, ignoring.", v);
3715 else
3716 u->cgroup_realized = b;
3717
3718 continue;
3719
3720 } else if (streq(l, "cgroup-realized-mask")) {
3721
3722 r = cg_mask_from_string(v, &u->cgroup_realized_mask);
3723 if (r < 0)
3724 log_unit_debug(u, "Failed to parse cgroup-realized-mask %s, ignoring.", v);
3725 continue;
3726
3727 } else if (streq(l, "cgroup-enabled-mask")) {
3728
3729 r = cg_mask_from_string(v, &u->cgroup_enabled_mask);
3730 if (r < 0)
3731 log_unit_debug(u, "Failed to parse cgroup-enabled-mask %s, ignoring.", v);
3732 continue;
3733
3734 } else if (streq(l, "cgroup-invalidated-mask")) {
3735
3736 r = cg_mask_from_string(v, &u->cgroup_invalidated_mask);
3737 if (r < 0)
3738 log_unit_debug(u, "Failed to parse cgroup-invalidated-mask %s, ignoring.", v);
3739 continue;
3740
3741 } else if (streq(l, "ref-uid")) {
3742 uid_t uid;
3743
3744 r = parse_uid(v, &uid);
3745 if (r < 0)
3746 log_unit_debug(u, "Failed to parse referenced UID %s, ignoring.", v);
3747 else
3748 unit_ref_uid_gid(u, uid, GID_INVALID);
3749
3750 continue;
3751
3752 } else if (streq(l, "ref-gid")) {
3753 gid_t gid;
3754
3755 r = parse_gid(v, &gid);
3756 if (r < 0)
3757 log_unit_debug(u, "Failed to parse referenced GID %s, ignoring.", v);
3758 else
3759 unit_ref_uid_gid(u, UID_INVALID, gid);
3760
3761 continue;
3762
3763 } else if (streq(l, "ref")) {
3764
3765 r = strv_extend(&u->deserialized_refs, v);
3766 if (r < 0)
3767 return log_oom();
3768
3769 continue;
3770 } else if (streq(l, "invocation-id")) {
3771 sd_id128_t id;
3772
3773 r = sd_id128_from_string(v, &id);
3774 if (r < 0)
3775 log_unit_debug(u, "Failed to parse invocation id %s, ignoring.", v);
3776 else {
3777 r = unit_set_invocation_id(u, id);
3778 if (r < 0)
3779 log_unit_warning_errno(u, r, "Failed to set invocation ID for unit: %m");
3780 }
3781
3782 continue;
3783 }
3784
3785 /* Check if this is an IP accounting metric serialization field */
3786 m = string_table_lookup(ip_accounting_metric_field, ELEMENTSOF(ip_accounting_metric_field), l);
3787 if (m >= 0) {
3788 uint64_t c;
3789
3790 r = safe_atou64(v, &c);
3791 if (r < 0)
3792 log_unit_debug(u, "Failed to parse IP accounting value %s, ignoring.", v);
3793 else
3794 u->ip_accounting_extra[m] = c;
3795 continue;
3796 }
3797
3798 m = string_table_lookup(io_accounting_metric_field_base, ELEMENTSOF(io_accounting_metric_field_base), l);
3799 if (m >= 0) {
3800 uint64_t c;
3801
3802 r = safe_atou64(v, &c);
3803 if (r < 0)
3804 log_unit_debug(u, "Failed to parse IO accounting base value %s, ignoring.", v);
3805 else
3806 u->io_accounting_base[m] = c;
3807 continue;
3808 }
3809
3810 m = string_table_lookup(io_accounting_metric_field_last, ELEMENTSOF(io_accounting_metric_field_last), l);
3811 if (m >= 0) {
3812 uint64_t c;
3813
3814 r = safe_atou64(v, &c);
3815 if (r < 0)
3816 log_unit_debug(u, "Failed to parse IO accounting last value %s, ignoring.", v);
3817 else
3818 u->io_accounting_last[m] = c;
3819 continue;
3820 }
3821
3822 if (unit_can_serialize(u)) {
3823 r = exec_runtime_deserialize_compat(u, l, v, fds);
3824 if (r < 0) {
3825 log_unit_warning(u, "Failed to deserialize runtime parameter '%s', ignoring.", l);
3826 continue;
3827 }
3828
3829 /* Returns positive if key was handled by the call */
3830 if (r > 0)
3831 continue;
3832
3833 r = UNIT_VTABLE(u)->deserialize_item(u, l, v, fds);
3834 if (r < 0)
3835 log_unit_warning(u, "Failed to deserialize unit parameter '%s', ignoring.", l);
3836 }
3837 }
3838
3839 /* Versions before 228 did not carry a state change timestamp. In this case, take the current time. This is
3840 * useful, so that timeouts based on this timestamp don't trigger too early, and is in-line with the logic from
3841 * before 228 where the base for timeouts was not persistent across reboots. */
3842
3843 if (!dual_timestamp_is_set(&u->state_change_timestamp))
3844 dual_timestamp_get(&u->state_change_timestamp);
3845
3846 /* Let's make sure that everything that is deserialized also gets any potential new cgroup settings applied
3847 * after we are done. For that we invalidate anything already realized, so that we can realize it again. */
3848 unit_invalidate_cgroup(u, _CGROUP_MASK_ALL);
3849 unit_invalidate_cgroup_bpf(u);
3850
3851 return 0;
3852 }
3853
3854 int unit_deserialize_skip(FILE *f) {
3855 int r;
3856 assert(f);
3857
3858 /* Skip serialized data for this unit. We don't know what it is. */
3859
3860 for (;;) {
3861 _cleanup_free_ char *line = NULL;
3862 char *l;
3863
3864 r = read_line(f, LONG_LINE_MAX, &line);
3865 if (r < 0)
3866 return log_error_errno(r, "Failed to read serialization line: %m");
3867 if (r == 0)
3868 return 0;
3869
3870 l = strstrip(line);
3871
3872 /* End marker */
3873 if (isempty(l))
3874 return 1;
3875 }
3876 }
3877
3878 int unit_add_node_dependency(Unit *u, const char *what, UnitDependency dep, UnitDependencyMask mask) {
3879 _cleanup_free_ char *e = NULL;
3880 Unit *device;
3881 int r;
3882
3883 assert(u);
3884
3885 /* Adds in links to the device node that this unit is based on */
3886 if (isempty(what))
3887 return 0;
3888
3889 if (!is_device_path(what))
3890 return 0;
3891
3892 /* When device units aren't supported (such as in a container), don't create dependencies on them. */
3893 if (!unit_type_supported(UNIT_DEVICE))
3894 return 0;
3895
3896 r = unit_name_from_path(what, ".device", &e);
3897 if (r < 0)
3898 return r;
3899
3900 r = manager_load_unit(u->manager, e, NULL, NULL, &device);
3901 if (r < 0)
3902 return r;
3903
3904 if (dep == UNIT_REQUIRES && device_shall_be_bound_by(device, u))
3905 dep = UNIT_BINDS_TO;
3906
3907 return unit_add_two_dependencies(u, UNIT_AFTER,
3908 MANAGER_IS_SYSTEM(u->manager) ? dep : UNIT_WANTS,
3909 device, true, mask);
3910 }
3911
3912 int unit_add_blockdev_dependency(Unit *u, const char *what, UnitDependencyMask mask) {
3913 _cleanup_free_ char *escaped = NULL, *target = NULL;
3914 int r;
3915
3916 assert(u);
3917
3918 if (isempty(what))
3919 return 0;
3920
3921 if (!path_startswith(what, "/dev/"))
3922 return 0;
3923
3924 /* If we don't support devices, then also don't bother with blockdev@.target */
3925 if (!unit_type_supported(UNIT_DEVICE))
3926 return 0;
3927
3928 r = unit_name_path_escape(what, &escaped);
3929 if (r < 0)
3930 return r;
3931
3932 r = unit_name_build("blockdev", escaped, ".target", &target);
3933 if (r < 0)
3934 return r;
3935
3936 return unit_add_dependency_by_name(u, UNIT_AFTER, target, true, mask);
3937 }
3938
3939 int unit_coldplug(Unit *u) {
3940 int r = 0, q;
3941 char **i;
3942 Job *uj;
3943
3944 assert(u);
3945
3946 /* Make sure we don't enter a loop, when coldplugging recursively. */
3947 if (u->coldplugged)
3948 return 0;
3949
3950 u->coldplugged = true;
3951
3952 STRV_FOREACH(i, u->deserialized_refs) {
3953 q = bus_unit_track_add_name(u, *i);
3954 if (q < 0 && r >= 0)
3955 r = q;
3956 }
3957 u->deserialized_refs = strv_free(u->deserialized_refs);
3958
3959 if (UNIT_VTABLE(u)->coldplug) {
3960 q = UNIT_VTABLE(u)->coldplug(u);
3961 if (q < 0 && r >= 0)
3962 r = q;
3963 }
3964
3965 uj = u->job ?: u->nop_job;
3966 if (uj) {
3967 q = job_coldplug(uj);
3968 if (q < 0 && r >= 0)
3969 r = q;
3970 }
3971
3972 return r;
3973 }
3974
3975 void unit_catchup(Unit *u) {
3976 assert(u);
3977
3978 if (UNIT_VTABLE(u)->catchup)
3979 UNIT_VTABLE(u)->catchup(u);
3980 }
3981
3982 static bool fragment_mtime_newer(const char *path, usec_t mtime, bool path_masked) {
3983 struct stat st;
3984
3985 if (!path)
3986 return false;
3987
3988 /* If the source is some virtual kernel file system, then we assume we watch it anyway, and hence pretend we
3989 * are never out-of-date. */
3990 if (PATH_STARTSWITH_SET(path, "/proc", "/sys"))
3991 return false;
3992
3993 if (stat(path, &st) < 0)
3994 /* What, cannot access this anymore? */
3995 return true;
3996
3997 if (path_masked)
3998 /* For masked files check if they are still so */
3999 return !null_or_empty(&st);
4000 else
4001 /* For non-empty files check the mtime */
4002 return timespec_load(&st.st_mtim) > mtime;
4003
4004 return false;
4005 }
4006
4007 bool unit_need_daemon_reload(Unit *u) {
4008 _cleanup_strv_free_ char **t = NULL;
4009 char **path;
4010
4011 assert(u);
4012
4013 /* For unit files, we allow masking… */
4014 if (fragment_mtime_newer(u->fragment_path, u->fragment_mtime,
4015 u->load_state == UNIT_MASKED))
4016 return true;
4017
4018 /* Source paths should not be masked… */
4019 if (fragment_mtime_newer(u->source_path, u->source_mtime, false))
4020 return true;
4021
4022 if (u->load_state == UNIT_LOADED)
4023 (void) unit_find_dropin_paths(u, &t);
4024 if (!strv_equal(u->dropin_paths, t))
4025 return true;
4026
4027 /* … any drop-ins that are masked are simply omitted from the list. */
4028 STRV_FOREACH(path, u->dropin_paths)
4029 if (fragment_mtime_newer(*path, u->dropin_mtime, false))
4030 return true;
4031
4032 return false;
4033 }
4034
4035 void unit_reset_failed(Unit *u) {
4036 assert(u);
4037
4038 if (UNIT_VTABLE(u)->reset_failed)
4039 UNIT_VTABLE(u)->reset_failed(u);
4040
4041 ratelimit_reset(&u->start_ratelimit);
4042 u->start_limit_hit = false;
4043 }
4044
4045 Unit *unit_following(Unit *u) {
4046 assert(u);
4047
4048 if (UNIT_VTABLE(u)->following)
4049 return UNIT_VTABLE(u)->following(u);
4050
4051 return NULL;
4052 }
4053
4054 bool unit_stop_pending(Unit *u) {
4055 assert(u);
4056
4057 /* This call does check the current state of the unit. It's
4058 * hence useful to be called from state change calls of the
4059 * unit itself, where the state isn't updated yet. This is
4060 * different from unit_inactive_or_pending() which checks both
4061 * the current state and for a queued job. */
4062
4063 return unit_has_job_type(u, JOB_STOP);
4064 }
4065
4066 bool unit_inactive_or_pending(Unit *u) {
4067 assert(u);
4068
4069 /* Returns true if the unit is inactive or going down */
4070
4071 if (UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(u)))
4072 return true;
4073
4074 if (unit_stop_pending(u))
4075 return true;
4076
4077 return false;
4078 }
4079
4080 bool unit_active_or_pending(Unit *u) {
4081 assert(u);
4082
4083 /* Returns true if the unit is active or going up */
4084
4085 if (UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(u)))
4086 return true;
4087
4088 if (u->job &&
4089 IN_SET(u->job->type, JOB_START, JOB_RELOAD_OR_START, JOB_RESTART))
4090 return true;
4091
4092 return false;
4093 }
4094
4095 bool unit_will_restart_default(Unit *u) {
4096 assert(u);
4097
4098 return unit_has_job_type(u, JOB_START);
4099 }
4100
4101 bool unit_will_restart(Unit *u) {
4102 assert(u);
4103
4104 if (!UNIT_VTABLE(u)->will_restart)
4105 return false;
4106
4107 return UNIT_VTABLE(u)->will_restart(u);
4108 }
4109
4110 int unit_kill(Unit *u, KillWho w, int signo, sd_bus_error *error) {
4111 assert(u);
4112 assert(w >= 0 && w < _KILL_WHO_MAX);
4113 assert(SIGNAL_VALID(signo));
4114
4115 if (!UNIT_VTABLE(u)->kill)
4116 return -EOPNOTSUPP;
4117
4118 return UNIT_VTABLE(u)->kill(u, w, signo, error);
4119 }
4120
4121 static Set *unit_pid_set(pid_t main_pid, pid_t control_pid) {
4122 _cleanup_set_free_ Set *pid_set = NULL;
4123 int r;
4124
4125 pid_set = set_new(NULL);
4126 if (!pid_set)
4127 return NULL;
4128
4129 /* Exclude the main/control pids from being killed via the cgroup */
4130 if (main_pid > 0) {
4131 r = set_put(pid_set, PID_TO_PTR(main_pid));
4132 if (r < 0)
4133 return NULL;
4134 }
4135
4136 if (control_pid > 0) {
4137 r = set_put(pid_set, PID_TO_PTR(control_pid));
4138 if (r < 0)
4139 return NULL;
4140 }
4141
4142 return TAKE_PTR(pid_set);
4143 }
4144
4145 int unit_kill_common(
4146 Unit *u,
4147 KillWho who,
4148 int signo,
4149 pid_t main_pid,
4150 pid_t control_pid,
4151 sd_bus_error *error) {
4152
4153 int r = 0;
4154 bool killed = false;
4155
4156 if (IN_SET(who, KILL_MAIN, KILL_MAIN_FAIL)) {
4157 if (main_pid < 0)
4158 return sd_bus_error_setf(error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no main processes", unit_type_to_string(u->type));
4159 else if (main_pid == 0)
4160 return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No main process to kill");
4161 }
4162
4163 if (IN_SET(who, KILL_CONTROL, KILL_CONTROL_FAIL)) {
4164 if (control_pid < 0)
4165 return sd_bus_error_setf(error, BUS_ERROR_NO_SUCH_PROCESS, "%s units have no control processes", unit_type_to_string(u->type));
4166 else if (control_pid == 0)
4167 return sd_bus_error_set_const(error, BUS_ERROR_NO_SUCH_PROCESS, "No control process to kill");
4168 }
4169
4170 if (IN_SET(who, KILL_CONTROL, KILL_CONTROL_FAIL, KILL_ALL, KILL_ALL_FAIL))
4171 if (control_pid > 0) {
4172 if (kill(control_pid, signo) < 0)
4173 r = -errno;
4174 else
4175 killed = true;
4176 }
4177
4178 if (IN_SET(who, KILL_MAIN, KILL_MAIN_FAIL, KILL_ALL, KILL_ALL_FAIL))
4179 if (main_pid > 0) {
4180 if (kill(main_pid, signo) < 0)
4181 r = -errno;
4182 else
4183 killed = true;
4184 }
4185
4186 if (IN_SET(who, KILL_ALL, KILL_ALL_FAIL) && u->cgroup_path) {
4187 _cleanup_set_free_ Set *pid_set = NULL;
4188 int q;
4189
4190 /* Exclude the main/control pids from being killed via the cgroup */
4191 pid_set = unit_pid_set(main_pid, control_pid);
4192 if (!pid_set)
4193 return -ENOMEM;
4194
4195 q = cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, signo, 0, pid_set, NULL, NULL);
4196 if (q < 0 && !IN_SET(q, -EAGAIN, -ESRCH, -ENOENT))
4197 r = q;
4198 else
4199 killed = true;
4200 }
4201
4202 if (r == 0 && !killed && IN_SET(who, KILL_ALL_FAIL, KILL_CONTROL_FAIL))
4203 return -ESRCH;
4204
4205 return r;
4206 }
4207
4208 int unit_following_set(Unit *u, Set **s) {
4209 assert(u);
4210 assert(s);
4211
4212 if (UNIT_VTABLE(u)->following_set)
4213 return UNIT_VTABLE(u)->following_set(u, s);
4214
4215 *s = NULL;
4216 return 0;
4217 }
4218
4219 UnitFileState unit_get_unit_file_state(Unit *u) {
4220 int r;
4221
4222 assert(u);
4223
4224 if (u->unit_file_state < 0 && u->fragment_path) {
4225 r = unit_file_get_state(
4226 u->manager->unit_file_scope,
4227 NULL,
4228 u->id,
4229 &u->unit_file_state);
4230 if (r < 0)
4231 u->unit_file_state = UNIT_FILE_BAD;
4232 }
4233
4234 return u->unit_file_state;
4235 }
4236
4237 int unit_get_unit_file_preset(Unit *u) {
4238 assert(u);
4239
4240 if (u->unit_file_preset < 0 && u->fragment_path)
4241 u->unit_file_preset = unit_file_query_preset(
4242 u->manager->unit_file_scope,
4243 NULL,
4244 basename(u->fragment_path));
4245
4246 return u->unit_file_preset;
4247 }
4248
4249 Unit* unit_ref_set(UnitRef *ref, Unit *source, Unit *target) {
4250 assert(ref);
4251 assert(source);
4252 assert(target);
4253
4254 if (ref->target)
4255 unit_ref_unset(ref);
4256
4257 ref->source = source;
4258 ref->target = target;
4259 LIST_PREPEND(refs_by_target, target->refs_by_target, ref);
4260 return target;
4261 }
4262
4263 void unit_ref_unset(UnitRef *ref) {
4264 assert(ref);
4265
4266 if (!ref->target)
4267 return;
4268
4269 /* We are about to drop a reference to the unit, make sure the garbage collection has a look at it as it might
4270 * be unreferenced now. */
4271 unit_add_to_gc_queue(ref->target);
4272
4273 LIST_REMOVE(refs_by_target, ref->target->refs_by_target, ref);
4274 ref->source = ref->target = NULL;
4275 }
4276
4277 static int user_from_unit_name(Unit *u, char **ret) {
4278
4279 static const uint8_t hash_key[] = {
4280 0x58, 0x1a, 0xaf, 0xe6, 0x28, 0x58, 0x4e, 0x96,
4281 0xb4, 0x4e, 0xf5, 0x3b, 0x8c, 0x92, 0x07, 0xec
4282 };
4283
4284 _cleanup_free_ char *n = NULL;
4285 int r;
4286
4287 r = unit_name_to_prefix(u->id, &n);
4288 if (r < 0)
4289 return r;
4290
4291 if (valid_user_group_name(n, 0)) {
4292 *ret = TAKE_PTR(n);
4293 return 0;
4294 }
4295
4296 /* If we can't use the unit name as a user name, then let's hash it and use that */
4297 if (asprintf(ret, "_du%016" PRIx64, siphash24(n, strlen(n), hash_key)) < 0)
4298 return -ENOMEM;
4299
4300 return 0;
4301 }
4302
4303 int unit_patch_contexts(Unit *u) {
4304 CGroupContext *cc;
4305 ExecContext *ec;
4306 unsigned i;
4307 int r;
4308
4309 assert(u);
4310
4311 /* Patch in the manager defaults into the exec and cgroup
4312 * contexts, _after_ the rest of the settings have been
4313 * initialized */
4314
4315 ec = unit_get_exec_context(u);
4316 if (ec) {
4317 /* This only copies in the ones that need memory */
4318 for (i = 0; i < _RLIMIT_MAX; i++)
4319 if (u->manager->rlimit[i] && !ec->rlimit[i]) {
4320 ec->rlimit[i] = newdup(struct rlimit, u->manager->rlimit[i], 1);
4321 if (!ec->rlimit[i])
4322 return -ENOMEM;
4323 }
4324
4325 if (MANAGER_IS_USER(u->manager) &&
4326 !ec->working_directory) {
4327
4328 r = get_home_dir(&ec->working_directory);
4329 if (r < 0)
4330 return r;
4331
4332 /* Allow user services to run, even if the
4333 * home directory is missing */
4334 ec->working_directory_missing_ok = true;
4335 }
4336
4337 if (ec->private_devices)
4338 ec->capability_bounding_set &= ~((UINT64_C(1) << CAP_MKNOD) | (UINT64_C(1) << CAP_SYS_RAWIO));
4339
4340 if (ec->protect_kernel_modules)
4341 ec->capability_bounding_set &= ~(UINT64_C(1) << CAP_SYS_MODULE);
4342
4343 if (ec->protect_kernel_logs)
4344 ec->capability_bounding_set &= ~(UINT64_C(1) << CAP_SYSLOG);
4345
4346 if (ec->protect_clock)
4347 ec->capability_bounding_set &= ~((UINT64_C(1) << CAP_SYS_TIME) | (UINT64_C(1) << CAP_WAKE_ALARM));
4348
4349 if (ec->dynamic_user) {
4350 if (!ec->user) {
4351 r = user_from_unit_name(u, &ec->user);
4352 if (r < 0)
4353 return r;
4354 }
4355
4356 if (!ec->group) {
4357 ec->group = strdup(ec->user);
4358 if (!ec->group)
4359 return -ENOMEM;
4360 }
4361
4362 /* If the dynamic user option is on, let's make sure that the unit can't leave its
4363 * UID/GID around in the file system or on IPC objects. Hence enforce a strict
4364 * sandbox. */
4365
4366 ec->private_tmp = true;
4367 ec->remove_ipc = true;
4368 ec->protect_system = PROTECT_SYSTEM_STRICT;
4369 if (ec->protect_home == PROTECT_HOME_NO)
4370 ec->protect_home = PROTECT_HOME_READ_ONLY;
4371
4372 /* Make sure this service can neither benefit from SUID/SGID binaries nor create
4373 * them. */
4374 ec->no_new_privileges = true;
4375 ec->restrict_suid_sgid = true;
4376 }
4377 }
4378
4379 cc = unit_get_cgroup_context(u);
4380 if (cc && ec) {
4381
4382 if (ec->private_devices &&
4383 cc->device_policy == CGROUP_DEVICE_POLICY_AUTO)
4384 cc->device_policy = CGROUP_DEVICE_POLICY_CLOSED;
4385
4386 if (ec->root_image &&
4387 (cc->device_policy != CGROUP_DEVICE_POLICY_AUTO || cc->device_allow)) {
4388
4389 /* When RootImage= is specified, the following devices are touched. */
4390 r = cgroup_add_device_allow(cc, "/dev/loop-control", "rw");
4391 if (r < 0)
4392 return r;
4393
4394 r = cgroup_add_device_allow(cc, "block-loop", "rwm");
4395 if (r < 0)
4396 return r;
4397
4398 r = cgroup_add_device_allow(cc, "block-blkext", "rwm");
4399 if (r < 0)
4400 return r;
4401
4402 /* Make sure "block-loop" can be resolved, i.e. make sure "loop" shows up in /proc/devices */
4403 r = unit_add_two_dependencies_by_name(u, UNIT_AFTER, UNIT_WANTS, "modprobe@loop.service", true, UNIT_DEPENDENCY_FILE);
4404 if (r < 0)
4405 return r;
4406 }
4407
4408 if (ec->protect_clock) {
4409 r = cgroup_add_device_allow(cc, "char-rtc", "r");
4410 if (r < 0)
4411 return r;
4412 }
4413 }
4414
4415 return 0;
4416 }
4417
4418 ExecContext *unit_get_exec_context(Unit *u) {
4419 size_t offset;
4420 assert(u);
4421
4422 if (u->type < 0)
4423 return NULL;
4424
4425 offset = UNIT_VTABLE(u)->exec_context_offset;
4426 if (offset <= 0)
4427 return NULL;
4428
4429 return (ExecContext*) ((uint8_t*) u + offset);
4430 }
4431
4432 KillContext *unit_get_kill_context(Unit *u) {
4433 size_t offset;
4434 assert(u);
4435
4436 if (u->type < 0)
4437 return NULL;
4438
4439 offset = UNIT_VTABLE(u)->kill_context_offset;
4440 if (offset <= 0)
4441 return NULL;
4442
4443 return (KillContext*) ((uint8_t*) u + offset);
4444 }
4445
4446 CGroupContext *unit_get_cgroup_context(Unit *u) {
4447 size_t offset;
4448
4449 if (u->type < 0)
4450 return NULL;
4451
4452 offset = UNIT_VTABLE(u)->cgroup_context_offset;
4453 if (offset <= 0)
4454 return NULL;
4455
4456 return (CGroupContext*) ((uint8_t*) u + offset);
4457 }
4458
4459 ExecRuntime *unit_get_exec_runtime(Unit *u) {
4460 size_t offset;
4461
4462 if (u->type < 0)
4463 return NULL;
4464
4465 offset = UNIT_VTABLE(u)->exec_runtime_offset;
4466 if (offset <= 0)
4467 return NULL;
4468
4469 return *(ExecRuntime**) ((uint8_t*) u + offset);
4470 }
4471
4472 static const char* unit_drop_in_dir(Unit *u, UnitWriteFlags flags) {
4473 assert(u);
4474
4475 if (UNIT_WRITE_FLAGS_NOOP(flags))
4476 return NULL;
4477
4478 if (u->transient) /* Redirect drop-ins for transient units always into the transient directory. */
4479 return u->manager->lookup_paths.transient;
4480
4481 if (flags & UNIT_PERSISTENT)
4482 return u->manager->lookup_paths.persistent_control;
4483
4484 if (flags & UNIT_RUNTIME)
4485 return u->manager->lookup_paths.runtime_control;
4486
4487 return NULL;
4488 }
4489
4490 char* unit_escape_setting(const char *s, UnitWriteFlags flags, char **buf) {
4491 char *ret = NULL;
4492
4493 if (!s)
4494 return NULL;
4495
4496 /* Escapes the input string as requested. Returns the escaped string. If 'buf' is specified then the allocated
4497 * return buffer pointer is also written to *buf, except if no escaping was necessary, in which case *buf is
4498 * set to NULL, and the input pointer is returned as-is. This means the return value always contains a properly
4499 * escaped version, but *buf when passed only contains a pointer if an allocation was necessary. If *buf is
4500 * not specified, then the return value always needs to be freed. Callers can use this to optimize memory
4501 * allocations. */
4502
4503 if (flags & UNIT_ESCAPE_SPECIFIERS) {
4504 ret = specifier_escape(s);
4505 if (!ret)
4506 return NULL;
4507
4508 s = ret;
4509 }
4510
4511 if (flags & UNIT_ESCAPE_C) {
4512 char *a;
4513
4514 a = cescape(s);
4515 free(ret);
4516 if (!a)
4517 return NULL;
4518
4519 ret = a;
4520 }
4521
4522 if (buf) {
4523 *buf = ret;
4524 return ret ?: (char*) s;
4525 }
4526
4527 return ret ?: strdup(s);
4528 }
4529
4530 char* unit_concat_strv(char **l, UnitWriteFlags flags) {
4531 _cleanup_free_ char *result = NULL;
4532 size_t n = 0, allocated = 0;
4533 char **i;
4534
4535 /* Takes a list of strings, escapes them, and concatenates them. This may be used to format command lines in a
4536 * way suitable for ExecStart= stanzas */
4537
4538 STRV_FOREACH(i, l) {
4539 _cleanup_free_ char *buf = NULL;
4540 const char *p;
4541 size_t a;
4542 char *q;
4543
4544 p = unit_escape_setting(*i, flags, &buf);
4545 if (!p)
4546 return NULL;
4547
4548 a = (n > 0) + 1 + strlen(p) + 1; /* separating space + " + entry + " */
4549 if (!GREEDY_REALLOC(result, allocated, n + a + 1))
4550 return NULL;
4551
4552 q = result + n;
4553 if (n > 0)
4554 *(q++) = ' ';
4555
4556 *(q++) = '"';
4557 q = stpcpy(q, p);
4558 *(q++) = '"';
4559
4560 n += a;
4561 }
4562
4563 if (!GREEDY_REALLOC(result, allocated, n + 1))
4564 return NULL;
4565
4566 result[n] = 0;
4567
4568 return TAKE_PTR(result);
4569 }
4570
4571 int unit_write_setting(Unit *u, UnitWriteFlags flags, const char *name, const char *data) {
4572 _cleanup_free_ char *p = NULL, *q = NULL, *escaped = NULL;
4573 const char *dir, *wrapped;
4574 int r;
4575
4576 assert(u);
4577 assert(name);
4578 assert(data);
4579
4580 if (UNIT_WRITE_FLAGS_NOOP(flags))
4581 return 0;
4582
4583 data = unit_escape_setting(data, flags, &escaped);
4584 if (!data)
4585 return -ENOMEM;
4586
4587 /* Prefix the section header. If we are writing this out as transient file, then let's suppress this if the
4588 * previous section header is the same */
4589
4590 if (flags & UNIT_PRIVATE) {
4591 if (!UNIT_VTABLE(u)->private_section)
4592 return -EINVAL;
4593
4594 if (!u->transient_file || u->last_section_private < 0)
4595 data = strjoina("[", UNIT_VTABLE(u)->private_section, "]\n", data);
4596 else if (u->last_section_private == 0)
4597 data = strjoina("\n[", UNIT_VTABLE(u)->private_section, "]\n", data);
4598 } else {
4599 if (!u->transient_file || u->last_section_private < 0)
4600 data = strjoina("[Unit]\n", data);
4601 else if (u->last_section_private > 0)
4602 data = strjoina("\n[Unit]\n", data);
4603 }
4604
4605 if (u->transient_file) {
4606 /* When this is a transient unit file in creation, then let's not create a new drop-in but instead
4607 * write to the transient unit file. */
4608 fputs(data, u->transient_file);
4609
4610 if (!endswith(data, "\n"))
4611 fputc('\n', u->transient_file);
4612
4613 /* Remember which section we wrote this entry to */
4614 u->last_section_private = !!(flags & UNIT_PRIVATE);
4615 return 0;
4616 }
4617
4618 dir = unit_drop_in_dir(u, flags);
4619 if (!dir)
4620 return -EINVAL;
4621
4622 wrapped = strjoina("# This is a drop-in unit file extension, created via \"systemctl set-property\"\n"
4623 "# or an equivalent operation. Do not edit.\n",
4624 data,
4625 "\n");
4626
4627 r = drop_in_file(dir, u->id, 50, name, &p, &q);
4628 if (r < 0)
4629 return r;
4630
4631 (void) mkdir_p_label(p, 0755);
4632
4633 /* Make sure the drop-in dir is registered in our path cache. This way we don't need to stupidly
4634 * recreate the cache after every drop-in we write. */
4635 if (u->manager->unit_path_cache) {
4636 r = set_put_strdup(u->manager->unit_path_cache, p);
4637 if (r < 0)
4638 return r;
4639 }
4640
4641 r = write_string_file_atomic_label(q, wrapped);
4642 if (r < 0)
4643 return r;
4644
4645 r = strv_push(&u->dropin_paths, q);
4646 if (r < 0)
4647 return r;
4648 q = NULL;
4649
4650 strv_uniq(u->dropin_paths);
4651
4652 u->dropin_mtime = now(CLOCK_REALTIME);
4653
4654 return 0;
4655 }
4656
4657 int unit_write_settingf(Unit *u, UnitWriteFlags flags, const char *name, const char *format, ...) {
4658 _cleanup_free_ char *p = NULL;
4659 va_list ap;
4660 int r;
4661
4662 assert(u);
4663 assert(name);
4664 assert(format);
4665
4666 if (UNIT_WRITE_FLAGS_NOOP(flags))
4667 return 0;
4668
4669 va_start(ap, format);
4670 r = vasprintf(&p, format, ap);
4671 va_end(ap);
4672
4673 if (r < 0)
4674 return -ENOMEM;
4675
4676 return unit_write_setting(u, flags, name, p);
4677 }
4678
4679 int unit_make_transient(Unit *u) {
4680 _cleanup_free_ char *path = NULL;
4681 FILE *f;
4682
4683 assert(u);
4684
4685 if (!UNIT_VTABLE(u)->can_transient)
4686 return -EOPNOTSUPP;
4687
4688 (void) mkdir_p_label(u->manager->lookup_paths.transient, 0755);
4689
4690 path = path_join(u->manager->lookup_paths.transient, u->id);
4691 if (!path)
4692 return -ENOMEM;
4693
4694 /* Let's open the file we'll write the transient settings into. This file is kept open as long as we are
4695 * creating the transient, and is closed in unit_load(), as soon as we start loading the file. */
4696
4697 RUN_WITH_UMASK(0022) {
4698 f = fopen(path, "we");
4699 if (!f)
4700 return -errno;
4701 }
4702
4703 safe_fclose(u->transient_file);
4704 u->transient_file = f;
4705
4706 free_and_replace(u->fragment_path, path);
4707
4708 u->source_path = mfree(u->source_path);
4709 u->dropin_paths = strv_free(u->dropin_paths);
4710 u->fragment_mtime = u->source_mtime = u->dropin_mtime = 0;
4711
4712 u->load_state = UNIT_STUB;
4713 u->load_error = 0;
4714 u->transient = true;
4715
4716 unit_add_to_dbus_queue(u);
4717 unit_add_to_gc_queue(u);
4718
4719 fputs("# This is a transient unit file, created programmatically via the systemd API. Do not edit.\n",
4720 u->transient_file);
4721
4722 return 0;
4723 }
4724
4725 static int log_kill(pid_t pid, int sig, void *userdata) {
4726 _cleanup_free_ char *comm = NULL;
4727
4728 (void) get_process_comm(pid, &comm);
4729
4730 /* Don't log about processes marked with brackets, under the assumption that these are temporary processes
4731 only, like for example systemd's own PAM stub process. */
4732 if (comm && comm[0] == '(')
4733 return 0;
4734
4735 log_unit_notice(userdata,
4736 "Killing process " PID_FMT " (%s) with signal SIG%s.",
4737 pid,
4738 strna(comm),
4739 signal_to_string(sig));
4740
4741 return 1;
4742 }
4743
4744 static int operation_to_signal(const KillContext *c, KillOperation k, bool *noteworthy) {
4745 assert(c);
4746
4747 switch (k) {
4748
4749 case KILL_TERMINATE:
4750 case KILL_TERMINATE_AND_LOG:
4751 *noteworthy = false;
4752 return c->kill_signal;
4753
4754 case KILL_RESTART:
4755 *noteworthy = false;
4756 return restart_kill_signal(c);
4757
4758 case KILL_KILL:
4759 *noteworthy = true;
4760 return c->final_kill_signal;
4761
4762 case KILL_WATCHDOG:
4763 *noteworthy = true;
4764 return c->watchdog_signal;
4765
4766 default:
4767 assert_not_reached("KillOperation unknown");
4768 }
4769 }
4770
4771 int unit_kill_context(
4772 Unit *u,
4773 KillContext *c,
4774 KillOperation k,
4775 pid_t main_pid,
4776 pid_t control_pid,
4777 bool main_pid_alien) {
4778
4779 bool wait_for_exit = false, send_sighup;
4780 cg_kill_log_func_t log_func = NULL;
4781 int sig, r;
4782
4783 assert(u);
4784 assert(c);
4785
4786 /* Kill the processes belonging to this unit, in preparation for shutting the unit down.
4787 * Returns > 0 if we killed something worth waiting for, 0 otherwise. */
4788
4789 if (c->kill_mode == KILL_NONE)
4790 return 0;
4791
4792 bool noteworthy;
4793 sig = operation_to_signal(c, k, &noteworthy);
4794 if (noteworthy)
4795 log_func = log_kill;
4796
4797 send_sighup =
4798 c->send_sighup &&
4799 IN_SET(k, KILL_TERMINATE, KILL_TERMINATE_AND_LOG) &&
4800 sig != SIGHUP;
4801
4802 if (main_pid > 0) {
4803 if (log_func)
4804 log_func(main_pid, sig, u);
4805
4806 r = kill_and_sigcont(main_pid, sig);
4807 if (r < 0 && r != -ESRCH) {
4808 _cleanup_free_ char *comm = NULL;
4809 (void) get_process_comm(main_pid, &comm);
4810
4811 log_unit_warning_errno(u, r, "Failed to kill main process " PID_FMT " (%s), ignoring: %m", main_pid, strna(comm));
4812 } else {
4813 if (!main_pid_alien)
4814 wait_for_exit = true;
4815
4816 if (r != -ESRCH && send_sighup)
4817 (void) kill(main_pid, SIGHUP);
4818 }
4819 }
4820
4821 if (control_pid > 0) {
4822 if (log_func)
4823 log_func(control_pid, sig, u);
4824
4825 r = kill_and_sigcont(control_pid, sig);
4826 if (r < 0 && r != -ESRCH) {
4827 _cleanup_free_ char *comm = NULL;
4828 (void) get_process_comm(control_pid, &comm);
4829
4830 log_unit_warning_errno(u, r, "Failed to kill control process " PID_FMT " (%s), ignoring: %m", control_pid, strna(comm));
4831 } else {
4832 wait_for_exit = true;
4833
4834 if (r != -ESRCH && send_sighup)
4835 (void) kill(control_pid, SIGHUP);
4836 }
4837 }
4838
4839 if (u->cgroup_path &&
4840 (c->kill_mode == KILL_CONTROL_GROUP || (c->kill_mode == KILL_MIXED && k == KILL_KILL))) {
4841 _cleanup_set_free_ Set *pid_set = NULL;
4842
4843 /* Exclude the main/control pids from being killed via the cgroup */
4844 pid_set = unit_pid_set(main_pid, control_pid);
4845 if (!pid_set)
4846 return -ENOMEM;
4847
4848 r = cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path,
4849 sig,
4850 CGROUP_SIGCONT|CGROUP_IGNORE_SELF,
4851 pid_set,
4852 log_func, u);
4853 if (r < 0) {
4854 if (!IN_SET(r, -EAGAIN, -ESRCH, -ENOENT))
4855 log_unit_warning_errno(u, r, "Failed to kill control group %s, ignoring: %m", u->cgroup_path);
4856
4857 } else if (r > 0) {
4858
4859 /* FIXME: For now, on the legacy hierarchy, we will not wait for the cgroup members to die if
4860 * we are running in a container or if this is a delegation unit, simply because cgroup
4861 * notification is unreliable in these cases. It doesn't work at all in containers, and outside
4862 * of containers it can be confused easily by left-over directories in the cgroup — which
4863 * however should not exist in non-delegated units. On the unified hierarchy that's different,
4864 * there we get proper events. Hence rely on them. */
4865
4866 if (cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER) > 0 ||
4867 (detect_container() == 0 && !unit_cgroup_delegate(u)))
4868 wait_for_exit = true;
4869
4870 if (send_sighup) {
4871 set_free(pid_set);
4872
4873 pid_set = unit_pid_set(main_pid, control_pid);
4874 if (!pid_set)
4875 return -ENOMEM;
4876
4877 cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path,
4878 SIGHUP,
4879 CGROUP_IGNORE_SELF,
4880 pid_set,
4881 NULL, NULL);
4882 }
4883 }
4884 }
4885
4886 return wait_for_exit;
4887 }
4888
4889 int unit_require_mounts_for(Unit *u, const char *path, UnitDependencyMask mask) {
4890 _cleanup_free_ char *p = NULL;
4891 UnitDependencyInfo di;
4892 int r;
4893
4894 assert(u);
4895 assert(path);
4896
4897 /* Registers a unit for requiring a certain path and all its prefixes. We keep a hashtable of these paths in
4898 * the unit (from the path to the UnitDependencyInfo structure indicating how to the dependency came to
4899 * be). However, we build a prefix table for all possible prefixes so that new appearing mount units can easily
4900 * determine which units to make themselves a dependency of. */
4901
4902 if (!path_is_absolute(path))
4903 return -EINVAL;
4904
4905 r = hashmap_ensure_allocated(&u->requires_mounts_for, &path_hash_ops);
4906 if (r < 0)
4907 return r;
4908
4909 p = strdup(path);
4910 if (!p)
4911 return -ENOMEM;
4912
4913 path = path_simplify(p, true);
4914
4915 if (!path_is_normalized(path))
4916 return -EPERM;
4917
4918 if (hashmap_contains(u->requires_mounts_for, path))
4919 return 0;
4920
4921 di = (UnitDependencyInfo) {
4922 .origin_mask = mask
4923 };
4924
4925 r = hashmap_put(u->requires_mounts_for, path, di.data);
4926 if (r < 0)
4927 return r;
4928 p = NULL;
4929
4930 char prefix[strlen(path) + 1];
4931 PATH_FOREACH_PREFIX_MORE(prefix, path) {
4932 Set *x;
4933
4934 x = hashmap_get(u->manager->units_requiring_mounts_for, prefix);
4935 if (!x) {
4936 _cleanup_free_ char *q = NULL;
4937
4938 r = hashmap_ensure_allocated(&u->manager->units_requiring_mounts_for, &path_hash_ops);
4939 if (r < 0)
4940 return r;
4941
4942 q = strdup(prefix);
4943 if (!q)
4944 return -ENOMEM;
4945
4946 x = set_new(NULL);
4947 if (!x)
4948 return -ENOMEM;
4949
4950 r = hashmap_put(u->manager->units_requiring_mounts_for, q, x);
4951 if (r < 0) {
4952 set_free(x);
4953 return r;
4954 }
4955 q = NULL;
4956 }
4957
4958 r = set_put(x, u);
4959 if (r < 0)
4960 return r;
4961 }
4962
4963 return 0;
4964 }
4965
4966 int unit_setup_exec_runtime(Unit *u) {
4967 ExecRuntime **rt;
4968 size_t offset;
4969 Unit *other;
4970 Iterator i;
4971 void *v;
4972 int r;
4973
4974 offset = UNIT_VTABLE(u)->exec_runtime_offset;
4975 assert(offset > 0);
4976
4977 /* Check if there already is an ExecRuntime for this unit? */
4978 rt = (ExecRuntime**) ((uint8_t*) u + offset);
4979 if (*rt)
4980 return 0;
4981
4982 /* Try to get it from somebody else */
4983 HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_JOINS_NAMESPACE_OF], i) {
4984 r = exec_runtime_acquire(u->manager, NULL, other->id, false, rt);
4985 if (r == 1)
4986 return 1;
4987 }
4988
4989 return exec_runtime_acquire(u->manager, unit_get_exec_context(u), u->id, true, rt);
4990 }
4991
4992 int unit_setup_dynamic_creds(Unit *u) {
4993 ExecContext *ec;
4994 DynamicCreds *dcreds;
4995 size_t offset;
4996
4997 assert(u);
4998
4999 offset = UNIT_VTABLE(u)->dynamic_creds_offset;
5000 assert(offset > 0);
5001 dcreds = (DynamicCreds*) ((uint8_t*) u + offset);
5002
5003 ec = unit_get_exec_context(u);
5004 assert(ec);
5005
5006 if (!ec->dynamic_user)
5007 return 0;
5008
5009 return dynamic_creds_acquire(dcreds, u->manager, ec->user, ec->group);
5010 }
5011
5012 bool unit_type_supported(UnitType t) {
5013 if (_unlikely_(t < 0))
5014 return false;
5015 if (_unlikely_(t >= _UNIT_TYPE_MAX))
5016 return false;
5017
5018 if (!unit_vtable[t]->supported)
5019 return true;
5020
5021 return unit_vtable[t]->supported();
5022 }
5023
5024 void unit_warn_if_dir_nonempty(Unit *u, const char* where) {
5025 int r;
5026
5027 assert(u);
5028 assert(where);
5029
5030 r = dir_is_empty(where);
5031 if (r > 0 || r == -ENOTDIR)
5032 return;
5033 if (r < 0) {
5034 log_unit_warning_errno(u, r, "Failed to check directory %s: %m", where);
5035 return;
5036 }
5037
5038 log_struct(LOG_NOTICE,
5039 "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR,
5040 LOG_UNIT_ID(u),
5041 LOG_UNIT_INVOCATION_ID(u),
5042 LOG_UNIT_MESSAGE(u, "Directory %s to mount over is not empty, mounting anyway.", where),
5043 "WHERE=%s", where);
5044 }
5045
5046 int unit_fail_if_noncanonical(Unit *u, const char* where) {
5047 _cleanup_free_ char *canonical_where = NULL;
5048 int r;
5049
5050 assert(u);
5051 assert(where);
5052
5053 r = chase_symlinks(where, NULL, CHASE_NONEXISTENT, &canonical_where, NULL);
5054 if (r < 0) {
5055 log_unit_debug_errno(u, r, "Failed to check %s for symlinks, ignoring: %m", where);
5056 return 0;
5057 }
5058
5059 /* We will happily ignore a trailing slash (or any redundant slashes) */
5060 if (path_equal(where, canonical_where))
5061 return 0;
5062
5063 /* No need to mention "." or "..", they would already have been rejected by unit_name_from_path() */
5064 log_struct(LOG_ERR,
5065 "MESSAGE_ID=" SD_MESSAGE_OVERMOUNTING_STR,
5066 LOG_UNIT_ID(u),
5067 LOG_UNIT_INVOCATION_ID(u),
5068 LOG_UNIT_MESSAGE(u, "Mount path %s is not canonical (contains a symlink).", where),
5069 "WHERE=%s", where);
5070
5071 return -ELOOP;
5072 }
5073
5074 bool unit_is_pristine(Unit *u) {
5075 assert(u);
5076
5077 /* Check if the unit already exists or is already around,
5078 * in a number of different ways. Note that to cater for unit
5079 * types such as slice, we are generally fine with units that
5080 * are marked UNIT_LOADED even though nothing was actually
5081 * loaded, as those unit types don't require a file on disk. */
5082
5083 return !(!IN_SET(u->load_state, UNIT_NOT_FOUND, UNIT_LOADED) ||
5084 u->fragment_path ||
5085 u->source_path ||
5086 !strv_isempty(u->dropin_paths) ||
5087 u->job ||
5088 u->merged_into);
5089 }
5090
5091 pid_t unit_control_pid(Unit *u) {
5092 assert(u);
5093
5094 if (UNIT_VTABLE(u)->control_pid)
5095 return UNIT_VTABLE(u)->control_pid(u);
5096
5097 return 0;
5098 }
5099
5100 pid_t unit_main_pid(Unit *u) {
5101 assert(u);
5102
5103 if (UNIT_VTABLE(u)->main_pid)
5104 return UNIT_VTABLE(u)->main_pid(u);
5105
5106 return 0;
5107 }
5108
5109 static void unit_unref_uid_internal(
5110 Unit *u,
5111 uid_t *ref_uid,
5112 bool destroy_now,
5113 void (*_manager_unref_uid)(Manager *m, uid_t uid, bool destroy_now)) {
5114
5115 assert(u);
5116 assert(ref_uid);
5117 assert(_manager_unref_uid);
5118
5119 /* Generic implementation of both unit_unref_uid() and unit_unref_gid(), under the assumption that uid_t and
5120 * gid_t are actually the same time, with the same validity rules.
5121 *
5122 * Drops a reference to UID/GID from a unit. */
5123
5124 assert_cc(sizeof(uid_t) == sizeof(gid_t));
5125 assert_cc(UID_INVALID == (uid_t) GID_INVALID);
5126
5127 if (!uid_is_valid(*ref_uid))
5128 return;
5129
5130 _manager_unref_uid(u->manager, *ref_uid, destroy_now);
5131 *ref_uid = UID_INVALID;
5132 }
5133
5134 static void unit_unref_uid(Unit *u, bool destroy_now) {
5135 unit_unref_uid_internal(u, &u->ref_uid, destroy_now, manager_unref_uid);
5136 }
5137
5138 static void unit_unref_gid(Unit *u, bool destroy_now) {
5139 unit_unref_uid_internal(u, (uid_t*) &u->ref_gid, destroy_now, manager_unref_gid);
5140 }
5141
5142 void unit_unref_uid_gid(Unit *u, bool destroy_now) {
5143 assert(u);
5144
5145 unit_unref_uid(u, destroy_now);
5146 unit_unref_gid(u, destroy_now);
5147 }
5148
5149 static int unit_ref_uid_internal(
5150 Unit *u,
5151 uid_t *ref_uid,
5152 uid_t uid,
5153 bool clean_ipc,
5154 int (*_manager_ref_uid)(Manager *m, uid_t uid, bool clean_ipc)) {
5155
5156 int r;
5157
5158 assert(u);
5159 assert(ref_uid);
5160 assert(uid_is_valid(uid));
5161 assert(_manager_ref_uid);
5162
5163 /* Generic implementation of both unit_ref_uid() and unit_ref_guid(), under the assumption that uid_t and gid_t
5164 * are actually the same type, and have the same validity rules.
5165 *
5166 * Adds a reference on a specific UID/GID to this unit. Each unit referencing the same UID/GID maintains a
5167 * reference so that we can destroy the UID/GID's IPC resources as soon as this is requested and the counter
5168 * drops to zero. */
5169
5170 assert_cc(sizeof(uid_t) == sizeof(gid_t));
5171 assert_cc(UID_INVALID == (uid_t) GID_INVALID);
5172
5173 if (*ref_uid == uid)
5174 return 0;
5175
5176 if (uid_is_valid(*ref_uid)) /* Already set? */
5177 return -EBUSY;
5178
5179 r = _manager_ref_uid(u->manager, uid, clean_ipc);
5180 if (r < 0)
5181 return r;
5182
5183 *ref_uid = uid;
5184 return 1;
5185 }
5186
5187 static int unit_ref_uid(Unit *u, uid_t uid, bool clean_ipc) {
5188 return unit_ref_uid_internal(u, &u->ref_uid, uid, clean_ipc, manager_ref_uid);
5189 }
5190
5191 static int unit_ref_gid(Unit *u, gid_t gid, bool clean_ipc) {
5192 return unit_ref_uid_internal(u, (uid_t*) &u->ref_gid, (uid_t) gid, clean_ipc, manager_ref_gid);
5193 }
5194
5195 static int unit_ref_uid_gid_internal(Unit *u, uid_t uid, gid_t gid, bool clean_ipc) {
5196 int r = 0, q = 0;
5197
5198 assert(u);
5199
5200 /* Reference both a UID and a GID in one go. Either references both, or neither. */
5201
5202 if (uid_is_valid(uid)) {
5203 r = unit_ref_uid(u, uid, clean_ipc);
5204 if (r < 0)
5205 return r;
5206 }
5207
5208 if (gid_is_valid(gid)) {
5209 q = unit_ref_gid(u, gid, clean_ipc);
5210 if (q < 0) {
5211 if (r > 0)
5212 unit_unref_uid(u, false);
5213
5214 return q;
5215 }
5216 }
5217
5218 return r > 0 || q > 0;
5219 }
5220
5221 int unit_ref_uid_gid(Unit *u, uid_t uid, gid_t gid) {
5222 ExecContext *c;
5223 int r;
5224
5225 assert(u);
5226
5227 c = unit_get_exec_context(u);
5228
5229 r = unit_ref_uid_gid_internal(u, uid, gid, c ? c->remove_ipc : false);
5230 if (r < 0)
5231 return log_unit_warning_errno(u, r, "Couldn't add UID/GID reference to unit, proceeding without: %m");
5232
5233 return r;
5234 }
5235
5236 void unit_notify_user_lookup(Unit *u, uid_t uid, gid_t gid) {
5237 int r;
5238
5239 assert(u);
5240
5241 /* This is invoked whenever one of the forked off processes let's us know the UID/GID its user name/group names
5242 * resolved to. We keep track of which UID/GID is currently assigned in order to be able to destroy its IPC
5243 * objects when no service references the UID/GID anymore. */
5244
5245 r = unit_ref_uid_gid(u, uid, gid);
5246 if (r > 0)
5247 unit_add_to_dbus_queue(u);
5248 }
5249
5250 int unit_set_invocation_id(Unit *u, sd_id128_t id) {
5251 int r;
5252
5253 assert(u);
5254
5255 /* Set the invocation ID for this unit. If we cannot, this will not roll back, but reset the whole thing. */
5256
5257 if (sd_id128_equal(u->invocation_id, id))
5258 return 0;
5259
5260 if (!sd_id128_is_null(u->invocation_id))
5261 (void) hashmap_remove_value(u->manager->units_by_invocation_id, &u->invocation_id, u);
5262
5263 if (sd_id128_is_null(id)) {
5264 r = 0;
5265 goto reset;
5266 }
5267
5268 r = hashmap_ensure_allocated(&u->manager->units_by_invocation_id, &id128_hash_ops);
5269 if (r < 0)
5270 goto reset;
5271
5272 u->invocation_id = id;
5273 sd_id128_to_string(id, u->invocation_id_string);
5274
5275 r = hashmap_put(u->manager->units_by_invocation_id, &u->invocation_id, u);
5276 if (r < 0)
5277 goto reset;
5278
5279 return 0;
5280
5281 reset:
5282 u->invocation_id = SD_ID128_NULL;
5283 u->invocation_id_string[0] = 0;
5284 return r;
5285 }
5286
5287 int unit_acquire_invocation_id(Unit *u) {
5288 sd_id128_t id;
5289 int r;
5290
5291 assert(u);
5292
5293 r = sd_id128_randomize(&id);
5294 if (r < 0)
5295 return log_unit_error_errno(u, r, "Failed to generate invocation ID for unit: %m");
5296
5297 r = unit_set_invocation_id(u, id);
5298 if (r < 0)
5299 return log_unit_error_errno(u, r, "Failed to set invocation ID for unit: %m");
5300
5301 unit_add_to_dbus_queue(u);
5302 return 0;
5303 }
5304
5305 int unit_set_exec_params(Unit *u, ExecParameters *p) {
5306 int r;
5307
5308 assert(u);
5309 assert(p);
5310
5311 /* Copy parameters from manager */
5312 r = manager_get_effective_environment(u->manager, &p->environment);
5313 if (r < 0)
5314 return r;
5315
5316 p->confirm_spawn = manager_get_confirm_spawn(u->manager);
5317 p->cgroup_supported = u->manager->cgroup_supported;
5318 p->prefix = u->manager->prefix;
5319 SET_FLAG(p->flags, EXEC_PASS_LOG_UNIT|EXEC_CHOWN_DIRECTORIES, MANAGER_IS_SYSTEM(u->manager));
5320
5321 /* Copy parameters from unit */
5322 p->cgroup_path = u->cgroup_path;
5323 SET_FLAG(p->flags, EXEC_CGROUP_DELEGATE, unit_cgroup_delegate(u));
5324
5325 return 0;
5326 }
5327
5328 int unit_fork_helper_process(Unit *u, const char *name, pid_t *ret) {
5329 int r;
5330
5331 assert(u);
5332 assert(ret);
5333
5334 /* Forks off a helper process and makes sure it is a member of the unit's cgroup. Returns == 0 in the child,
5335 * and > 0 in the parent. The pid parameter is always filled in with the child's PID. */
5336
5337 (void) unit_realize_cgroup(u);
5338
5339 r = safe_fork(name, FORK_REOPEN_LOG, ret);
5340 if (r != 0)
5341 return r;
5342
5343 (void) default_signals(SIGNALS_CRASH_HANDLER, SIGNALS_IGNORE, -1);
5344 (void) ignore_signals(SIGPIPE, -1);
5345
5346 (void) prctl(PR_SET_PDEATHSIG, SIGTERM);
5347
5348 if (u->cgroup_path) {
5349 r = cg_attach_everywhere(u->manager->cgroup_supported, u->cgroup_path, 0, NULL, NULL);
5350 if (r < 0) {
5351 log_unit_error_errno(u, r, "Failed to join unit cgroup %s: %m", u->cgroup_path);
5352 _exit(EXIT_CGROUP);
5353 }
5354 }
5355
5356 return 0;
5357 }
5358
5359 int unit_fork_and_watch_rm_rf(Unit *u, char **paths, pid_t *ret_pid) {
5360 pid_t pid;
5361 int r;
5362
5363 assert(u);
5364 assert(ret_pid);
5365
5366 r = unit_fork_helper_process(u, "(sd-rmrf)", &pid);
5367 if (r < 0)
5368 return r;
5369 if (r == 0) {
5370 int ret = EXIT_SUCCESS;
5371 char **i;
5372
5373 STRV_FOREACH(i, paths) {
5374 r = rm_rf(*i, REMOVE_ROOT|REMOVE_PHYSICAL|REMOVE_MISSING_OK);
5375 if (r < 0) {
5376 log_error_errno(r, "Failed to remove '%s': %m", *i);
5377 ret = EXIT_FAILURE;
5378 }
5379 }
5380
5381 _exit(ret);
5382 }
5383
5384 r = unit_watch_pid(u, pid, true);
5385 if (r < 0)
5386 return r;
5387
5388 *ret_pid = pid;
5389 return 0;
5390 }
5391
5392 static void unit_update_dependency_mask(Unit *u, UnitDependency d, Unit *other, UnitDependencyInfo di) {
5393 assert(u);
5394 assert(d >= 0);
5395 assert(d < _UNIT_DEPENDENCY_MAX);
5396 assert(other);
5397
5398 if (di.origin_mask == 0 && di.destination_mask == 0) {
5399 /* No bit set anymore, let's drop the whole entry */
5400 assert_se(hashmap_remove(u->dependencies[d], other));
5401 log_unit_debug(u, "lost dependency %s=%s", unit_dependency_to_string(d), other->id);
5402 } else
5403 /* Mask was reduced, let's update the entry */
5404 assert_se(hashmap_update(u->dependencies[d], other, di.data) == 0);
5405 }
5406
5407 void unit_remove_dependencies(Unit *u, UnitDependencyMask mask) {
5408 UnitDependency d;
5409
5410 assert(u);
5411
5412 /* Removes all dependencies u has on other units marked for ownership by 'mask'. */
5413
5414 if (mask == 0)
5415 return;
5416
5417 for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++) {
5418 bool done;
5419
5420 do {
5421 UnitDependencyInfo di;
5422 Unit *other;
5423 Iterator i;
5424
5425 done = true;
5426
5427 HASHMAP_FOREACH_KEY(di.data, other, u->dependencies[d], i) {
5428 UnitDependency q;
5429
5430 if ((di.origin_mask & ~mask) == di.origin_mask)
5431 continue;
5432 di.origin_mask &= ~mask;
5433 unit_update_dependency_mask(u, d, other, di);
5434
5435 /* We updated the dependency from our unit to the other unit now. But most dependencies
5436 * imply a reverse dependency. Hence, let's delete that one too. For that we go through
5437 * all dependency types on the other unit and delete all those which point to us and
5438 * have the right mask set. */
5439
5440 for (q = 0; q < _UNIT_DEPENDENCY_MAX; q++) {
5441 UnitDependencyInfo dj;
5442
5443 dj.data = hashmap_get(other->dependencies[q], u);
5444 if ((dj.destination_mask & ~mask) == dj.destination_mask)
5445 continue;
5446 dj.destination_mask &= ~mask;
5447
5448 unit_update_dependency_mask(other, q, u, dj);
5449 }
5450
5451 unit_add_to_gc_queue(other);
5452
5453 done = false;
5454 break;
5455 }
5456
5457 } while (!done);
5458 }
5459 }
5460
5461 static int unit_get_invocation_path(Unit *u, char **ret) {
5462 char *p;
5463 int r;
5464
5465 assert(u);
5466 assert(ret);
5467
5468 if (MANAGER_IS_SYSTEM(u->manager))
5469 p = strjoin("/run/systemd/units/invocation:", u->id);
5470 else {
5471 _cleanup_free_ char *user_path = NULL;
5472 r = xdg_user_runtime_dir(&user_path, "/systemd/units/invocation:");
5473 if (r < 0)
5474 return r;
5475 p = strjoin(user_path, u->id);
5476 }
5477
5478 if (!p)
5479 return -ENOMEM;
5480
5481 *ret = p;
5482 return 0;
5483 }
5484
5485 static int unit_export_invocation_id(Unit *u) {
5486 _cleanup_free_ char *p = NULL;
5487 int r;
5488
5489 assert(u);
5490
5491 if (u->exported_invocation_id)
5492 return 0;
5493
5494 if (sd_id128_is_null(u->invocation_id))
5495 return 0;
5496
5497 r = unit_get_invocation_path(u, &p);
5498 if (r < 0)
5499 return log_unit_debug_errno(u, r, "Failed to get invocation path: %m");
5500
5501 r = symlink_atomic(u->invocation_id_string, p);
5502 if (r < 0)
5503 return log_unit_debug_errno(u, r, "Failed to create invocation ID symlink %s: %m", p);
5504
5505 u->exported_invocation_id = true;
5506 return 0;
5507 }
5508
5509 static int unit_export_log_level_max(Unit *u, const ExecContext *c) {
5510 const char *p;
5511 char buf[2];
5512 int r;
5513
5514 assert(u);
5515 assert(c);
5516
5517 if (u->exported_log_level_max)
5518 return 0;
5519
5520 if (c->log_level_max < 0)
5521 return 0;
5522
5523 assert(c->log_level_max <= 7);
5524
5525 buf[0] = '0' + c->log_level_max;
5526 buf[1] = 0;
5527
5528 p = strjoina("/run/systemd/units/log-level-max:", u->id);
5529 r = symlink_atomic(buf, p);
5530 if (r < 0)
5531 return log_unit_debug_errno(u, r, "Failed to create maximum log level symlink %s: %m", p);
5532
5533 u->exported_log_level_max = true;
5534 return 0;
5535 }
5536
5537 static int unit_export_log_extra_fields(Unit *u, const ExecContext *c) {
5538 _cleanup_close_ int fd = -1;
5539 struct iovec *iovec;
5540 const char *p;
5541 char *pattern;
5542 le64_t *sizes;
5543 ssize_t n;
5544 size_t i;
5545 int r;
5546
5547 if (u->exported_log_extra_fields)
5548 return 0;
5549
5550 if (c->n_log_extra_fields <= 0)
5551 return 0;
5552
5553 sizes = newa(le64_t, c->n_log_extra_fields);
5554 iovec = newa(struct iovec, c->n_log_extra_fields * 2);
5555
5556 for (i = 0; i < c->n_log_extra_fields; i++) {
5557 sizes[i] = htole64(c->log_extra_fields[i].iov_len);
5558
5559 iovec[i*2] = IOVEC_MAKE(sizes + i, sizeof(le64_t));
5560 iovec[i*2+1] = c->log_extra_fields[i];
5561 }
5562
5563 p = strjoina("/run/systemd/units/log-extra-fields:", u->id);
5564 pattern = strjoina(p, ".XXXXXX");
5565
5566 fd = mkostemp_safe(pattern);
5567 if (fd < 0)
5568 return log_unit_debug_errno(u, fd, "Failed to create extra fields file %s: %m", p);
5569
5570 n = writev(fd, iovec, c->n_log_extra_fields*2);
5571 if (n < 0) {
5572 r = log_unit_debug_errno(u, errno, "Failed to write extra fields: %m");
5573 goto fail;
5574 }
5575
5576 (void) fchmod(fd, 0644);
5577
5578 if (rename(pattern, p) < 0) {
5579 r = log_unit_debug_errno(u, errno, "Failed to rename extra fields file: %m");
5580 goto fail;
5581 }
5582
5583 u->exported_log_extra_fields = true;
5584 return 0;
5585
5586 fail:
5587 (void) unlink(pattern);
5588 return r;
5589 }
5590
5591 static int unit_export_log_ratelimit_interval(Unit *u, const ExecContext *c) {
5592 _cleanup_free_ char *buf = NULL;
5593 const char *p;
5594 int r;
5595
5596 assert(u);
5597 assert(c);
5598
5599 if (u->exported_log_ratelimit_interval)
5600 return 0;
5601
5602 if (c->log_ratelimit_interval_usec == 0)
5603 return 0;
5604
5605 p = strjoina("/run/systemd/units/log-rate-limit-interval:", u->id);
5606
5607 if (asprintf(&buf, "%" PRIu64, c->log_ratelimit_interval_usec) < 0)
5608 return log_oom();
5609
5610 r = symlink_atomic(buf, p);
5611 if (r < 0)
5612 return log_unit_debug_errno(u, r, "Failed to create log rate limit interval symlink %s: %m", p);
5613
5614 u->exported_log_ratelimit_interval = true;
5615 return 0;
5616 }
5617
5618 static int unit_export_log_ratelimit_burst(Unit *u, const ExecContext *c) {
5619 _cleanup_free_ char *buf = NULL;
5620 const char *p;
5621 int r;
5622
5623 assert(u);
5624 assert(c);
5625
5626 if (u->exported_log_ratelimit_burst)
5627 return 0;
5628
5629 if (c->log_ratelimit_burst == 0)
5630 return 0;
5631
5632 p = strjoina("/run/systemd/units/log-rate-limit-burst:", u->id);
5633
5634 if (asprintf(&buf, "%u", c->log_ratelimit_burst) < 0)
5635 return log_oom();
5636
5637 r = symlink_atomic(buf, p);
5638 if (r < 0)
5639 return log_unit_debug_errno(u, r, "Failed to create log rate limit burst symlink %s: %m", p);
5640
5641 u->exported_log_ratelimit_burst = true;
5642 return 0;
5643 }
5644
5645 void unit_export_state_files(Unit *u) {
5646 const ExecContext *c;
5647
5648 assert(u);
5649
5650 if (!u->id)
5651 return;
5652
5653 if (MANAGER_IS_TEST_RUN(u->manager))
5654 return;
5655
5656 /* Exports a couple of unit properties to /run/systemd/units/, so that journald can quickly query this data
5657 * from there. Ideally, journald would use IPC to query this, like everybody else, but that's hard, as long as
5658 * the IPC system itself and PID 1 also log to the journal.
5659 *
5660 * Note that these files really shouldn't be considered API for anyone else, as use a runtime file system as
5661 * IPC replacement is not compatible with today's world of file system namespaces. However, this doesn't really
5662 * apply to communication between the journal and systemd, as we assume that these two daemons live in the same
5663 * namespace at least.
5664 *
5665 * Note that some of the "files" exported here are actually symlinks and not regular files. Symlinks work
5666 * better for storing small bits of data, in particular as we can write them with two system calls, and read
5667 * them with one. */
5668
5669 (void) unit_export_invocation_id(u);
5670
5671 if (!MANAGER_IS_SYSTEM(u->manager))
5672 return;
5673
5674 c = unit_get_exec_context(u);
5675 if (c) {
5676 (void) unit_export_log_level_max(u, c);
5677 (void) unit_export_log_extra_fields(u, c);
5678 (void) unit_export_log_ratelimit_interval(u, c);
5679 (void) unit_export_log_ratelimit_burst(u, c);
5680 }
5681 }
5682
5683 void unit_unlink_state_files(Unit *u) {
5684 const char *p;
5685
5686 assert(u);
5687
5688 if (!u->id)
5689 return;
5690
5691 /* Undoes the effect of unit_export_state() */
5692
5693 if (u->exported_invocation_id) {
5694 _cleanup_free_ char *invocation_path = NULL;
5695 int r = unit_get_invocation_path(u, &invocation_path);
5696 if (r >= 0) {
5697 (void) unlink(invocation_path);
5698 u->exported_invocation_id = false;
5699 }
5700 }
5701
5702 if (!MANAGER_IS_SYSTEM(u->manager))
5703 return;
5704
5705 if (u->exported_log_level_max) {
5706 p = strjoina("/run/systemd/units/log-level-max:", u->id);
5707 (void) unlink(p);
5708
5709 u->exported_log_level_max = false;
5710 }
5711
5712 if (u->exported_log_extra_fields) {
5713 p = strjoina("/run/systemd/units/extra-fields:", u->id);
5714 (void) unlink(p);
5715
5716 u->exported_log_extra_fields = false;
5717 }
5718
5719 if (u->exported_log_ratelimit_interval) {
5720 p = strjoina("/run/systemd/units/log-rate-limit-interval:", u->id);
5721 (void) unlink(p);
5722
5723 u->exported_log_ratelimit_interval = false;
5724 }
5725
5726 if (u->exported_log_ratelimit_burst) {
5727 p = strjoina("/run/systemd/units/log-rate-limit-burst:", u->id);
5728 (void) unlink(p);
5729
5730 u->exported_log_ratelimit_burst = false;
5731 }
5732 }
5733
5734 int unit_prepare_exec(Unit *u) {
5735 int r;
5736
5737 assert(u);
5738
5739 /* Load any custom firewall BPF programs here once to test if they are existing and actually loadable.
5740 * Fail here early since later errors in the call chain unit_realize_cgroup to cgroup_context_apply are ignored. */
5741 r = bpf_firewall_load_custom(u);
5742 if (r < 0)
5743 return r;
5744
5745 /* Prepares everything so that we can fork of a process for this unit */
5746
5747 (void) unit_realize_cgroup(u);
5748
5749 if (u->reset_accounting) {
5750 (void) unit_reset_accounting(u);
5751 u->reset_accounting = false;
5752 }
5753
5754 unit_export_state_files(u);
5755
5756 r = unit_setup_exec_runtime(u);
5757 if (r < 0)
5758 return r;
5759
5760 r = unit_setup_dynamic_creds(u);
5761 if (r < 0)
5762 return r;
5763
5764 return 0;
5765 }
5766
5767 static int log_leftover(pid_t pid, int sig, void *userdata) {
5768 _cleanup_free_ char *comm = NULL;
5769
5770 (void) get_process_comm(pid, &comm);
5771
5772 if (comm && comm[0] == '(') /* Most likely our own helper process (PAM?), ignore */
5773 return 0;
5774
5775 log_unit_warning(userdata,
5776 "Found left-over process " PID_FMT " (%s) in control group while starting unit. Ignoring.\n"
5777 "This usually indicates unclean termination of a previous run, or service implementation deficiencies.",
5778 pid, strna(comm));
5779
5780 return 1;
5781 }
5782
5783 int unit_warn_leftover_processes(Unit *u) {
5784 assert(u);
5785
5786 (void) unit_pick_cgroup_path(u);
5787
5788 if (!u->cgroup_path)
5789 return 0;
5790
5791 return cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, 0, 0, NULL, log_leftover, u);
5792 }
5793
5794 bool unit_needs_console(Unit *u) {
5795 ExecContext *ec;
5796 UnitActiveState state;
5797
5798 assert(u);
5799
5800 state = unit_active_state(u);
5801
5802 if (UNIT_IS_INACTIVE_OR_FAILED(state))
5803 return false;
5804
5805 if (UNIT_VTABLE(u)->needs_console)
5806 return UNIT_VTABLE(u)->needs_console(u);
5807
5808 /* If this unit type doesn't implement this call, let's use a generic fallback implementation: */
5809 ec = unit_get_exec_context(u);
5810 if (!ec)
5811 return false;
5812
5813 return exec_context_may_touch_console(ec);
5814 }
5815
5816 const char *unit_label_path(const Unit *u) {
5817 const char *p;
5818
5819 assert(u);
5820
5821 /* Returns the file system path to use for MAC access decisions, i.e. the file to read the SELinux label off
5822 * when validating access checks. */
5823
5824 p = u->source_path ?: u->fragment_path;
5825 if (!p)
5826 return NULL;
5827
5828 /* If a unit is masked, then don't read the SELinux label of /dev/null, as that really makes no sense */
5829 if (path_equal(p, "/dev/null"))
5830 return NULL;
5831
5832 return p;
5833 }
5834
5835 int unit_pid_attachable(Unit *u, pid_t pid, sd_bus_error *error) {
5836 int r;
5837
5838 assert(u);
5839
5840 /* Checks whether the specified PID is generally good for attaching, i.e. a valid PID, not our manager itself,
5841 * and not a kernel thread either */
5842
5843 /* First, a simple range check */
5844 if (!pid_is_valid(pid))
5845 return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process identifier " PID_FMT " is not valid.", pid);
5846
5847 /* Some extra safety check */
5848 if (pid == 1 || pid == getpid_cached())
5849 return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process " PID_FMT " is a manager process, refusing.", pid);
5850
5851 /* Don't even begin to bother with kernel threads */
5852 r = is_kernel_thread(pid);
5853 if (r == -ESRCH)
5854 return sd_bus_error_setf(error, SD_BUS_ERROR_UNIX_PROCESS_ID_UNKNOWN, "Process with ID " PID_FMT " does not exist.", pid);
5855 if (r < 0)
5856 return sd_bus_error_set_errnof(error, r, "Failed to determine whether process " PID_FMT " is a kernel thread: %m", pid);
5857 if (r > 0)
5858 return sd_bus_error_setf(error, SD_BUS_ERROR_INVALID_ARGS, "Process " PID_FMT " is a kernel thread, refusing.", pid);
5859
5860 return 0;
5861 }
5862
5863 void unit_log_success(Unit *u) {
5864 assert(u);
5865
5866 log_struct(LOG_INFO,
5867 "MESSAGE_ID=" SD_MESSAGE_UNIT_SUCCESS_STR,
5868 LOG_UNIT_ID(u),
5869 LOG_UNIT_INVOCATION_ID(u),
5870 LOG_UNIT_MESSAGE(u, "Succeeded."));
5871 }
5872
5873 void unit_log_failure(Unit *u, const char *result) {
5874 assert(u);
5875 assert(result);
5876
5877 log_struct(LOG_WARNING,
5878 "MESSAGE_ID=" SD_MESSAGE_UNIT_FAILURE_RESULT_STR,
5879 LOG_UNIT_ID(u),
5880 LOG_UNIT_INVOCATION_ID(u),
5881 LOG_UNIT_MESSAGE(u, "Failed with result '%s'.", result),
5882 "UNIT_RESULT=%s", result);
5883 }
5884
5885 void unit_log_skip(Unit *u, const char *result) {
5886 assert(u);
5887 assert(result);
5888
5889 log_struct(LOG_INFO,
5890 "MESSAGE_ID=" SD_MESSAGE_UNIT_SKIPPED_STR,
5891 LOG_UNIT_ID(u),
5892 LOG_UNIT_INVOCATION_ID(u),
5893 LOG_UNIT_MESSAGE(u, "Skipped due to '%s'.", result),
5894 "UNIT_RESULT=%s", result);
5895 }
5896
5897 void unit_log_process_exit(
5898 Unit *u,
5899 const char *kind,
5900 const char *command,
5901 bool success,
5902 int code,
5903 int status) {
5904
5905 int level;
5906
5907 assert(u);
5908 assert(kind);
5909
5910 /* If this is a successful exit, let's log about the exit code on DEBUG level. If this is a failure
5911 * and the process exited on its own via exit(), then let's make this a NOTICE, under the assumption
5912 * that the service already logged the reason at a higher log level on its own. Otherwise, make it a
5913 * WARNING. */
5914 if (success)
5915 level = LOG_DEBUG;
5916 else if (code == CLD_EXITED)
5917 level = LOG_NOTICE;
5918 else
5919 level = LOG_WARNING;
5920
5921 log_struct(level,
5922 "MESSAGE_ID=" SD_MESSAGE_UNIT_PROCESS_EXIT_STR,
5923 LOG_UNIT_MESSAGE(u, "%s exited, code=%s, status=%i/%s",
5924 kind,
5925 sigchld_code_to_string(code), status,
5926 strna(code == CLD_EXITED
5927 ? exit_status_to_string(status, EXIT_STATUS_FULL)
5928 : signal_to_string(status))),
5929 "EXIT_CODE=%s", sigchld_code_to_string(code),
5930 "EXIT_STATUS=%i", status,
5931 "COMMAND=%s", strna(command),
5932 LOG_UNIT_ID(u),
5933 LOG_UNIT_INVOCATION_ID(u));
5934 }
5935
5936 int unit_exit_status(Unit *u) {
5937 assert(u);
5938
5939 /* Returns the exit status to propagate for the most recent cycle of this unit. Returns a value in the range
5940 * 0…255 if there's something to propagate. EOPNOTSUPP if the concept does not apply to this unit type, ENODATA
5941 * if no data is currently known (for example because the unit hasn't deactivated yet) and EBADE if the main
5942 * service process has exited abnormally (signal/coredump). */
5943
5944 if (!UNIT_VTABLE(u)->exit_status)
5945 return -EOPNOTSUPP;
5946
5947 return UNIT_VTABLE(u)->exit_status(u);
5948 }
5949
5950 int unit_failure_action_exit_status(Unit *u) {
5951 int r;
5952
5953 assert(u);
5954
5955 /* Returns the exit status to propagate on failure, or an error if there's nothing to propagate */
5956
5957 if (u->failure_action_exit_status >= 0)
5958 return u->failure_action_exit_status;
5959
5960 r = unit_exit_status(u);
5961 if (r == -EBADE) /* Exited, but not cleanly (i.e. by signal or such) */
5962 return 255;
5963
5964 return r;
5965 }
5966
5967 int unit_success_action_exit_status(Unit *u) {
5968 int r;
5969
5970 assert(u);
5971
5972 /* Returns the exit status to propagate on success, or an error if there's nothing to propagate */
5973
5974 if (u->success_action_exit_status >= 0)
5975 return u->success_action_exit_status;
5976
5977 r = unit_exit_status(u);
5978 if (r == -EBADE) /* Exited, but not cleanly (i.e. by signal or such) */
5979 return 255;
5980
5981 return r;
5982 }
5983
5984 int unit_test_trigger_loaded(Unit *u) {
5985 Unit *trigger;
5986
5987 /* Tests whether the unit to trigger is loaded */
5988
5989 trigger = UNIT_TRIGGER(u);
5990 if (!trigger)
5991 return log_unit_error_errno(u, SYNTHETIC_ERRNO(ENOENT),
5992 "Refusing to start, no unit to trigger.");
5993 if (trigger->load_state != UNIT_LOADED)
5994 return log_unit_error_errno(u, SYNTHETIC_ERRNO(ENOENT),
5995 "Refusing to start, unit %s to trigger not loaded.", trigger->id);
5996
5997 return 0;
5998 }
5999
6000 void unit_destroy_runtime_directory(Unit *u, const ExecContext *context) {
6001 if (context->runtime_directory_preserve_mode == EXEC_PRESERVE_NO ||
6002 (context->runtime_directory_preserve_mode == EXEC_PRESERVE_RESTART && !unit_will_restart(u)))
6003 exec_context_destroy_runtime_directory(context, u->manager->prefix[EXEC_DIRECTORY_RUNTIME]);
6004 }
6005
6006 int unit_clean(Unit *u, ExecCleanMask mask) {
6007 UnitActiveState state;
6008
6009 assert(u);
6010
6011 /* Special return values:
6012 *
6013 * -EOPNOTSUPP → cleaning not supported for this unit type
6014 * -EUNATCH → cleaning not defined for this resource type
6015 * -EBUSY → unit currently can't be cleaned since it's running or not properly loaded, or has
6016 * a job queued or similar
6017 */
6018
6019 if (!UNIT_VTABLE(u)->clean)
6020 return -EOPNOTSUPP;
6021
6022 if (mask == 0)
6023 return -EUNATCH;
6024
6025 if (u->load_state != UNIT_LOADED)
6026 return -EBUSY;
6027
6028 if (u->job)
6029 return -EBUSY;
6030
6031 state = unit_active_state(u);
6032 if (!IN_SET(state, UNIT_INACTIVE))
6033 return -EBUSY;
6034
6035 return UNIT_VTABLE(u)->clean(u, mask);
6036 }
6037
6038 int unit_can_clean(Unit *u, ExecCleanMask *ret) {
6039 assert(u);
6040
6041 if (!UNIT_VTABLE(u)->clean ||
6042 u->load_state != UNIT_LOADED) {
6043 *ret = 0;
6044 return 0;
6045 }
6046
6047 /* When the clean() method is set, can_clean() really should be set too */
6048 assert(UNIT_VTABLE(u)->can_clean);
6049
6050 return UNIT_VTABLE(u)->can_clean(u, ret);
6051 }
6052
6053 static const char* const collect_mode_table[_COLLECT_MODE_MAX] = {
6054 [COLLECT_INACTIVE] = "inactive",
6055 [COLLECT_INACTIVE_OR_FAILED] = "inactive-or-failed",
6056 };
6057
6058 DEFINE_STRING_TABLE_LOOKUP(collect_mode, CollectMode);