package PVE::HA::LRM;

# Local Resource Manager

use strict;
use warnings;
use POSIX qw(:sys_wait_h);

use PVE::SafeSyslog;
use PVE::Tools;
use PVE::HA::Tools ':exit_codes';
use PVE::HA::Resources;

# Server can have several states:

my $valid_states = {
    wait_for_agent_lock => "waiting for agent lock",
    active => "got agent_lock",
    maintenance => "going into maintenance",
    lost_agent_lock => "lost agent_lock",
};

# we sleep ~10s per 'active' round, so if no service is available for >= 10 min we'd go into wait
# state, giving up the watchdog and the LRM lock voluntarily, ensuring the WD can do no harm
my $max_active_idle_rounds = 60;

sub new {
    my ($this, $haenv) = @_;

    my $class = ref($this) || $this;

    my $self = bless {
        haenv => $haenv,
        status => { state => 'startup' },
        workers => {},
        results => {},
        restart_tries => {},
        shutdown_request => 0,
        shutdown_errors => 0,
        # mode can be: active, reboot, shutdown, restart
        mode => 'active',
        cluster_state_update => 0,
        active_idle_rounds => 0,
    }, $class;

    $self->set_local_status({ state => 'wait_for_agent_lock' });

    return $self;
}
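
# Rough usage sketch (illustrative only): the LRM daemon is expected to create
# an environment object and then drive the instance via do_one_iteration().
# The environment setup shown below is an assumption for illustration, not
# something defined in this module.
#
#   my $haenv = PVE::HA::Env->new('PVE::HA::Env::PVE2', $nodename); # assumed
#   my $lrm = PVE::HA::LRM->new($haenv);
#   while ($lrm->do_one_iteration()) {
#       # work() returns 0 once a requested shutdown/restart has completed
#   }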

sub shutdown_request {
    my ($self) = @_;

    return if $self->{shutdown_request}; # already in shutdown mode

    my $haenv = $self->{haenv};

    my $nodename = $haenv->nodename();

    my ($shutdown, $reboot) = $haenv->is_node_shutdown();

    my $dc_ha_cfg = $haenv->get_ha_settings();
    my $shutdown_policy = $dc_ha_cfg->{shutdown_policy} // 'conditional';

    if ($shutdown) { # don't log this on service restart, only on node shutdown
        $haenv->log('info', "got shutdown request with shutdown policy '$shutdown_policy'");
    }

    my $freeze_all;
    my $maintenance;
    if ($shutdown_policy eq 'conditional') {
        $freeze_all = $reboot;
    } elsif ($shutdown_policy eq 'freeze') {
        $freeze_all = 1;
    } elsif ($shutdown_policy eq 'failover') {
        $freeze_all = 0;
    } elsif ($shutdown_policy eq 'migrate') {
        $maintenance = 1;
    } else {
        $haenv->log('err', "unknown shutdown policy '$shutdown_policy', fall back to conditional");
        $freeze_all = $reboot;
    }

    if ($maintenance) {
        # we get marked as unavailable by the manager, then all services will
        # be migrated away; we still have the same "can we exit" clause as a
        # normal shutdown -> no running service on this node
        # FIXME: after X minutes, add shutdown command for remaining services,
        # e.g., if they have no alternative node???
    } elsif ($shutdown) {
        # *always* queue stop jobs for all services if the node shuts down,
        # independent of whether it's a reboot or a poweroff, else we may corrupt
        # services or hinder node shutdown
        my $ss = $self->{service_status};

        foreach my $sid (keys %$ss) {
            my $sd = $ss->{$sid};
            next if !$sd->{node};
            next if $sd->{node} ne $nodename;
            # Note: use undef uid to mark shutdown/stop jobs
            $self->queue_resource_command($sid, undef, 'request_stop');
        }
    }

    if ($shutdown) {
        my $shutdown_type = $reboot ? 'reboot' : 'shutdown';
        if ($maintenance) {
            $haenv->log('info', "$shutdown_type LRM, doing maintenance, removing this node from active list");
            $self->{mode} = 'maintenance';
        } elsif ($freeze_all) {
            $haenv->log('info', "$shutdown_type LRM, stop and freeze all services");
            $self->{mode} = 'restart';
        } else {
            $haenv->log('info', "shutdown LRM, stop all services");
            $self->{mode} = 'shutdown';
        }
    } else {
        $haenv->log('info', "restart LRM, freeze all services");
        $self->{mode} = 'restart';
    }

    $self->{shutdown_request} = $haenv->get_time();

    eval { $self->update_lrm_status() or die "not quorate?\n"; };
    if (my $err = $@) {
        $haenv->log('err', "unable to update lrm status file - $err");
    }
}
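
# Rough summary of the shutdown policy handling above (illustrative):
#   'migrate'  + node shutdown               -> mode 'maintenance' (manager migrates services away)
#   'freeze', or 'conditional' on a reboot   -> mode 'restart' (services get frozen)
#   'failover', or 'conditional' on poweroff -> mode 'shutdown' (stop jobs queued for all local services)
#   plain LRM service restart (no node shutdown) -> always mode 'restart'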

sub get_local_status {
    my ($self) = @_;

    return $self->{status};
}

sub set_local_status {
    my ($self, $new) = @_;

    die "invalid state '$new->{state}'" if !$valid_states->{$new->{state}};

    my $haenv = $self->{haenv};

    my $old = $self->{status};

    # important: only update if it really changed
    return if $old->{state} eq $new->{state};

    $haenv->log('info', "status change $old->{state} => $new->{state}");

    $new->{state_change_time} = $haenv->get_time();

    $self->{status} = $new;
}

sub update_lrm_status {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    return 0 if !$haenv->quorate();

    my $lrm_status = {
        state => $self->{status}->{state},
        mode => $self->{mode},
        results => $self->{results},
        timestamp => $haenv->get_time(),
    };

    eval { $haenv->write_lrm_status($lrm_status); };
    if (my $err = $@) {
        $haenv->log('err', "unable to write lrm status file - $err");
        return 0;
    }

    return 1;
}

sub update_service_status {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    my $ms = eval { $haenv->read_manager_status(); };
    if (my $err = $@) {
        $haenv->log('err', "updating service status from manager failed: $err");
        return undef;
    } else {
        $self->{service_status} = $ms->{service_status} || {};
        return 1;
    }
}

sub get_protected_ha_agent_lock {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    my $count = 0;
    my $starttime = $haenv->get_time();

    for (;;) {

        if ($haenv->get_ha_agent_lock()) {
            if ($self->{ha_agent_wd}) {
                $haenv->watchdog_update($self->{ha_agent_wd});
            } else {
                my $wfh = $haenv->watchdog_open();
                $self->{ha_agent_wd} = $wfh;
            }
            return 1;
        }

        last if ++$count > 5; # try at most 5 times

        my $delay = $haenv->get_time() - $starttime;
        last if $delay > 5; # for at most 5 seconds

        $haenv->sleep(1);
    }

    return 0;
}

# only checks whether any service has the local node set as its node, independent of its request state
sub has_configured_service_on_local_node {
    my ($self) = @_;

    my $haenv = $self->{haenv};
    my $nodename = $haenv->nodename();

    my $ss = $self->{service_status};
    foreach my $sid (keys %$ss) {
        my $sd = $ss->{$sid};
        next if !$sd->{node} || $sd->{node} ne $nodename;

        return 1;
    }
    return 0;
}

sub active_service_count {
    my ($self) = @_;

    my $haenv = $self->{haenv};
    my $nodename = $haenv->nodename();

    my $ss = $self->{service_status};

    my $count = 0;
    foreach my $sid (keys %$ss) {
        my $sd = $ss->{$sid};
        next if !$sd->{node};
        next if $sd->{node} ne $nodename;
        my $req_state = $sd->{state};
        next if !defined($req_state);
        next if $req_state eq 'stopped';
        # NOTE: 'ignored' ones are already dropped by the manager from service_status
        next if $req_state eq 'freeze';
        # erroneous services are not managed by HA, don't count them as active
        next if $req_state eq 'error';

        $count++;
    }

    return $count;
}

my $wrote_lrm_status_at_startup = 0;

sub do_one_iteration {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    $haenv->loop_start_hook();

    $self->{cluster_state_update} = $haenv->cluster_state_update();

    my $res = $self->work();

    $haenv->loop_end_hook();

    return $res;
}

# NOTE: this disables the self-fence mechanism, so it must NOT be called with active services
# It's normally *only* OK on graceful shutdown (with no services, or all services frozen)
my sub give_up_watchdog_protection {
    my ($self) = @_;

    if ($self->{ha_agent_wd}) {
        $self->{haenv}->watchdog_close($self->{ha_agent_wd});
        delete $self->{ha_agent_wd}; # only delete after close!
    }
}

sub work {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    if (!$wrote_lrm_status_at_startup) {
        if ($self->update_lrm_status()) {
            $wrote_lrm_status_at_startup = 1;
        } else {
            # do nothing
            $haenv->sleep(5);
            return $self->{shutdown_request} ? 0 : 1;
        }
    }

    my $status = $self->get_local_status();
    my $state = $status->{state};

    $self->update_service_status();

    my $fence_request = PVE::HA::Tools::count_fenced_services($self->{service_status}, $haenv->nodename());

    # do state changes first

    my $ctime = $haenv->get_time();

    if ($state eq 'wait_for_agent_lock') {

        my $service_count = $self->active_service_count();

        if (!$fence_request && $service_count && $haenv->quorate()) {
            if ($self->get_protected_ha_agent_lock()) {
                $self->set_local_status({ state => 'active' });
            }
        }

    } elsif ($state eq 'lost_agent_lock') {

        if (!$fence_request && $haenv->quorate()) {
            if ($self->get_protected_ha_agent_lock()) {
                $self->set_local_status({ state => 'active' });
            }
        }

    } elsif ($state eq 'active') {

        if ($fence_request) {
            $haenv->log('err', "node needs to be fenced - releasing agent_lock\n");
            $self->set_local_status({ state => 'lost_agent_lock'});
        } elsif (!$self->get_protected_ha_agent_lock()) {
            $self->set_local_status({ state => 'lost_agent_lock'});
        } elsif ($self->{mode} eq 'maintenance') {
            $self->set_local_status({ state => 'maintenance'});
        } else {
            if (!$self->has_configured_service_on_local_node() && !$self->run_workers()) {
                # no active service configured for this node and all (old) workers are done
                $self->{active_idle_rounds}++;
                if ($self->{active_idle_rounds} > $max_active_idle_rounds) {
                    $haenv->log('info', "node had no service configured for $max_active_idle_rounds rounds, going idle.\n");
                    # safety: no active service & no running worker for quite some time -> OK
                    $haenv->release_ha_agent_lock();
                    give_up_watchdog_protection($self);
                    $self->set_local_status({ state => 'wait_for_agent_lock'});
                    $self->{active_idle_rounds} = 0;
                }
            } elsif ($self->{active_idle_rounds}) {
                $self->{active_idle_rounds} = 0;
            }
        }
    } elsif ($state eq 'maintenance') {

        if ($fence_request) {
            $haenv->log('err', "node needs to be fenced during maintenance mode - releasing agent_lock\n");
            $self->set_local_status({ state => 'lost_agent_lock'});
        } elsif (!$self->get_protected_ha_agent_lock()) {
            $self->set_local_status({ state => 'lost_agent_lock'});
        }
    }

    $status = $self->get_local_status();
    $state = $status->{state};

    # do work

    if ($state eq 'wait_for_agent_lock') {

        return 0 if $self->{shutdown_request};

        $self->update_lrm_status();

        $haenv->sleep(5);

    } elsif ($state eq 'active') {

        my $startime = $haenv->get_time();

        my $max_time = 10;

        my $shutdown = 0;

        # do work (max_time seconds)
        eval {
            # fixme: set alert timer

            # if we could not get the current service status there's no point
            # in doing anything, try again next round.
            return if !$self->update_service_status();

            if ($self->{shutdown_request}) {

                if ($self->{mode} eq 'restart') {

                    my $service_count = $self->active_service_count();

                    if ($service_count == 0) {
                        if ($self->run_workers() == 0) {
                            # safety: no active services or workers -> OK
                            give_up_watchdog_protection($self);
                            $shutdown = 1;

                            # restart with no or frozen services, release the lock
                            $haenv->release_ha_agent_lock();
                        }
                    }
                } else {

                    if ($self->run_workers() == 0) {
                        if ($self->{shutdown_errors} == 0) {
                            # safety: no active services and LRM shutdown -> OK
                            give_up_watchdog_protection($self);

                            # shutdown with all services stopped, thus release the lock
                            $haenv->release_ha_agent_lock();
                        }

                        $shutdown = 1;
                    }
                }
            } else {
                if (!$self->{cluster_state_update}) {
                    # update failed but we could still renew our lock (cfs restart?),
                    # safely skip manage and expect to update just fine next round
                    $haenv->log('notice', "temporary inconsistent cluster state " .
                        "(cfs restart?), skip round");
                    return;
                }

                $self->manage_resources();

            }
        };
        if (my $err = $@) {
            $haenv->log('err', "got unexpected error - $err");
        }

        $self->update_lrm_status();

        return 0 if $shutdown;

        $haenv->sleep_until($startime + $max_time);

    } elsif ($state eq 'lost_agent_lock') {

        # NOTE: watchdog is active and will trigger soon!
        # so we hope to get the lock back soon!
        if ($self->{shutdown_request}) {

            my $service_count = $self->active_service_count();

            if ($service_count > 0) {
                $haenv->log('err', "got shutdown request in state 'lost_agent_lock' - " .
                    "detected $service_count running services");

                if ($self->{mode} eq 'restart') {
                    my $state_mt = $self->{status}->{state_change_time};

                    # watchdog should have already triggered, so either it's set
                    # to noboot or it failed. As we are in restart mode and have
                    # an infinite stop timeout -> exit now - we don't touch services
                    # or change state, so this is safe, relatively speaking
                    if (($haenv->get_time() - $state_mt) > 90) {
                        $haenv->log('err', "lost agent lock and restart request for over 90 seconds - giving up!");
                        return 0;
                    }
                }
            } else {
                # safety: all services are stopped, so we can close the watchdog
                give_up_watchdog_protection($self);

                return 0;
            }
        }

        $haenv->sleep(5);

    } elsif ($state eq 'maintenance') {

        my $startime = $haenv->get_time();
        return if !$self->update_service_status();

        # wait until all active services moved away
        my $service_count = $self->active_service_count();

        my $exit_lrm = 0;

        if ($self->{shutdown_request}) {
            if ($service_count == 0 && $self->run_workers() == 0) {
                # safety: going into maintenance and all active services got moved -> OK
                give_up_watchdog_protection($self);

                $exit_lrm = 1;

                # restart with no or frozen services, release the lock
                $haenv->release_ha_agent_lock();
            }
        }

        $self->manage_resources() if !$exit_lrm;

        $self->update_lrm_status();

        return 0 if $exit_lrm;

        $haenv->sleep_until($startime + 5);

    } else {

        die "got unexpected status '$state'\n";

    }

    return 1;
}

sub run_workers {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    my $starttime = $haenv->get_time();

    # number of workers to start; if 0 we exec the command directly without forking
    my $max_workers = $haenv->get_max_workers();

    my $sc = $haenv->read_service_config();

    while (($haenv->get_time() - $starttime) < 5) {
        my $count = $self->check_active_workers();

        foreach my $sid (sort keys %{$self->{workers}}) {
            last if $count >= $max_workers && $max_workers > 0;

            my $w = $self->{workers}->{$sid};
            if (!$w->{pid}) {
                # only fork if we may, else call exec_resource_agent
                # directly (e.g. for regression tests)
                if ($max_workers > 0) {
                    my $pid = fork();
                    if (!defined($pid)) {
                        $haenv->log('err', "fork worker failed");
                        $count = 0; last; # abort, try later
                    } elsif ($pid == 0) {
                        $haenv->after_fork(); # cleanup

                        # do work
                        my $res = -1;
                        eval {
                            $res = $self->exec_resource_agent($sid, $sc->{$sid}, $w->{state}, $w->{params});
                        };
                        if (my $err = $@) {
                            $haenv->log('err', $err);
                            POSIX::_exit(-1);
                        }
                        POSIX::_exit($res);
                    } else {
                        $count++;
                        $w->{pid} = $pid;
                    }
                } else {
                    my $res = -1;
                    eval {
                        $res = $self->exec_resource_agent($sid, $sc->{$sid}, $w->{state}, $w->{params});
                        $res = $res << 8 if $res > 0;
                    };
                    if (my $err = $@) {
                        $haenv->log('err', $err);
                    }
                    if (defined($w->{uid})) {
                        $self->resource_command_finished($sid, $w->{uid}, $res);
                    } else {
                        $self->stop_command_finished($sid, $res);
                    }
                }
            }
        }

        last if !$count;

        $haenv->sleep(1);
    }

    return scalar(keys %{$self->{workers}});
}
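
# Rough sketch of a queued worker entry as consumed by run_workers() above
# (illustrative values, see queue_resource_command() below):
#
#   $self->{workers}->{'vm:100'} = {
#       sid    => 'vm:100',
#       uid    => $sd->{uid},   # undef for shutdown/stop jobs
#       state  => 'started',
#       params => { target => $sd->{target}, timeout => $sd->{timeout} },
#   };
#
# With get_max_workers() returning 0 (e.g. in the regression tests) the command
# is executed directly via exec_resource_agent() instead of in a forked child.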

sub manage_resources {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    my $nodename = $haenv->nodename();

    my $ss = $self->{service_status};

    foreach my $sid (keys %{$self->{restart_tries}}) {
        delete $self->{restart_tries}->{$sid} if !$ss->{$sid};
    }

    foreach my $sid (keys %$ss) {
        my $sd = $ss->{$sid};
        next if !$sd->{node};
        next if !$sd->{uid};
        next if $sd->{node} ne $nodename;
        my $req_state = $sd->{state};
        next if !defined($req_state);
        # can only happen for restricted groups where the failed node itself needs to be the
        # recovery target. Always let the master do so first, it will then be marked as 'stopped'
        # and we can just continue normally. But we must NOT do anything with it while still in recovery
        next if $req_state eq 'recovery';
        next if $req_state eq 'freeze';

        $self->queue_resource_command($sid, $sd->{uid}, $req_state, {
            'target' => $sd->{target},
            'timeout' => $sd->{timeout},
        });
    }

    return $self->run_workers();
}

sub queue_resource_command {
    my ($self, $sid, $uid, $state, $params) = @_;

    # do not queue the exact same command twice as this may lead to an
    # inconsistent HA state when the first command fails but the CRM
    # does not process its failure right away and the LRM starts a second
    # try, without the CRM knowing of it (race condition)
    # The 'stopped' command is an exception as we do not process its result
    # in the CRM and we want to execute it always (even with no active CRM)
    return if $state ne 'stopped' && $uid && defined($self->{results}->{$uid});

    if (my $w = $self->{workers}->{$sid}) {
        return if $w->{pid}; # already started
        # else, delete and overwrite queue entry with new command
        delete $self->{workers}->{$sid};
    }

    $self->{workers}->{$sid} = {
        sid => $sid,
        uid => $uid,
        state => $state,
    };

    $self->{workers}->{$sid}->{params} = $params if $params;
}
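
# Example calls as issued above (illustrative values): manage_resources() queues
# the manager-requested state for a local service, while shutdown_request()
# queues stop jobs with an undef uid:
#
#   $self->queue_resource_command('vm:100', $sd->{uid}, 'started', {
#       target  => $sd->{target},
#       timeout => $sd->{timeout},
#   });
#   $self->queue_resource_command('vm:100', undef, 'request_stop');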

sub check_active_workers {
    my ($self) = @_;

    # finish/count workers
    my $count = 0;
    foreach my $sid (keys %{$self->{workers}}) {
        my $w = $self->{workers}->{$sid};
        if (my $pid = $w->{pid}) {
            # check status
            my $waitpid = waitpid($pid, WNOHANG);
            if (defined($waitpid) && ($waitpid == $pid)) {
                if (defined($w->{uid})) {
                    $self->resource_command_finished($sid, $w->{uid}, $?);
                } else {
                    $self->stop_command_finished($sid, $?);
                }
            } else {
                $count++;
            }
        }
    }

    return $count;
}

sub stop_command_finished {
    my ($self, $sid, $status) = @_;

    my $haenv = $self->{haenv};

    my $w = delete $self->{workers}->{$sid};
    return if !$w; # should not happen

    my $exit_code = -1;

    if ($status == -1) {
        $haenv->log('err', "resource agent $sid finished - failed to execute");
    } elsif (my $sig = ($status & 127)) {
        $haenv->log('err', "resource agent $sid finished - got signal $sig");
    } else {
        $exit_code = ($status >> 8);
    }

    if ($exit_code != 0) {
        $self->{shutdown_errors}++;
    }
}
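
# Worked example of the wait-status decoding used here and in
# resource_command_finished() below (values illustrative): a worker child that
# called POSIX::_exit(1) yields $status == 256, so
#   ($status & 127) == 0   # not killed by a signal
#   ($status >> 8)  == 1   # the agent's exit code
# while a child killed by SIGKILL yields ($status & 127) == 9 and is logged as
# an error. The non-forking path in run_workers() mimics this encoding with
# '$res = $res << 8 if $res > 0'.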

sub resource_command_finished {
    my ($self, $sid, $uid, $status) = @_;

    my $haenv = $self->{haenv};

    my $w = delete $self->{workers}->{$sid};
    return if !$w; # should not happen

    my $exit_code = -1;

    if ($status == -1) {
        $haenv->log('err', "resource agent $sid finished - failed to execute");
    } elsif (my $sig = ($status & 127)) {
        $haenv->log('err', "resource agent $sid finished - got signal $sig");
    } else {
        $exit_code = ($status >> 8);
    }

    $exit_code = $self->handle_service_exitcode($sid, $w->{state}, $exit_code);

    return if $exit_code == ETRY_AGAIN; # tell nobody, simply retry

    $self->{results}->{$uid} = {
        sid => $w->{sid},
        state => $w->{state},
        exit_code => $exit_code,
    };

    my $ss = $self->{service_status};

    # compute hash of valid/existing uids
    my $valid_uids = {};
    foreach my $sid (keys %$ss) {
        my $sd = $ss->{$sid};
        next if !$sd->{uid};
        $valid_uids->{$sd->{uid}} = 1;
    }

    my $results = {};
    foreach my $id (keys %{$self->{results}}) {
        next if !$valid_uids->{$id};
        $results->{$id} = $self->{results}->{$id};
    }
    $self->{results} = $results;
}

# processes the exit code from a finished resource agent, so that the CRM knows
# whether the LRM wants to retry an action based on the current recovery policies
# for the failed service, or whether the CRM itself must try to recover from the failure.
sub handle_service_exitcode {
    my ($self, $sid, $cmd, $exit_code) = @_;

    my $haenv = $self->{haenv};
    my $tries = $self->{restart_tries};

    my $sc = $haenv->read_service_config();

    my $max_restart = 0;

    if (my $cd = $sc->{$sid}) {
        $max_restart = $cd->{max_restart};
    }

    if ($cmd eq 'started') {

        if ($exit_code == SUCCESS) {

            $tries->{$sid} = 0;

            return $exit_code;

        } elsif ($exit_code == ERROR) {

            $tries->{$sid} = 0 if !defined($tries->{$sid});

            if ($tries->{$sid} >= $max_restart) {
                $haenv->log('err', "unable to start service $sid on local node".
                    " after $tries->{$sid} retries");
                $tries->{$sid} = 0;
                return ERROR;
            }

            $tries->{$sid}++;

            $haenv->log('warning', "restart policy: retry number $tries->{$sid}" .
                " for service '$sid'");
            # tell CRM that we retry the start
            return ETRY_AGAIN;
        }
    }

    return $exit_code;

}
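
# Rough sketch of the resulting retry behaviour for a failing 'started' command
# (illustrative, assuming the service is configured with max_restart = 1):
#
#   1st failed start: $tries->{$sid} == 0 < 1  -> returns ETRY_AGAIN, the start is retried
#   2nd failed start: $tries->{$sid} == 1 >= 1 -> logs the error, resets the counter
#                                                 and returns ERROR so the CRM can recover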

sub exec_resource_agent {
    my ($self, $sid, $service_config, $cmd, $params) = @_;

    # setup execution environment

    $ENV{'PATH'} = '/sbin:/bin:/usr/sbin:/usr/bin';

    my $haenv = $self->{haenv};

    my $nodename = $haenv->nodename();

    my (undef, $service_type, $service_name) = $haenv->parse_sid($sid);

    my $plugin = PVE::HA::Resources->lookup($service_type);
    if (!$plugin) {
        $haenv->log('err', "service type '$service_type' not implemented");
        return EUNKNOWN_SERVICE_TYPE;
    }

    if (!$service_config) {
        $haenv->log('err', "missing resource configuration for '$sid'");
        return EUNKNOWN_SERVICE;
    }

    # process error state early
    if ($cmd eq 'error') {
        $haenv->log('err', "service $sid is in an error state and needs manual " .
            "intervention. Look up 'ERROR RECOVERY' in the documentation.");

        return SUCCESS; # error always succeeds
    }

    if ($service_config->{node} ne $nodename) {
        $haenv->log('err', "service '$sid' not on this node");
        return EWRONG_NODE;
    }

    my $id = $service_name;

    my $running = $plugin->check_running($haenv, $id);

    if ($cmd eq 'started') {

        return SUCCESS if $running;

        $haenv->log("info", "starting service $sid");

        $plugin->start($haenv, $id);

        $running = $plugin->check_running($haenv, $id);

        if ($running) {
            $haenv->log("info", "service status $sid started");
            return SUCCESS;
        } else {
            $haenv->log("warning", "unable to start service $sid");
            return ERROR;
        }

    } elsif ($cmd eq 'request_stop' || $cmd eq 'stopped') {

        return SUCCESS if !$running;

        if (defined($params->{timeout})) {
            $haenv->log("info", "stopping service $sid (timeout=$params->{timeout})");
        } else {
            $haenv->log("info", "stopping service $sid");
        }

        $plugin->shutdown($haenv, $id, $params->{timeout});

        $running = $plugin->check_running($haenv, $id);

        if (!$running) {
            $haenv->log("info", "service status $sid stopped");
            return SUCCESS;
        } else {
            $haenv->log("info", "unable to stop service $sid (still running)");
            return ERROR;
        }

    } elsif ($cmd eq 'migrate' || $cmd eq 'relocate') {

        my $target = $params->{target};
        if (!defined($target)) {
            die "$cmd '$sid' failed - missing target\n";
        }

        if ($service_config->{node} eq $target) {
            # already there
            return SUCCESS;
        }

        my $online = ($cmd eq 'migrate') ? 1 : 0;

        my $res = $plugin->migrate($haenv, $id, $target, $online);

        # something went wrong if service is still on this node
        if (!$res) {
            $haenv->log("err", "service $sid not moved (migration error)");
            return ERROR;
        }

        return SUCCESS;

    }

    $haenv->log("err", "implement me (cmd '$cmd')");
    return EUNKNOWN_COMMAND;
}
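
# Rough usage sketch (illustrative): a worker spawned by run_workers() invokes
# this method as
#
#   my $res = $self->exec_resource_agent($sid, $sc->{$sid}, $w->{state}, $w->{params});
#
# and the returned code (SUCCESS, ERROR, EWRONG_NODE, ...) travels back through
# the child's exit status into resource_command_finished() and finally
# handle_service_exitcode().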


1;