# Local Resource Manager

use strict;
use warnings;
use POSIX qw(:sys_wait_h);

use PVE::HA::Tools ':exit_codes';
use PVE::HA::Resources;

# Server can have several states:

my $valid_states = {
    wait_for_agent_lock => "waiting for agent lock",
    active => "got agent_lock",
    maintenance => "going into maintenance",
    lost_agent_lock => "lost agent_lock",
};

# we sleep ~10s per 'active' round, so if no service is available for >= 10 min we'd go into wait
# state, giving up the watchdog and the LRM lock voluntarily, ensuring the WD can do no harm
my $max_active_idle_rounds = 60;
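
# (sanity check on the comment above: 60 rounds x ~10 s sleep per 'active'
# round comes to ~600 s, i.e. the ">= 10 min" threshold before going idle)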

sub new {
    my ($this, $haenv) = @_;

    my $class = ref($this) || $this;

    my $self = bless {
        haenv => $haenv,
        status => { state => 'startup' },
        workers => {},
        results => {},
        restart_tries => {},
        shutdown_request => 0,
        shutdown_errors => 0,
        # mode can be: active, reboot, shutdown, restart
        mode => 'active',
        cluster_state_update => 0,
        active_idle_rounds => 0,
    }, $class;

    $self->set_local_status({ state => 'wait_for_agent_lock' });

    return $self;
}

sub shutdown_request {
    my ($self) = @_;

    return if $self->{shutdown_request}; # already in shutdown mode

    my $haenv = $self->{haenv};

    my $nodename = $haenv->nodename();

    my ($shutdown, $reboot) = $haenv->is_node_shutdown();

    my $dc_cfg = $haenv->get_datacenter_settings();
    my $shutdown_policy = $dc_cfg->{ha}->{shutdown_policy} // 'conditional';

    if ($shutdown) { # don't log this on service restart, only on node shutdown
        $haenv->log('info', "got shutdown request with shutdown policy '$shutdown_policy'");
    }

    my $freeze_all;
    my $maintenance;
    if ($shutdown_policy eq 'conditional') {
        $freeze_all = $reboot;
    } elsif ($shutdown_policy eq 'freeze') {
        $freeze_all = 1;
    } elsif ($shutdown_policy eq 'failover') {
        $freeze_all = 0;
    } elsif ($shutdown_policy eq 'migrate') {
        $maintenance = 1;
    } else {
        $haenv->log('err', "unknown shutdown policy '$shutdown_policy', falling back to conditional");
        $freeze_all = $reboot;
    }

    if ($maintenance) {
        # we get marked as unavailable by the manager, then all services will
        # be migrated away; we'll still have the same "can we exit" clause as
        # a normal shutdown -> no running service on this node
        # FIXME: after X minutes, add shutdown command for remaining services,
        # e.g., if they have no alternative node???
    } elsif ($shutdown) {
        # *always* queue stop jobs for all services if the node shuts down,
        # independent of whether it's a reboot or a poweroff, else we may corrupt
        # services or hinder node shutdown
        my $ss = $self->{service_status};

        foreach my $sid (keys %$ss) {
            my $sd = $ss->{$sid};
            next if !$sd->{node};
            next if $sd->{node} ne $nodename;
            # Note: use undef uid to mark shutdown/stop jobs
            $self->queue_resource_command($sid, undef, 'request_stop');
        }
    }

    if ($shutdown) {
        my $shutdown_type = $reboot ? 'reboot' : 'shutdown';
        if ($maintenance) {
            $haenv->log('info', "$shutdown_type LRM, doing maintenance, removing this node from active list");
            $self->{mode} = 'maintenance';
        } elsif ($freeze_all) {
            $haenv->log('info', "$shutdown_type LRM, stop and freeze all services");
            $self->{mode} = 'restart';
        } else {
            $haenv->log('info', "shutdown LRM, stop all services");
            $self->{mode} = 'shutdown';
        }
    } else {
        $haenv->log('info', "restart LRM, freeze all services");
        $self->{mode} = 'restart';
    }

    $self->{shutdown_request} = $haenv->get_time();

    eval { $self->update_lrm_status() or die "not quorate?\n"; };
    if (my $err = $@) {
        $haenv->log('err', "unable to update lrm status file - $err");
    }
}

sub get_local_status {
    my ($self) = @_;

    return $self->{status};
}

sub set_local_status {
    my ($self, $new) = @_;

    die "invalid state '$new->{state}'" if !$valid_states->{$new->{state}};

    my $haenv = $self->{haenv};

    my $old = $self->{status};

    # important: only update if it really changed
    return if $old->{state} eq $new->{state};

    $haenv->log('info', "status change $old->{state} => $new->{state}");

    $new->{state_change_time} = $haenv->get_time();

    $self->{status} = $new;
}

sub update_lrm_status {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    return 0 if !$haenv->quorate();

    my $lrm_status = {
        state => $self->{status}->{state},
        mode => $self->{mode},
        results => $self->{results},
        timestamp => $haenv->get_time(),
    };

    eval { $haenv->write_lrm_status($lrm_status); };
    if (my $err = $@) {
        $haenv->log('err', "unable to write lrm status file - $err");
        return 0;
    }

    return 1;
}

sub update_service_status {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    my $ms = eval { $haenv->read_manager_status(); };
    if (my $err = $@) {
        $haenv->log('err', "updating service status from manager failed: $err");
        return undef;
    }

    $self->{service_status} = $ms->{service_status} || {};
    my $nodename = $haenv->nodename();
    $self->{node_status} = $ms->{node_status}->{$nodename} || 'unknown';

    return 1;
}

sub get_protected_ha_agent_lock {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    my $count = 0;
    my $starttime = $haenv->get_time();

    for (;;) {

        if ($haenv->get_ha_agent_lock()) {
            if ($self->{ha_agent_wd}) {
                $haenv->watchdog_update($self->{ha_agent_wd});
            } else {
                my $wfh = $haenv->watchdog_open();
                $self->{ha_agent_wd} = $wfh;
            }
            return 1;
        }

        last if ++$count > 5; # try at most 5 times

        my $delay = $haenv->get_time() - $starttime;
        last if $delay > 5; # for at most 5 seconds

        $haenv->sleep(1);
    }

    return 0;
}

# returns true if any service has the local node configured as its node,
# independent of which request state it is in
sub has_configured_service_on_local_node {
    my ($self) = @_;

    my $haenv = $self->{haenv};
    my $nodename = $haenv->nodename();

    my $ss = $self->{service_status};
    foreach my $sid (keys %$ss) {
        my $sd = $ss->{$sid};
        next if !$sd->{node} || $sd->{node} ne $nodename;

        return 1;
    }
    return 0;
}

sub is_fence_requested {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    my $nodename = $haenv->nodename();
    my $ss = $self->{service_status};

    my $fenced_services = PVE::HA::Tools::count_fenced_services($ss, $nodename);

    return $fenced_services || $self->{node_status} eq 'fence';
}

sub is_maintenance_requested {
    my ($self) = @_;

    # shutdown maintenance or manual request
    return $self->{mode} eq 'maintenance';
}

sub active_service_count {
    my ($self) = @_;

    my $haenv = $self->{haenv};
    my $nodename = $haenv->nodename();

    my $ss = $self->{service_status};

    my $count = 0;
    foreach my $sid (keys %$ss) {
        my $sd = $ss->{$sid};
        next if !$sd->{node};
        next if $sd->{node} ne $nodename;
        my $req_state = $sd->{state};
        next if !defined($req_state);
        next if $req_state eq 'stopped';
        # NOTE: 'ignored' ones are already dropped by the manager from service_status
        next if $req_state eq 'freeze';
        # erroneous services are not managed by HA, don't count them as active
        next if $req_state eq 'error';
        # request_start is for (optional) better node selection for the stop -> started transition
        next if $req_state eq 'request_start';

        $count++;
    }

    return $count;
}

my $wrote_lrm_status_at_startup = 0;

sub do_one_iteration {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    $haenv->loop_start_hook();

    $self->{cluster_state_update} = $haenv->cluster_state_update();

    my $res = $self->work();

    $haenv->loop_end_hook();

    return $res;
}
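
# Usage sketch (an assumption for illustration, not part of this module): the
# LRM daemon constructs one instance with a PVE::HA::Env handle ($haenv) and
# keeps iterating until work() requests an exit by returning 0, roughly:
#
#   my $lrm = PVE::HA::LRM->new($haenv);
#   while ($lrm->do_one_iteration()) {
#       # loop_start_hook()/loop_end_hook() inside each iteration handle timing
#   }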

# NOTE: this disables the self-fence mechanism, so it must NOT be called with active services.
# It's normally *only* OK on graceful shutdown (with no services, or all services frozen)
my sub give_up_watchdog_protection {
    my ($self) = @_;

    if ($self->{ha_agent_wd}) {
        $self->{haenv}->watchdog_close($self->{ha_agent_wd});
        delete $self->{ha_agent_wd}; # only delete after close!
    }
}

sub work {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    if (!$wrote_lrm_status_at_startup) {
        if ($self->update_lrm_status()) {
            $wrote_lrm_status_at_startup = 1;
        } else {
            # do nothing
            $haenv->sleep(5);
            return $self->{shutdown_request} ? 0 : 1;
        }
    }

    my $status = $self->get_local_status();
    my $state = $status->{state};

    $self->update_service_status();

    my $fence_request = $self->is_fence_requested();

    # do state changes first

    my $ctime = $haenv->get_time();

    if ($state eq 'wait_for_agent_lock') {

        my $service_count = $self->active_service_count();

        if (!$fence_request && $service_count && $haenv->quorate()) {
            if ($self->get_protected_ha_agent_lock()) {
                $self->set_local_status({ state => 'active' });
            }
        }

    } elsif ($state eq 'lost_agent_lock') {

        if (!$fence_request && $haenv->quorate()) {
            if ($self->get_protected_ha_agent_lock()) {
                $self->set_local_status({ state => 'active' });
            }
        }

    } elsif ($state eq 'active') {

        if ($fence_request) {
            $haenv->log('err', "node needs to be fenced - releasing agent_lock\n");
            $self->set_local_status({ state => 'lost_agent_lock' });
        } elsif (!$self->get_protected_ha_agent_lock()) {
            $self->set_local_status({ state => 'lost_agent_lock' });
        } elsif ($self->is_maintenance_requested()) {
            $self->set_local_status({ state => 'maintenance' });
        } else {
            if (!$self->has_configured_service_on_local_node() && !$self->run_workers()) {
                # no active service configured for this node and all (old) workers are done
                $self->{active_idle_rounds}++;
                if ($self->{active_idle_rounds} > $max_active_idle_rounds) {
                    $haenv->log('info', "node had no service configured for $max_active_idle_rounds rounds, going idle.\n");
                    # safety: no active service & no running worker for quite some time -> OK
                    $haenv->release_ha_agent_lock();
                    give_up_watchdog_protection($self);
                    $self->set_local_status({ state => 'wait_for_agent_lock' });
                    $self->{active_idle_rounds} = 0;
                }
            } elsif ($self->{active_idle_rounds}) {
                $self->{active_idle_rounds} = 0;
            }
        }

    } elsif ($state eq 'maintenance') {

        if ($fence_request) {
            $haenv->log('err', "node needs to be fenced during maintenance mode - releasing agent_lock\n");
            $self->set_local_status({ state => 'lost_agent_lock' });
        } elsif (!$self->get_protected_ha_agent_lock()) {
            $self->set_local_status({ state => 'lost_agent_lock' });
        }
    }

    $status = $self->get_local_status();
    $state = $status->{state};

    # do work

    if ($state eq 'wait_for_agent_lock') {

        return 0 if $self->{shutdown_request};

        $self->update_lrm_status();

        $haenv->sleep(5);

    } elsif ($state eq 'active') {

        my $startime = $haenv->get_time();
        my $max_time = 10;

        my $shutdown = 0;

        # do work (max_time seconds)
        eval {
            # fixme: set alert timer

            # if we could not get the current service status there's no point
            # in doing anything, try again next round.
            return if !$self->update_service_status();

            if ($self->{shutdown_request}) {

                if ($self->{mode} eq 'restart') {
                    # catch exited workers to update service state
                    my $workers = $self->run_workers();
                    my $service_count = $self->active_service_count();

                    if ($service_count == 0 && $workers == 0) {
                        # safety: no active services or workers -> OK
                        give_up_watchdog_protection($self);

                        $shutdown = 1;

                        # restart with no or frozen services, release the lock
                        $haenv->release_ha_agent_lock();
                    }
                } else {

                    if ($self->run_workers() == 0) {
                        if ($self->{shutdown_errors} == 0) {
                            # safety: no active services and LRM shutdown -> OK
                            give_up_watchdog_protection($self);

                            # shutdown with all services stopped, thus release the lock
                            $haenv->release_ha_agent_lock();
                        }

                        $shutdown = 1;
                    }
                }
            } else {
                if (!$self->{cluster_state_update}) {
                    # update failed but we could still renew our lock (cfs restart?),
                    # safely skip manage and expect to update just fine next round
                    $haenv->log('notice', "temporary inconsistent cluster state "
                        . "(cfs restart?), skip round");
                    return;
                }

                $self->manage_resources();
            }
        };
        if (my $err = $@) {
            $haenv->log('err', "got unexpected error - $err");
        }

        $self->update_lrm_status();

        return 0 if $shutdown;

        $haenv->sleep_until($startime + $max_time);

    } elsif ($state eq 'lost_agent_lock') {

        # NOTE: watchdog is active and will trigger soon,
        # so we hope to get the lock back soon!
        if ($self->{shutdown_request}) {

            my $service_count = $self->active_service_count();

            if ($service_count > 0) {
                $haenv->log('err', "got shutdown request in state 'lost_agent_lock' - "
                    . "detected $service_count running services");

                if ($self->{mode} eq 'restart') {
                    my $state_mt = $self->{status}->{state_change_time};

                    # watchdog should have already triggered, so either it's set
                    # to noboot or it failed. As we are in restart mode and have an
                    # infinite stop timeout -> exit now - we don't touch services
                    # or change state, so this is safe, relatively speaking
                    if (($haenv->get_time() - $state_mt) > 90) {
                        $haenv->log('err', "lost agent lock and restart request for over 90 seconds - giving up!");
                        return 0;
                    }
                }
            } else {
                # safety: all services are stopped, so we can close the watchdog
                give_up_watchdog_protection($self);

                return 0;
            }
        }

        $haenv->sleep(5);

    } elsif ($state eq 'maintenance') {

        my $startime = $haenv->get_time();
        return if !$self->update_service_status();

        # wait until all active services moved away
        my $service_count = $self->active_service_count();

        my $exit_lrm = 0;

        if ($self->{shutdown_request}) {
            if ($service_count == 0 && $self->run_workers() == 0) {
                # safety: going into maintenance and all active services got moved -> OK
                give_up_watchdog_protection($self);

                $exit_lrm = 1;

                # restart with no or frozen services, release the lock
                $haenv->release_ha_agent_lock();
            }
        }

        $self->manage_resources() if !$exit_lrm;

        $self->update_lrm_status();

        return 0 if $exit_lrm;

        $haenv->sleep_until($startime + 5);

    } else {

        die "got unexpected status '$state'\n";
    }

    return 1;
}

sub run_workers {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    my $starttime = $haenv->get_time();

    # number of workers to start, if 0 we exec the command directly without forking
    my $max_workers = $haenv->get_max_workers();

    my $sc = $haenv->read_service_config();

    my $worker = $self->{workers};
    # we only have limited time but want to ensure that every queued worker is scheduled
    # eventually, so sort by the count a worker was seen here in this loop
    my $fair_sorter = sub {
        $worker->{$b}->{start_tries} <=> $worker->{$a}->{start_tries} || $a cmp $b
    };
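
# For illustration (hypothetical queue contents): if $worker->{'vm:101'} has
# start_tries 2 while a freshly queued $worker->{'vm:100'} has 0, $fair_sorter
# orders 'vm:101' first (descending start_tries, sid as tie-breaker), so jobs
# that repeatedly lost the race in earlier rounds are not starved by new ones.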

    while (($haenv->get_time() - $starttime) <= 8) {
        my $count = $self->check_active_workers();

        for my $sid (sort $fair_sorter grep { !$worker->{$_}->{pid} } keys %$worker) {
            my $w = $worker->{$sid};
            # higher try-count means higher priority, especially compared to newly queued jobs, so
            # count every try to avoid starvation
            $w->{start_tries}++;
            # FIXME: should be last and ensure that check_active_workers is called sooner
            next if $count >= $max_workers && $max_workers > 0;

            # only fork if we may, else call exec_resource_agent directly (e.g. for tests)
            if ($max_workers > 0) {
                my $pid = fork();
                if (!defined($pid)) {
                    $haenv->log('err', "forking worker failed - $!");
                    $count = 0;
                    last; # abort, try later
                } elsif ($pid == 0) {
                    $haenv->after_fork(); # cleanup

                    my $res = -1;
                    eval {
                        $res = $self->exec_resource_agent($sid, $sc->{$sid}, $w->{state}, $w->{params});
                    };
                    if (my $err = $@) {
                        $haenv->log('err', $err);
                        POSIX::_exit(-1);
                    }
                    POSIX::_exit($res);
                } else {
                    $count++;
                    $w->{pid} = $pid;
                }
            } else {
                my $res = -1;
                eval {
                    $res = $self->exec_resource_agent($sid, $sc->{$sid}, $w->{state}, $w->{params});
                    $res = $res << 8 if $res > 0;
                };
                if (my $err = $@) {
                    $haenv->log('err', $err);
                }
                if (defined($w->{uid})) {
                    $self->resource_command_finished($sid, $w->{uid}, $res);
                } else {
                    $self->stop_command_finished($sid, $res);
                }
            }
        }

        last if !$count;

        $haenv->sleep(1);
    }

    return scalar(keys %{$self->{workers}});
}

sub manage_resources {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    my $nodename = $haenv->nodename();

    my $ss = $self->{service_status};

    foreach my $sid (keys %{$self->{restart_tries}}) {
        delete $self->{restart_tries}->{$sid} if !$ss->{$sid};
    }

    foreach my $sid (keys %$ss) {
        my $sd = $ss->{$sid};
        next if !$sd->{node} || !$sd->{uid};
        next if $sd->{node} ne $nodename;
        my $request_state = $sd->{state};
        next if !defined($request_state);
        # can only happen for restricted groups where the failed node itself needs to be the
        # recovery target. Always let the master do that first; the service will then be marked
        # as 'stopped' and we can just continue normally. But we must NOT do anything with it
        # while it is still in recovery
        next if $request_state eq 'recovery';
        next if $request_state eq 'freeze';
        # intermediate step for optional better node selection on the stop -> start request state change
        next if $request_state eq 'request_start';

        $self->queue_resource_command($sid, $sd->{uid}, $request_state, {
            'target' => $sd->{target},
            'timeout' => $sd->{timeout},
        });
    }

    return $self->run_workers();
}

sub queue_resource_command {
    my ($self, $sid, $uid, $state, $params) = @_;

    # do not queue the exact same command twice, as this may lead to an inconsistent HA state when
    # the first command fails but the CRM does not process its failure right away and the LRM starts
    # a second try, without the CRM knowing of it (race condition). The 'stopped' command is an
    # exception, as we do not process its result in the CRM and we want to execute it always (even
    # with no active CRM)
    return if $state ne 'stopped' && $uid && defined($self->{results}->{$uid});

    if (my $w = $self->{workers}->{$sid}) {
        return if $w->{pid}; # already started
        # else, delete and overwrite queue entry with new command
        delete $self->{workers}->{$sid};
    }

    $self->{workers}->{$sid} = {
        sid => $sid,
        uid => $uid,
        state => $state,
        start_tries => 0,
    };

    $self->{workers}->{$sid}->{params} = $params if $params;
}

sub check_active_workers {
    my ($self) = @_;

    # finish/count workers
    my $count = 0;
    foreach my $sid (keys %{$self->{workers}}) {
        my $w = $self->{workers}->{$sid};
        my $pid = $w->{pid} || next;

        my $waitpid = waitpid($pid, WNOHANG); # check status
        if (defined($waitpid) && ($waitpid == $pid)) {
            if (defined($w->{uid})) {
                $self->resource_command_finished($sid, $w->{uid}, $?);
            } else {
                $self->stop_command_finished($sid, $?);
            }
        } else {
            $count++; # still active
        }
    }

    return $count;
}

sub stop_command_finished {
    my ($self, $sid, $status) = @_;

    my $haenv = $self->{haenv};

    my $w = delete $self->{workers}->{$sid};
    return if !$w; # should not happen

    my $exit_code = -1;

    if ($status == -1) {
        $haenv->log('err', "resource agent $sid finished - failed to execute");
    } elsif (my $sig = ($status & 127)) {
        $haenv->log('err', "resource agent $sid finished - got signal $sig");
    } else {
        $exit_code = ($status >> 8);
    }

    if ($exit_code != 0) {
        $self->{shutdown_errors}++;
    }
}
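
# Note on the decoding above: $status is the raw wait status as delivered in $?,
# so the low 7 bits carry the terminating signal (0 on a normal exit) and the
# high 8 bits carry the actual exit code, hence the `& 127` / `>> 8` split; a
# status of -1 means the agent could not be executed at all.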

sub resource_command_finished {
    my ($self, $sid, $uid, $status) = @_;

    my $haenv = $self->{haenv};

    my $w = delete $self->{workers}->{$sid};
    return if !$w; # should not happen

    my $exit_code = -1;

    if ($status == -1) {
        $haenv->log('err', "resource agent $sid finished - failed to execute");
    } elsif (my $sig = ($status & 127)) {
        $haenv->log('err', "resource agent $sid finished - got signal $sig");
    } else {
        $exit_code = ($status >> 8);
    }

    $exit_code = $self->handle_service_exitcode($sid, $w->{state}, $exit_code);

    return if $exit_code == ETRY_AGAIN; # tell nobody, simply retry

    $self->{results}->{$uid} = {
        sid => $w->{sid},
        state => $w->{state},
        exit_code => $exit_code,
    };

    my $ss = $self->{service_status};

    # compute hash of valid/existing uids
    my $valid_uids = {};
    foreach my $sid (keys %$ss) {
        my $sd = $ss->{$sid};
        next if !$sd->{uid};
        $valid_uids->{$sd->{uid}} = 1;
    }

    my $results = {};
    foreach my $id (keys %{$self->{results}}) {
        next if !$valid_uids->{$id};
        $results->{$id} = $self->{results}->{$id};
    }

    $self->{results} = $results;
}

# processes the exit code from a finished resource agent, so that the CRM knows
# if the LRM wants to retry an action based on the current recovery policies for
# the failed service, or if the CRM itself must try to recover from the failure
sub handle_service_exitcode {
    my ($self, $sid, $cmd, $exit_code) = @_;

    my $haenv = $self->{haenv};
    my $tries = $self->{restart_tries};

    my $sc = $haenv->read_service_config();

    my $max_restart = 0;

    if (my $cd = $sc->{$sid}) {
        $max_restart = $cd->{max_restart};
    }

    if ($cmd eq 'started') {

        if ($exit_code == SUCCESS) {

            $tries->{$sid} = 0;

            return $exit_code;

        } elsif ($exit_code == ERROR) {

            $tries->{$sid} = 0 if !defined($tries->{$sid});

            if ($tries->{$sid} >= $max_restart) {
                $haenv->log('err', "unable to start service $sid on local node"
                    . " after $tries->{$sid} retries");
                $tries->{$sid} = 0;
                return ERROR;
            }

            $tries->{$sid}++;

            $haenv->log('warning', "restart policy: retry number $tries->{$sid}"
                . " for service '$sid'");
            # tell the CRM that we retry the start
            return ETRY_AGAIN;
        }
    }

    return $exit_code;
}
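
# Worked example (hypothetical service with max_restart = 1): the first ERROR
# from a 'started' command bumps the try counter to 1 and returns ETRY_AGAIN,
# so the start is silently retried; a second consecutive ERROR trips the
# `>= $max_restart` check, resets the counter and returns ERROR, handing
# recovery over to the CRM.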

sub exec_resource_agent {
    my ($self, $sid, $service_config, $cmd, $params) = @_;

    # setup execution environment
    $ENV{'PATH'} = '/sbin:/bin:/usr/sbin:/usr/bin';

    my $haenv = $self->{haenv};

    my $nodename = $haenv->nodename();

    my (undef, $service_type, $service_name) = $haenv->parse_sid($sid);

    my $plugin = PVE::HA::Resources->lookup($service_type);
    if (!$plugin) {
        $haenv->log('err', "service type '$service_type' not implemented");
        return EUNKNOWN_SERVICE_TYPE;
    }

    if (!$service_config) {
        $haenv->log('err', "missing resource configuration for '$sid'");
        return EUNKNOWN_SERVICE;
    }

    # process error state early
    if ($cmd eq 'error') {
        $haenv->log('err', "service $sid is in an error state and needs manual "
            . "intervention. Look up 'ERROR RECOVERY' in the documentation.");

        return SUCCESS; # error always succeeds
    }

    if ($service_config->{node} ne $nodename) {
        $haenv->log('err', "service '$sid' not on this node");
        return EWRONG_NODE;
    }

    my $id = $service_name;

    my $running = $plugin->check_running($haenv, $id);

    if ($cmd eq 'started') {

        return SUCCESS if $running;

        $haenv->log("info", "starting service $sid");

        $plugin->start($haenv, $id);

        $running = $plugin->check_running($haenv, $id);

        if ($running) {
            $haenv->log("info", "service status $sid started");
            return SUCCESS;
        } else {
            $haenv->log("warning", "unable to start service $sid");
            return ERROR;
        }

    } elsif ($cmd eq 'request_stop' || $cmd eq 'stopped') {

        return SUCCESS if !$running;

        if (defined($params->{timeout})) {
            $haenv->log("info", "stopping service $sid (timeout=$params->{timeout})");
        } else {
            $haenv->log("info", "stopping service $sid");
        }

        $plugin->shutdown($haenv, $id, $params->{timeout});

        $running = $plugin->check_running($haenv, $id);

        if (!$running) {
            $haenv->log("info", "service status $sid stopped");
            return SUCCESS;
        } else {
            $haenv->log("info", "unable to stop service $sid (still running)");
            return ERROR;
        }

    } elsif ($cmd eq 'migrate' || $cmd eq 'relocate' || $cmd eq 'request_start_balance') {

        my $target = $params->{target};
        if (!defined($target)) {
            die "$cmd '$sid' failed - missing target\n";
        }

        if ($service_config->{node} eq $target) {
            # already there
            return SUCCESS;
        }

        my $online = ($cmd eq 'migrate') ? 1 : 0;

        my $res = $plugin->migrate($haenv, $id, $target, $online);

        # something went wrong if the service is still on this node
        if (!$res) {
            $haenv->log("err", "service $sid not moved (migration error)");
            return ERROR;
        }

        return SUCCESS;
    }

    $haenv->log("err", "implement me (cmd '$cmd')");
    return EUNKNOWN_COMMAND;
}