package PVE::HA::LRM;

# Local Resource Manager

use strict;
use warnings;
use POSIX qw(:sys_wait_h);

use PVE::HA::Tools ':exit_codes';
use PVE::HA::Resources;
# Server can have several states:

my $valid_states = {
    wait_for_agent_lock => "waiting for agent lock",
    active => "got agent_lock",
    maintenance => "going into maintenance",
    lost_agent_lock => "lost agent_lock",
};

# we sleep ~10s per 'active' round, so if no service is available for >= 10 min we'd go into wait
# state, giving up the watchdog and the LRM lock voluntarily, ensuring the WD can do no harm
my $max_active_idle_rounds = 60;
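
# Rough state lifecycle, as implemented in work() below:
#   startup -> wait_for_agent_lock -> active
#   active <-> lost_agent_lock, active -> maintenance -> lost_agent_lock
#   active -> wait_for_agent_lock (after $max_active_idle_rounds idle rounds)
# 'active' is only reached once the protected agent lock (and with it the
# watchdog) is held; losing the lock arms the self-fence path.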
sub new {
    my ($this, $haenv) = @_;

    my $class = ref($this) || $this;

    my $self = bless {
        haenv => $haenv,
        status => { state => 'startup' },
        workers => {},
        results => {},
        restart_tries => {},
        shutdown_request => 0,
        shutdown_errors => 0,
        # mode can be: active, reboot, shutdown, restart
        mode => 'active',
        cluster_state_update => 0,
        active_idle_rounds => 0,
    }, $class;

    $self->set_local_status({ state => 'wait_for_agent_lock' });

    return $self;
}
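
# Typical driver loop, as a minimal sketch - the concrete $haenv is provided
# by a PVE::HA::Env implementation, which is outside this module:
#
#   my $lrm = PVE::HA::LRM->new($haenv);
#   while ($lrm->do_one_iteration()) { }  # returns 0 once the LRM may exit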
sub shutdown_request {
    my ($self) = @_;

    return if $self->{shutdown_request}; # already in shutdown mode

    my $haenv = $self->{haenv};

    my $nodename = $haenv->nodename();

    my ($shutdown, $reboot) = $haenv->is_node_shutdown();

    my $dc_cfg = $haenv->get_datacenter_settings();
    my $shutdown_policy = $dc_cfg->{ha}->{shutdown_policy} // 'conditional';
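
    # How the policies map to behavior on node shutdown (see dispatch below):
    #   conditional -> freeze services on reboot, stop them on poweroff
    #   freeze      -> always freeze services
    #   failover    -> always stop services so they can be recovered elsewhere
    #   migrate     -> go into maintenance mode, services get moved away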
    if ($shutdown) { # don't log this on service restart, only on node shutdown
        $haenv->log('info', "got shutdown request with shutdown policy '$shutdown_policy'");
    }

    my $freeze_all;
    my $maintenance;
    if ($shutdown_policy eq 'conditional') {
        $freeze_all = $reboot;
    } elsif ($shutdown_policy eq 'freeze') {
        $freeze_all = 1;
    } elsif ($shutdown_policy eq 'failover') {
        $freeze_all = 0;
    } elsif ($shutdown_policy eq 'migrate') {
        $maintenance = 1;
    } else {
        $haenv->log('err', "unknown shutdown policy '$shutdown_policy', fall back to conditional");
        $freeze_all = $reboot;
    }

    if ($maintenance) {
        # we get marked as unavailable by the manager, then all services will
        # be migrated away, we'll still have the same "can we exit" clause as
        # a normal shutdown -> no running service on this node
        # FIXME: after X minutes, add shutdown command for remaining services,
        # e.g., if they have no alternative node???
    } elsif ($shutdown) {
        # *always* queue stop jobs for all services if the node shuts down,
        # independent if it's a reboot or a poweroff, else we may corrupt
        # services or hinder node shutdown
        my $ss = $self->{service_status};

        foreach my $sid (keys %$ss) {
            my $sd = $ss->{$sid};
            next if !$sd->{node};
            next if $sd->{node} ne $nodename;
            # Note: use undef uid to mark shutdown/stop jobs
            $self->queue_resource_command($sid, undef, 'request_stop');
        }
    }

    if ($shutdown) {
        my $shutdown_type = $reboot ? 'reboot' : 'shutdown';
        if ($maintenance) {
            $haenv->log('info', "$shutdown_type LRM, doing maintenance, removing this node from active list");
            $self->{mode} = 'maintenance';
        } elsif ($freeze_all) {
            $haenv->log('info', "$shutdown_type LRM, stop and freeze all services");
            $self->{mode} = 'restart';
        } else {
            $haenv->log('info', "shutdown LRM, stop all services");
            $self->{mode} = 'shutdown';
        }
    } else {
        $haenv->log('info', "restart LRM, freeze all services");
        $self->{mode} = 'restart';
    }

    $self->{shutdown_request} = $haenv->get_time();

    eval { $self->update_lrm_status() or die "not quorate?\n"; };
    if (my $err = $@) {
        $haenv->log('err', "unable to update lrm status file - $err");
    }
}
sub get_local_status {
    my ($self) = @_;

    return $self->{status};
}
sub set_local_status {
    my ($self, $new) = @_;

    die "invalid state '$new->{state}'" if !$valid_states->{$new->{state}};

    my $haenv = $self->{haenv};

    my $old = $self->{status};

    # important: only update if it really changed
    return if $old->{state} eq $new->{state};

    $haenv->log('info', "status change $old->{state} => $new->{state}");

    $new->{state_change_time} = $haenv->get_time();

    $self->{status} = $new;
}
sub update_lrm_status {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    return 0 if !$haenv->quorate();

    my $lrm_status = {
        state => $self->{status}->{state},
        mode => $self->{mode},
        results => $self->{results},
        timestamp => $haenv->get_time(),
    };

    eval { $haenv->write_lrm_status($lrm_status); };
    if (my $err = $@) {
        $haenv->log('err', "unable to write lrm status file - $err");
        return 0;
    }

    return 1;
}
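
# The status record written above looks roughly like this (values are example
# placeholders):
#   { state => 'active', mode => 'active', results => { ... }, timestamp => 1700000000 }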
sub update_service_status {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    my $ms = eval { $haenv->read_manager_status(); };
    if (my $err = $@) {
        $haenv->log('err', "updating service status from manager failed: $err");
        return undef;
    }

    $self->{service_status} = $ms->{service_status} || {};
    my $nodename = $haenv->nodename();
    $self->{node_status} = $ms->{node_status}->{$nodename} || 'unknown';

    return 1;
}
sub get_protected_ha_agent_lock {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    my $count = 0;
    my $starttime = $haenv->get_time();

    for (;;) {

        if ($haenv->get_ha_agent_lock()) {
            if ($self->{ha_agent_wd}) {
                $haenv->watchdog_update($self->{ha_agent_wd});
            } else {
                my $wfh = $haenv->watchdog_open();
                $self->{ha_agent_wd} = $wfh;
            }
            return 1;
        }

        last if ++$count > 5; # try max 5 times

        my $delay = $haenv->get_time() - $starttime;
        last if $delay > 5; # for max 5 seconds

        $haenv->sleep(1);
    }

    return 0;
}
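
# Note: as long as the lock can be renewed the watchdog gets fed here; once
# renewal fails, work() transitions to 'lost_agent_lock' and the watchdog will
# eventually fire unless the lock is re-acquired in time (self-fencing).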
# only cares if any service has the local node as their node, independent of which request state it is
sub has_configured_service_on_local_node {
    my ($self) = @_;

    my $haenv = $self->{haenv};
    my $nodename = $haenv->nodename();

    my $ss = $self->{service_status};
    foreach my $sid (keys %$ss) {
        my $sd = $ss->{$sid};
        next if !$sd->{node} || $sd->{node} ne $nodename;

        return 1;
    }
    return 0;
}
sub is_fence_requested {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    my $nodename = $haenv->nodename();
    my $ss = $self->{service_status};

    my $fenced_services = PVE::HA::Tools::count_fenced_services($ss, $nodename);

    return $fenced_services || $self->{node_status} eq 'fence';
}
sub active_service_count {
    my ($self) = @_;

    my $haenv = $self->{haenv};
    my $nodename = $haenv->nodename();

    my $ss = $self->{service_status};

    my $count = 0;
    foreach my $sid (keys %$ss) {
        my $sd = $ss->{$sid};
        next if !$sd->{node};
        next if $sd->{node} ne $nodename;
        my $req_state = $sd->{state};
        next if !defined($req_state);
        next if $req_state eq 'stopped';
        # NOTE: 'ignored' ones are already dropped by the manager from service_status
        next if $req_state eq 'freeze';
        # erroneous services are not managed by HA, don't count them as active
        next if $req_state eq 'error';
        # request_start is for (optional) better node selection for the stop -> started transition
        next if $req_state eq 'request_start';

        $count++;
    }

    return $count;
}
my $wrote_lrm_status_at_startup = 0;
sub do_one_iteration {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    $haenv->loop_start_hook();

    $self->{cluster_state_update} = $haenv->cluster_state_update();

    my $res = $self->work();

    $haenv->loop_end_hook();

    return $res;
}
# NOTE: this is disabling the self-fence mechanism, so it must NOT be called with active services
# It's normally *only* OK on graceful shutdown (with no services, or all services frozen)
my sub give_up_watchdog_protection {
    my ($self) = @_;

    if ($self->{ha_agent_wd}) {
        $self->{haenv}->watchdog_close($self->{ha_agent_wd});
        delete $self->{ha_agent_wd}; # only delete after close!
    }
}
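
# Being a lexical (my) sub, give_up_watchdog_protection is file-private and can
# only be called as a plain function with $self passed explicitly, not as a
# method - which matches how the safety-critical call sites below use it.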
sub work {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    if (!$wrote_lrm_status_at_startup) {
        if ($self->update_lrm_status()) {
            $wrote_lrm_status_at_startup = 1;
        } else {
            # do nothing
            $haenv->sleep(5);
            return $self->{shutdown_request} ? 0 : 1;
        }
    }
    my $status = $self->get_local_status();
    my $state = $status->{state};

    $self->update_service_status();

    my $fence_request = $self->is_fence_requested();

    # do state changes first

    my $ctime = $haenv->get_time();
    if ($state eq 'wait_for_agent_lock') {

        my $service_count = $self->active_service_count();

        if (!$fence_request && $service_count && $haenv->quorate()) {
            if ($self->get_protected_ha_agent_lock()) {
                $self->set_local_status({ state => 'active' });
            }
        }
    } elsif ($state eq 'lost_agent_lock') {

        if (!$fence_request && $haenv->quorate()) {
            if ($self->get_protected_ha_agent_lock()) {
                $self->set_local_status({ state => 'active' });
            }
        }
    } elsif ($state eq 'active') {

        if ($fence_request) {
            $haenv->log('err', "node needs to be fenced - releasing agent_lock\n");
            $self->set_local_status({ state => 'lost_agent_lock' });
        } elsif (!$self->get_protected_ha_agent_lock()) {
            $self->set_local_status({ state => 'lost_agent_lock' });
        } elsif ($self->{mode} eq 'maintenance') {
            $self->set_local_status({ state => 'maintenance' });
        } else {
            if (!$self->has_configured_service_on_local_node() && !$self->run_workers()) {
                # no active service configured for this node and all (old) workers are done
                $self->{active_idle_rounds}++;
                if ($self->{active_idle_rounds} > $max_active_idle_rounds) {
                    $haenv->log('info', "node had no service configured for $max_active_idle_rounds rounds, going idle.\n");
                    # safety: no active service & no running worker for quite some time -> OK
                    $haenv->release_ha_agent_lock();
                    give_up_watchdog_protection($self);
                    $self->set_local_status({ state => 'wait_for_agent_lock' });
                    $self->{active_idle_rounds} = 0;
                }
            } elsif ($self->{active_idle_rounds}) {
                $self->{active_idle_rounds} = 0;
            }
        }
    } elsif ($state eq 'maintenance') {

        if ($fence_request) {
            $haenv->log('err', "node needs to be fenced during maintenance mode - releasing agent_lock\n");
            $self->set_local_status({ state => 'lost_agent_lock' });
        } elsif (!$self->get_protected_ha_agent_lock()) {
            $self->set_local_status({ state => 'lost_agent_lock' });
        }
    }

    $status = $self->get_local_status();
    $state = $status->{state};

    # do work

    if ($state eq 'wait_for_agent_lock') {

        return 0 if $self->{shutdown_request};

        $self->update_lrm_status();

        $haenv->sleep(5);

    } elsif ($state eq 'active') {

        my $startime = $haenv->get_time();

        my $max_time = 10;

        my $shutdown = 0;

        # do work (max_time seconds)
        eval {
            # fixme: set alert timer

            # if we could not get the current service status there's no point
            # in doing anything, try again next round.
            return if !$self->update_service_status();

            if ($self->{shutdown_request}) {

                if ($self->{mode} eq 'restart') {
                    # catch exited workers to update service state
                    my $workers = $self->run_workers();
                    my $service_count = $self->active_service_count();

                    if ($service_count == 0 && $workers == 0) {
                        # safety: no active services or workers -> OK
                        give_up_watchdog_protection($self);

                        # restart with no or frozen services, release the lock
                        $haenv->release_ha_agent_lock();

                        $shutdown = 1;
                    }
                } else {

                    if ($self->run_workers() == 0) {
                        if ($self->{shutdown_errors} == 0) {
                            # safety: no active services and LRM shutdown -> OK
                            give_up_watchdog_protection($self);

                            # shutdown with all services stopped, thus release the lock
                            $haenv->release_ha_agent_lock();
                        }

                        $shutdown = 1;
                    }
                }
            } else {
                if (!$self->{cluster_state_update}) {
                    # update failed but we could still renew our lock (cfs restart?),
                    # safely skip manage and expect to update just fine next round
                    $haenv->log('notice', "temporarily inconsistent cluster state " .
                        "(cfs restart?), skip round");
                    return;
                }

                $self->manage_resources();
            }
        };
        if (my $err = $@) {
            $haenv->log('err', "got unexpected error - $err");
        }

        $self->update_lrm_status();

        return 0 if $shutdown;

        $haenv->sleep_until($startime + $max_time);
    } elsif ($state eq 'lost_agent_lock') {

        # NOTE: watchdog is active and will trigger soon!
        # so we hope to get the lock back soon!
        if ($self->{shutdown_request}) {

            my $service_count = $self->active_service_count();

            if ($service_count > 0) {
                $haenv->log('err', "got shutdown request in state 'lost_agent_lock' - " .
                    "detected $service_count running services");

                if ($self->{mode} eq 'restart') {
                    my $state_mt = $self->{status}->{state_change_time};

                    # watchdog should have already triggered, so either it's
                    # set to noboot or it failed. As we are in restart mode and
                    # have an infinite stop timeout -> exit now - we don't touch
                    # services or change state, so this is safe, relatively speaking
                    if (($haenv->get_time() - $state_mt) > 90) {
                        $haenv->log('err', "lost agent lock and restart request for over 90 seconds - giving up!");
                        return 0;
                    }
                }
            } else {
                # safety: all services are stopped, so we can close the watchdog
                give_up_watchdog_protection($self);

                return 0;
            }
        }

        $haenv->sleep(5);
    } elsif ($state eq 'maintenance') {

        my $startime = $haenv->get_time();
        return if !$self->update_service_status();

        # wait until all active services moved away
        my $service_count = $self->active_service_count();

        my $exit_lrm = 0;

        if ($self->{shutdown_request}) {
            if ($service_count == 0 && $self->run_workers() == 0) {
                # safety: going into maintenance and all active services got moved -> OK
                give_up_watchdog_protection($self);

                $exit_lrm = 1;

                # restart with no or frozen services, release the lock
                $haenv->release_ha_agent_lock();
            }
        }

        $self->manage_resources() if !$exit_lrm;

        $self->update_lrm_status();

        return 0 if $exit_lrm;

        $haenv->sleep_until($startime + 5);

    } else {
        die "got unexpected status '$state'\n";
    }

    return 1;
}
sub run_workers {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    my $starttime = $haenv->get_time();

    # number of workers to start, if 0 we exec the command directly without forking
    my $max_workers = $haenv->get_max_workers();
    my $sc = $haenv->read_service_config();

    my $worker = $self->{workers};
    # we only got limited time but want to ensure that every queued worker is scheduled
    # eventually, so sort by the count a worker was seen here in this loop
    my $fair_sorter = sub {
        $worker->{$b}->{start_tries} <=> $worker->{$a}->{start_tries} || $a cmp $b
    };
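
    # Example: with start_tries 'ct:101' => 2, 'vm:100' => 0, 'vm:102' => 2 the
    # resulting order is ct:101, vm:102, vm:100 - workers that waited through
    # more scheduling rounds win, ties fall back to the sid sort order.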
    while (($haenv->get_time() - $starttime) <= 8) {
        my $count = $self->check_active_workers();

        for my $sid (sort $fair_sorter grep { !$worker->{$_}->{pid} } keys %$worker) {
            my $w = $worker->{$sid};
            # higher try-count means higher priority especially compared to newly queued jobs, so
            # count every try to avoid starvation
            $w->{start_tries}++;
            # FIXME: should be last and ensure that check_active_workers is called sooner
            next if $count >= $max_workers && $max_workers > 0;

            # only fork if we may, else call exec_resource_agent directly (e.g. for tests)
            if ($max_workers > 0) {
                my $pid = fork();
                if (!defined($pid)) {
                    $haenv->log('err', "forking worker failed - $!");
                    $count = 0; last; # abort, try later
                } elsif ($pid == 0) {
                    $haenv->after_fork(); # cleanup

                    # do work
                    my $res = -1;
                    eval {
                        $res = $self->exec_resource_agent($sid, $sc->{$sid}, $w->{state}, $w->{params});
                    };
                    if (my $err = $@) {
                        $haenv->log('err', $err);
                        POSIX::_exit(-1);
                    }
                    POSIX::_exit($res);
                } else {
                    $count++;
                    $w->{pid} = $pid;
                }
            } else {
                my $res = -1;
                eval {
                    $res = $self->exec_resource_agent($sid, $sc->{$sid}, $w->{state}, $w->{params});
                    $res = $res << 8 if $res > 0;
                };
                if (my $err = $@) {
                    $haenv->log('err', $err);
                }
                if (defined($w->{uid})) {
                    $self->resource_command_finished($sid, $w->{uid}, $res);
                } else {
                    $self->stop_command_finished($sid, $res);
                }
            }
        }

        last if !$count;

        $haenv->sleep(1);
    }

    return scalar(keys %{$self->{workers}});
}
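
# Both execution paths above report a wait()-style status word: the forked
# child's real exit status arrives via waitpid()/$? in check_active_workers,
# while the direct path shifts the agent's return value left by 8 bits to
# mimic that encoding, so the *_command_finished callbacks can decode both
# uniformly with ($status >> 8).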
sub manage_resources {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    my $nodename = $haenv->nodename();

    my $ss = $self->{service_status};

    foreach my $sid (keys %{$self->{restart_tries}}) {
        delete $self->{restart_tries}->{$sid} if !$ss->{$sid};
    }

    foreach my $sid (keys %$ss) {
        my $sd = $ss->{$sid};
        next if !$sd->{node} || !$sd->{uid};
        next if $sd->{node} ne $nodename;
        my $request_state = $sd->{state};
        next if !defined($request_state);
        # can only happen for restricted groups where the failed node itself needs to be the
        # recovery target. Always let the master do so first, it will then be marked as 'stopped'
        # and we can just continue normally. But we must NOT do anything with it while still in recovery
        next if $request_state eq 'recovery';
        next if $request_state eq 'freeze';
        # intermediate step for optional better node selection on the stop -> start request state change
        next if $request_state eq 'request_start';

        $self->queue_resource_command($sid, $sd->{uid}, $request_state, {
            'target' => $sd->{target},
            'timeout' => $sd->{timeout},
        });
    }

    return $self->run_workers();
}
sub queue_resource_command {
    my ($self, $sid, $uid, $state, $params) = @_;

    # do not queue the exact same command twice as this may lead to an inconsistent HA state when
    # the first command fails but the CRM does not process its failure right away and the LRM starts
    # a second try, without the CRM knowing of it (race condition). The 'stopped' command is an
    # exception as we do not process its result in the CRM and we want to execute it always (even
    # with no active CRM)
    return if $state ne 'stopped' && $uid && defined($self->{results}->{$uid});

    if (my $w = $self->{workers}->{$sid}) {
        return if $w->{pid}; # already started
        # else, delete and overwrite queue entry with new command
        delete $self->{workers}->{$sid};
    }

    $self->{workers}->{$sid} = {
        sid => $sid,
        uid => $uid,
        state => $state,
        start_tries => 0,
    };

    $self->{workers}->{$sid}->{params} = $params if $params;
}
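
# A freshly queued entry then looks like this (sid 'vm:100' is just an
# example, uid is undef for shutdown/stop jobs):
#   $self->{workers}->{'vm:100'} = {
#       sid => 'vm:100', uid => $uid, state => 'started', start_tries => 0,
#   };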
sub check_active_workers {
    my ($self) = @_;

    # finish/count workers
    my $count = 0;
    foreach my $sid (keys %{$self->{workers}}) {
        my $w = $self->{workers}->{$sid};
        my $pid = $w->{pid} || next;

        my $waitpid = waitpid($pid, WNOHANG); # check status
        if (defined($waitpid) && ($waitpid == $pid)) {
            if (defined($w->{uid})) {
                $self->resource_command_finished($sid, $w->{uid}, $?);
            } else {
                $self->stop_command_finished($sid, $?);
            }
        } else {
            $count++; # still active
        }
    }

    return $count;
}
sub stop_command_finished {
    my ($self, $sid, $status) = @_;

    my $haenv = $self->{haenv};

    my $w = delete $self->{workers}->{$sid};
    return if !$w; # should not happen

    my $exit_code = -1;

    if ($status == -1) {
        $haenv->log('err', "resource agent $sid finished - failed to execute");
    } elsif (my $sig = ($status & 127)) {
        $haenv->log('err', "resource agent $sid finished - got signal $sig");
    } else {
        $exit_code = ($status >> 8);
    }

    if ($exit_code != 0) {
        $self->{shutdown_errors}++;
    }
}
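
# Decoding follows the usual wait() status convention: -1 means the worker
# could not be executed at all, the low 7 bits carry a terminating signal, and
# the high byte carries the real exit code - e.g. a status word of 256 (0x100)
# decodes to exit code 1.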
sub resource_command_finished {
    my ($self, $sid, $uid, $status) = @_;

    my $haenv = $self->{haenv};

    my $w = delete $self->{workers}->{$sid};
    return if !$w; # should not happen

    my $exit_code = -1;

    if ($status == -1) {
        $haenv->log('err', "resource agent $sid finished - failed to execute");
    } elsif (my $sig = ($status & 127)) {
        $haenv->log('err', "resource agent $sid finished - got signal $sig");
    } else {
        $exit_code = ($status >> 8);
    }

    $exit_code = $self->handle_service_exitcode($sid, $w->{state}, $exit_code);

    return if $exit_code == ETRY_AGAIN; # tell nobody, simply retry

    $self->{results}->{$uid} = {
        sid => $sid,
        state => $w->{state},
        exit_code => $exit_code,
    };

    my $ss = $self->{service_status};

    # compute hash of valid/existing uids
    my $valid_uids = {};
    foreach my $sid (keys %$ss) {
        my $sd = $ss->{$sid};
        next if !$sd->{uid};
        $valid_uids->{$sd->{uid}} = 1;
    }

    my $results = {};
    foreach my $id (keys %{$self->{results}}) {
        next if !$valid_uids->{$id};
        $results->{$id} = $self->{results}->{$id};
    }
    $self->{results} = $results;
}
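
# The pruning above keeps the results hash bounded: once the manager no longer
# lists a uid in service_status, the corresponding (already consumed) result
# is dropped instead of accumulating forever.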
# processes the exit code from a finished resource agent, so that the CRM knows
# if the LRM wants to retry an action based on the current recovery policies for
# the failed service, or the CRM itself must try to recover from the failure.
sub handle_service_exitcode {
    my ($self, $sid, $cmd, $exit_code) = @_;

    my $haenv = $self->{haenv};
    my $tries = $self->{restart_tries};

    my $sc = $haenv->read_service_config();

    my $max_restart = 0;

    if (my $cd = $sc->{$sid}) {
        $max_restart = $cd->{max_restart};
    }

    if ($cmd eq 'started') {

        if ($exit_code == SUCCESS) {

            $tries->{$sid} = 0;

            return $exit_code;

        } elsif ($exit_code == ERROR) {

            $tries->{$sid} = 0 if !defined($tries->{$sid});

            if ($tries->{$sid} >= $max_restart) {
                $haenv->log('err', "unable to start service $sid on local node" .
                    " after $tries->{$sid} retries");
                $tries->{$sid} = 0;
                return ERROR;
            }

            $tries->{$sid}++;

            $haenv->log('warning', "restart policy: retry number $tries->{$sid}" .
                " for service '$sid'");
            # tell CRM that we retry the start
            return ETRY_AGAIN;
        }
    }

    return $exit_code;
}
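
# Worked example, assuming a service configured with max_restart => 1: the
# first failed 'started' attempt logs "retry number 1" and returns ETRY_AGAIN
# (the LRM retries locally); on the second failure $tries >= $max_restart, the
# counter is reset and ERROR is returned, handing recovery over to the CRM.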
sub exec_resource_agent {
    my ($self, $sid, $service_config, $cmd, $params) = @_;

    # setup execution environment
    $ENV{'PATH'} = '/sbin:/bin:/usr/sbin:/usr/bin';

    my $haenv = $self->{haenv};

    my $nodename = $haenv->nodename();

    my (undef, $service_type, $service_name) = $haenv->parse_sid($sid);

    my $plugin = PVE::HA::Resources->lookup($service_type);
    if (!$plugin) {
        $haenv->log('err', "service type '$service_type' not implemented");
        return EUNKNOWN_SERVICE_TYPE;
    }
    if (!$service_config) {
        $haenv->log('err', "missing resource configuration for '$sid'");
        return EUNKNOWN_SERVICE;
    }

    # process error state early
    if ($cmd eq 'error') {
        $haenv->log('err', "service $sid is in an error state and needs manual " .
            "intervention. Look up 'ERROR RECOVERY' in the documentation.");

        return SUCCESS; # error always succeeds
    }

    if ($service_config->{node} ne $nodename) {
        $haenv->log('err', "service '$sid' not on this node");
        return EWRONG_NODE;
    }
    my $id = $service_name;

    my $running = $plugin->check_running($haenv, $id);

    if ($cmd eq 'started') {

        return SUCCESS if $running;

        $haenv->log("info", "starting service $sid");

        $plugin->start($haenv, $id);

        $running = $plugin->check_running($haenv, $id);

        if ($running) {
            $haenv->log("info", "service status $sid started");
            return SUCCESS;
        } else {
            $haenv->log("warning", "unable to start service $sid");
            return ERROR;
        }

    } elsif ($cmd eq 'request_stop' || $cmd eq 'stopped') {

        return SUCCESS if !$running;

        if (defined($params->{timeout})) {
            $haenv->log("info", "stopping service $sid (timeout=$params->{timeout})");
        } else {
            $haenv->log("info", "stopping service $sid");
        }

        $plugin->shutdown($haenv, $id, $params->{timeout});

        $running = $plugin->check_running($haenv, $id);

        if (!$running) {
            $haenv->log("info", "service status $sid stopped");
            return SUCCESS;
        } else {
            $haenv->log("info", "unable to stop service $sid (still running)");
            return ERROR;
        }
    } elsif ($cmd eq 'migrate' || $cmd eq 'relocate') {

        my $target = $params->{target};
        die "$cmd '$sid' failed - missing target\n" if !defined($target);

        if ($service_config->{node} eq $target) {
            # already there
            return SUCCESS;
        }

        my $online = ($cmd eq 'migrate') ? 1 : 0;

        my $res = $plugin->migrate($haenv, $id, $target, $online);

        # something went wrong if service is still on this node
        if (!$res) {
            $haenv->log("err", "service $sid not moved (migration error)");
            return ERROR;
        }

        return SUCCESS;
    }

    $haenv->log("err", "implement me (cmd '$cmd')");
    return EUNKNOWN_COMMAND;
}

1;