use strict;
use warnings;
-use Data::Dumper;
use POSIX qw(:sys_wait_h);
use PVE::SafeSyslog;
my $valid_states = {
wait_for_agent_lock => "waiting for agent lock",
active => "got agent_lock",
+ maintenance => "going into maintenance",
lost_agent_lock => "lost agent_lock",
};
+# we sleep ~10s per 'active' round, so if no service is available for >= 10 min we go into the wait
+# state, voluntarily giving up the watchdog and the LRM lock, ensuring the WD can do no harm
+my $max_active_idle_rounds = 60;
+
sub new {
my ($this, $haenv) = @_;
shutdown_errors => 0,
# mode can be: active, reboot, shutdown, restart
mode => 'active',
+ cluster_state_update => 0,
+ active_idle_rounds => 0,
}, $class;
- $self->set_local_status({ state => 'wait_for_agent_lock' });
+ $self->set_local_status({ state => 'wait_for_agent_lock' });
return $self;
}
my $nodename = $haenv->nodename();
- my $shutdown = $haenv->is_node_shutdown();
+ my ($shutdown, $reboot) = $haenv->is_node_shutdown();
- if ($shutdown) {
- $haenv->log('info', "shutdown LRM, stop all services");
- $self->{mode} = 'shutdown';
+ my $dc_ha_cfg = $haenv->get_ha_settings();
+ my $shutdown_policy = $dc_ha_cfg->{shutdown_policy} // 'conditional';
- # queue stop jobs for all services
+ if ($shutdown) { # don't log this on service restart, only on node shutdown
+ $haenv->log('info', "got shutdown request with shutdown policy '$shutdown_policy'");
+ }
+ my $freeze_all;
+ my $maintenance;
+ if ($shutdown_policy eq 'conditional') {
+ $freeze_all = $reboot;
+ } elsif ($shutdown_policy eq 'freeze') {
+ $freeze_all = 1;
+ } elsif ($shutdown_policy eq 'failover') {
+ $freeze_all = 0;
+ } elsif ($shutdown_policy eq 'migrate') {
+ $maintenance = 1;
+ } else {
+ $haenv->log('err', "unknown shutdown policy '$shutdown_policy', fall back to conditional");
+ $freeze_all = $reboot;
+ }
+
+ if ($maintenance) {
+        # we get marked as unavailable by the manager, then all services will
+        # be migrated away; we'll still have the same "can we exit" clause as
+        # a normal shutdown -> no running service on this node
+ # FIXME: after X minutes, add shutdown command for remaining services,
+ # e.g., if they have no alternative node???
+ } elsif ($shutdown) {
+ # *always* queue stop jobs for all services if the node shuts down,
+ # independent if it's a reboot or a poweroff, else we may corrupt
+ # services or hinder node shutdown
my $ss = $self->{service_status};
foreach my $sid (keys %$ss) {
# Note: use undef uid to mark shutdown/stop jobs
$self->queue_resource_command($sid, undef, 'request_stop');
}
+ }
+ if ($shutdown) {
+ my $shutdown_type = $reboot ? 'reboot' : 'shutdown';
+ if ($maintenance) {
+ $haenv->log('info', "$shutdown_type LRM, doing maintenance, removing this node from active list");
+ $self->{mode} = 'maintenance';
+ } elsif ($freeze_all) {
+ $haenv->log('info', "$shutdown_type LRM, stop and freeze all services");
+ $self->{mode} = 'restart';
+ } else {
+ $haenv->log('info', "shutdown LRM, stop all services");
+ $self->{mode} = 'shutdown';
+ }
} else {
$haenv->log('info', "restart LRM, freeze all services");
$self->{mode} = 'restart';
}
- $self->{shutdown_request} = 1;
+ $self->{shutdown_request} = $haenv->get_time();
- eval { $self->update_lrm_status(); };
+ eval { $self->update_lrm_status() or die "not quorate?\n"; };
if (my $err = $@) {
- $self->log('err', "unable to update lrm status file - $err");
+ $haenv->log('err', "unable to update lrm status file - $err");
}
}
my $old = $self->{status};
- # important: only update if if really changed
+    # important: only update if it really changed
return if $old->{state} eq $new->{state};
$haenv->log('info', "status change $old->{state} => $new->{state}");
my $haenv = $self->{haenv};
return 0 if !$haenv->quorate();
-
- my $lrm_status = {
+
+ my $lrm_status = {
state => $self->{status}->{state},
mode => $self->{mode},
results => $self->{results},
timestamp => $haenv->get_time(),
};
-
+
eval { $haenv->write_lrm_status($lrm_status); };
if (my $err = $@) {
$haenv->log('err', "unable to write lrm status file - $err");
return 1;
}
# Refresh the cached service and node status from the manager status file.
#
# On success, updates $self->{service_status} (hash of sid => status) and
# $self->{node_status} (this node's state as seen by the manager, falling
# back to 'unknown' when the node is not listed) and returns 1. On a read
# error, logs the problem, leaves the cached state untouched and returns
# undef so callers can skip the current round.
sub update_service_status {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    my $ms = eval { $haenv->read_manager_status(); };
    if (my $err = $@) {
        $haenv->log('err', "updating service status from manager failed: $err");
        return undef;
    }

    $self->{service_status} = $ms->{service_status} || {};

    my $nodename = $haenv->nodename();
    $self->{node_status} = $ms->{node_status}->{$nodename} || 'unknown';

    return 1;
}
+
sub get_protected_ha_agent_lock {
my ($self) = @_;
my $starttime = $haenv->get_time();
for (;;) {
-
+
if ($haenv->get_ha_agent_lock()) {
if ($self->{ha_agent_wd}) {
$haenv->watchdog_update($self->{ha_agent_wd});
}
return 1;
}
-
+
last if ++$count > 5; # try max 5 time
my $delay = $haenv->get_time() - $starttime;
$haenv->sleep(1);
}
-
+
return 0;
}
# Check whether any service is currently placed on the local node, independent
# of which request state (started/stopped/...) it has.
# Returns 1 if at least one such service exists, 0 otherwise.
sub has_configured_service_on_local_node {
    my ($self) = @_;

    my $nodename = $self->{haenv}->nodename();
    my $ss = $self->{service_status};

    for my $sid (keys %$ss) {
        my $node = $ss->{$sid}->{node};
        return 1 if $node && $node eq $nodename;
    }

    return 0;
}
+
# Check whether this node is requested to be fenced: either the manager set
# the node itself to the 'fence' state, or at least one service located here
# is marked for fencing. Returns a true value if fencing is requested.
sub is_fence_requested {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    my $fenced_services = PVE::HA::Tools::count_fenced_services(
        $self->{service_status}, $haenv->nodename());

    return $fenced_services || $self->{node_status} eq 'fence';
}
+
# Count the services on the *local* node which are (requested to be) active,
# i.e., those the LRM still has to care for before it may give up the agent
# lock and watchdog. Services which are stopped, frozen, or in the unmanaged
# 'error' state do not count as active.
#
# Returns the number of such services (0 if none).
sub active_service_count {
    my ($self) = @_;

    my $haenv = $self->{haenv};
    my $nodename = $haenv->nodename();

    my $ss = $self->{service_status};

    my $count = 0;
    foreach my $sid (keys %$ss) {
        my $sd = $ss->{$sid};
        next if !$sd->{node};
        # FIX: only services placed on the local node are relevant here;
        # previously $nodename was computed but never used, so services of
        # other nodes were counted too, blocking shutdown/maintenance exit
        next if $sd->{node} ne $nodename;
        my $req_state = $sd->{state};
        next if !defined($req_state);
        next if $req_state eq 'stopped';
        # NOTE: 'ignored' ones are already dropped by the manager from service_status
        next if $req_state eq 'freeze';
        # erroneous services are not managed by HA, don't count them as active
        next if $req_state eq 'error';
        $count++;
    }

    return $count;
}
my $haenv = $self->{haenv};
+ $haenv->loop_start_hook();
+
+ $self->{cluster_state_update} = $haenv->cluster_state_update();
+
+ my $res = $self->work();
+
+ $haenv->loop_end_hook();
+
+ return $res;
+}
+
# NOTE: this disables the self-fence mechanism, so it must NOT be called
# while active services are present. It's normally *only* OK on graceful
# shutdown (with no services, or all services frozen).
my sub give_up_watchdog_protection {
    my ($self) = @_;

    return if !$self->{ha_agent_wd}; # nothing to do, watchdog not armed

    $self->{haenv}->watchdog_close($self->{ha_agent_wd});
    delete $self->{ha_agent_wd}; # only delete after close!
}
+
+sub work {
+ my ($self) = @_;
+
+ my $haenv = $self->{haenv};
+
if (!$wrote_lrm_status_at_startup) {
if ($self->update_lrm_status()) {
$wrote_lrm_status_at_startup = 1;
return $self->{shutdown_request} ? 0 : 1;
}
}
-
+
my $status = $self->get_local_status();
my $state = $status->{state};
- my $ms = $haenv->read_manager_status();
- $self->{service_status} = $ms->{service_status} || {};
+ $self->update_service_status();
- my $fence_request = PVE::HA::Tools::count_fenced_services($self->{service_status}, $haenv->nodename());
-
- # do state changes first
+ my $fence_request = $self->is_fence_requested();
+
+ # do state changes first
my $ctime = $haenv->get_time();
$self->set_local_status({ state => 'active' });
}
}
-
+
} elsif ($state eq 'lost_agent_lock') {
if (!$fence_request && $haenv->quorate()) {
} elsif ($state eq 'active') {
- if ($fence_request) {
+ if ($fence_request) {
$haenv->log('err', "node need to be fenced - releasing agent_lock\n");
- $self->set_local_status({ state => 'lost_agent_lock'});
+ $self->set_local_status({ state => 'lost_agent_lock'});
+ } elsif (!$self->get_protected_ha_agent_lock()) {
+ $self->set_local_status({ state => 'lost_agent_lock'});
+ } elsif ($self->{mode} eq 'maintenance') {
+ $self->set_local_status({ state => 'maintenance'});
+ } else {
+ if (!$self->has_configured_service_on_local_node() && !$self->run_workers()) {
+ # no active service configured for this node and all (old) workers are done
+ $self->{active_idle_rounds}++;
+ if ($self->{active_idle_rounds} > $max_active_idle_rounds) {
+ $haenv->log('info', "node had no service configured for $max_active_idle_rounds rounds, going idle.\n");
+ # safety: no active service & no running worker for quite some time -> OK
+ $haenv->release_ha_agent_lock();
+ give_up_watchdog_protection($self);
+ $self->set_local_status({ state => 'wait_for_agent_lock'});
+ $self->{active_idle_rounds} = 0;
+ }
+ } elsif ($self->{active_idle_rounds}) {
+ $self->{active_idle_rounds} = 0;
+ }
+ }
+ } elsif ($state eq 'maintenance') {
+
+ if ($fence_request) {
+ $haenv->log('err', "node need to be fenced during maintenance mode - releasing agent_lock\n");
+ $self->set_local_status({ state => 'lost_agent_lock'});
} elsif (!$self->get_protected_ha_agent_lock()) {
$self->set_local_status({ state => 'lost_agent_lock'});
}
if ($state eq 'wait_for_agent_lock') {
return 0 if $self->{shutdown_request};
-
+
$self->update_lrm_status();
-
+
$haenv->sleep(5);
-
+
} elsif ($state eq 'active') {
my $startime = $haenv->get_time();
eval {
# fixme: set alert timer
+ # if we could not get the current service status there's no point
+ # in doing anything, try again next round.
+ return if !$self->update_service_status();
+
if ($self->{shutdown_request}) {
if ($self->{mode} eq 'restart') {
my $service_count = $self->active_service_count();
if ($service_count == 0) {
-
if ($self->run_workers() == 0) {
- if ($self->{ha_agent_wd}) {
- $haenv->watchdog_close($self->{ha_agent_wd});
- delete $self->{ha_agent_wd};
- }
-
+ # safety: no active services or workers -> OK
+ give_up_watchdog_protection($self);
$shutdown = 1;
                    # restart with no or frozen services, release the lock
if ($self->run_workers() == 0) {
if ($self->{shutdown_errors} == 0) {
- if ($self->{ha_agent_wd}) {
- $haenv->watchdog_close($self->{ha_agent_wd});
- delete $self->{ha_agent_wd};
- }
+ # safety: no active services and LRM shutdown -> OK
+ give_up_watchdog_protection($self);
# shutdown with all services stopped thus release the lock
$haenv->release_ha_agent_lock();
}
}
} else {
+ if (!$self->{cluster_state_update}) {
+ # update failed but we could still renew our lock (cfs restart?),
+ # safely skip manage and expect to update just fine next round
+ $haenv->log('notice', "temporary inconsistent cluster state " .
+ "(cfs restart?), skip round");
+ return;
+ }
$self->manage_resources();
}
$self->update_lrm_status();
-
+
return 0 if $shutdown;
$haenv->sleep_until($startime + $max_time);
} elsif ($state eq 'lost_agent_lock') {
-
- # Note: watchdog is active an will triger soon!
+    # NOTE: watchdog is active and will trigger soon!
# so we hope to get the lock back soon!
-
if ($self->{shutdown_request}) {
my $service_count = $self->active_service_count();
if ($service_count > 0) {
- $haenv->log('err', "get shutdown request in state 'lost_agent_lock' - " .
+ $haenv->log('err', "get shutdown request in state 'lost_agent_lock' - " .
"detected $service_count running services");
+ if ($self->{mode} eq 'restart') {
+ my $state_mt = $self->{status}->{state_change_time};
+
+            # watchdog should have already triggered, so either it's set
+            # to noboot or it failed. As we are in restart mode, and
+            # have infinity stoptimeout -> exit now - we don't touch services
+            # or change state, so this is safe, relatively speaking
+ if (($haenv->get_time() - $state_mt) > 90) {
+ $haenv->log('err', "lost agent lock and restart request for over 90 seconds - giving up!");
+ return 0;
+ }
+ }
} else {
+ # safety: all services are stopped, so we can close the watchdog
+ give_up_watchdog_protection($self);
- # all services are stopped, so we can close the watchdog
-
- if ($self->{ha_agent_wd}) {
- $haenv->watchdog_close($self->{ha_agent_wd});
- delete $self->{ha_agent_wd};
- }
-
return 0;
}
}
$haenv->sleep(5);
+ } elsif ($state eq 'maintenance') {
+
+ my $startime = $haenv->get_time();
+ return if !$self->update_service_status();
+
+ # wait until all active services moved away
+ my $service_count = $self->active_service_count();
+
+ my $exit_lrm = 0;
+
+ if ($self->{shutdown_request}) {
+ if ($service_count == 0 && $self->run_workers() == 0) {
+ # safety: going into maintenance and all active services got moved -> OK
+ give_up_watchdog_protection($self);
+
+ $exit_lrm = 1;
+
+            # restart with no or frozen services, release the lock
+ $haenv->release_ha_agent_lock();
+ }
+ }
+
+ $self->manage_resources() if !$exit_lrm;
+
+ $self->update_lrm_status();
+
+ return 0 if $exit_lrm;
+
+ $haenv->sleep_until($startime + 5);
+
} else {
die "got unexpected status '$state'\n";
while (($haenv->get_time() - $starttime) < 5) {
my $count = $self->check_active_workers();
- foreach my $sid (keys %{$self->{workers}}) {
+ foreach my $sid (sort keys %{$self->{workers}}) {
last if $count >= $max_workers && $max_workers > 0;
my $w = $self->{workers}->{$sid};
# do work
my $res = -1;
eval {
- $res = $self->exec_resource_agent($sid, $sc->{$sid}, $w->{state}, $w->{target});
+ $res = $self->exec_resource_agent($sid, $sc->{$sid}, $w->{state}, $w->{params});
};
if (my $err = $@) {
$haenv->log('err', $err);
POSIX::_exit(-1);
- }
- POSIX::_exit($res);
+ }
+ POSIX::_exit($res);
} else {
$count++;
$w->{pid} = $pid;
} else {
my $res = -1;
eval {
- $res = $self->exec_resource_agent($sid, $sc->{$sid}, $w->{state}, $w->{target});
+ $res = $self->exec_resource_agent($sid, $sc->{$sid}, $w->{state}, $w->{params});
$res = $res << 8 if $res > 0;
};
if (my $err = $@) {
my $ss = $self->{service_status};
+ foreach my $sid (keys %{$self->{restart_tries}}) {
+ delete $self->{restart_tries}->{$sid} if !$ss->{$sid};
+ }
+
foreach my $sid (keys %$ss) {
my $sd = $ss->{$sid};
next if !$sd->{node};
next if $sd->{node} ne $nodename;
my $req_state = $sd->{state};
next if !defined($req_state);
+        # can only happen for restricted groups where the failed node itself needs to be the
+        # recovery target. Always let the master do so first; the service will then be marked
+        # as 'stopped' and we can just continue normally. But we must NOT do anything with it
+        # while still in recovery
+ next if $req_state eq 'recovery';
next if $req_state eq 'freeze';
- $self->queue_resource_command($sid, $sd->{uid}, $req_state, $sd->{target});
+
+ $self->queue_resource_command($sid, $sd->{uid}, $req_state, {
+ 'target' => $sd->{target},
+ 'timeout' => $sd->{timeout},
+ });
}
return $self->run_workers();
}
sub queue_resource_command {
- my ($self, $sid, $uid, $state, $target) = @_;
+ my ($self, $sid, $uid, $state, $params) = @_;
+
+    # do not queue the exact same command twice as this may lead to
+ # an inconsistent HA state when the first command fails but the CRM
+ # does not process its failure right away and the LRM starts a second
+ # try, without the CRM knowing of it (race condition)
+ # The 'stopped' command is an exception as we do not process its result
+ # in the CRM and we want to execute it always (even with no active CRM)
+ return if $state ne 'stopped' && $uid && defined($self->{results}->{$uid});
if (my $w = $self->{workers}->{$sid}) {
return if $w->{pid}; # already started
state => $state,
};
- $self->{workers}->{$sid}->{target} = $target if $target;
+ $self->{workers}->{$sid}->{params} = $params if $params;
}
sub check_active_workers {
}
}
}
-
+
return $count;
}
my $exit_code = -1;
if ($status == -1) {
- $haenv->log('err', "resource agent $sid finished - failed to execute");
+ $haenv->log('err', "resource agent $sid finished - failed to execute");
} elsif (my $sig = ($status & 127)) {
$haenv->log('err', "resource agent $sid finished - got signal $sig");
} else {
$exit_code = $self->handle_service_exitcode($sid, $w->{state}, $exit_code);
+ return if $exit_code == ETRY_AGAIN; # tell nobody, simply retry
+
$self->{results}->{$uid} = {
sid => $w->{sid},
state => $w->{state},
$tries->{$sid} = 0 if !defined($tries->{$sid});
- $tries->{$sid}++;
if ($tries->{$sid} >= $max_restart) {
$haenv->log('err', "unable to start service $sid on local node".
" after $tries->{$sid} retries");
return ERROR;
}
+ $tries->{$sid}++;
+
+ $haenv->log('warning', "restart policy: retry number $tries->{$sid}" .
+ " for service '$sid'");
# tell CRM that we retry the start
return ETRY_AGAIN;
}
}
sub exec_resource_agent {
- my ($self, $sid, $service_config, $cmd, @params) = @_;
+ my ($self, $sid, $service_config, $cmd, $params) = @_;
# setup execution environment
my $nodename = $haenv->nodename();
- my (undef, $service_type, $service_name) = PVE::HA::Tools::parse_sid($sid);
+ my (undef, $service_type, $service_name) = $haenv->parse_sid($sid);
my $plugin = PVE::HA::Resources->lookup($service_type);
if (!$plugin) {
return EUNKNOWN_SERVICE;
}
+ # process error state early
+ if ($cmd eq 'error') {
+ $haenv->log('err', "service $sid is in an error state and needs manual " .
+ "intervention. Look up 'ERROR RECOVERY' in the documentation.");
+
+ return SUCCESS; # error always succeeds
+ }
+
if ($service_config->{node} ne $nodename) {
$haenv->log('err', "service '$sid' not on this node");
return EWRONG_NODE;
return SUCCESS if !$running;
- $haenv->log("info", "stopping service $sid");
+ if (defined($params->{timeout})) {
+ $haenv->log("info", "stopping service $sid (timeout=$params->{timeout})");
+ } else {
+ $haenv->log("info", "stopping service $sid");
+ }
- $plugin->shutdown($haenv, $id);
+ $plugin->shutdown($haenv, $id, $params->{timeout});
$running = $plugin->check_running($haenv, $id);
} elsif ($cmd eq 'migrate' || $cmd eq 'relocate') {
- my $target = $params[0];
+ my $target = $params->{target};
if (!defined($target)) {
die "$cmd '$sid' failed - missing target\n" if !defined($target);
return EINVALID_PARAMETER;
return SUCCESS;
- } elsif ($cmd eq 'error') {
-
- if ($running) {
- $haenv->log("err", "service $sid is in an error state while running");
- } else {
- $haenv->log("warning", "service $sid is not running and in an error state");
- }
- return SUCCESS; # error always succeeds
-
}
$haenv->log("err", "implement me (cmd '$cmd')");