lost_agent_lock => "lost agent_lock",
};
+# we sleep ~10s per 'active' round, so if no service is configured for >= 10 min we go into the
+# wait state, voluntarily giving up the watchdog and the LRM lock, ensuring the WD can do no harm
+my $max_active_idle_rounds = 60;
+
sub new {
my ($this, $haenv) = @_;
# mode can be: active, reboot, shutdown, restart
mode => 'active',
cluster_state_update => 0,
+ active_idle_rounds => 0,
}, $class;
$self->set_local_status({ state => 'wait_for_agent_lock' });
eval { $self->update_lrm_status() or die "not quorate?\n"; };
if (my $err = $@) {
- $self->log('err', "unable to update lrm status file - $err");
+ $haenv->log('err', "unable to update lrm status file - $err");
}
}
return undef;
} else {
$self->{service_status} = $ms->{service_status} || {};
+ my $nodename = $haenv->nodename();
+ $self->{node_status} = $ms->{node_status}->{$nodename} || 'unknown';
return 1;
}
}
return 0;
}
-sub active_service_count {
+# only checks whether any service has the local node set as its node, independent of the requested state
+sub has_configured_service_on_local_node {
+ my ($self) = @_;
+
+ my $haenv = $self->{haenv};
+ my $nodename = $haenv->nodename();
+
+ my $ss = $self->{service_status};
+ foreach my $sid (keys %$ss) {
+ my $sd = $ss->{$sid};
+ next if !$sd->{node} || $sd->{node} ne $nodename;
+
+ return 1;
+ }
+ return 0;
+}
+
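+# checks whether the manager requested fencing of this node, either via a local service in
+# 'fence' state or via this node's status in the manager status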
+sub is_fence_requested {
my ($self) = @_;
my $haenv = $self->{haenv};
+ my $nodename = $haenv->nodename();
+ my $ss = $self->{service_status};
+
+ my $fenced_services = PVE::HA::Tools::count_fenced_services($ss, $nodename);
+
+ return $fenced_services || $self->{node_status} eq 'fence';
+}
+
+sub active_service_count {
+ my ($self) = @_;
+
+ my $haenv = $self->{haenv};
my $nodename = $haenv->nodename();
my $ss = $self->{service_status};
my $count = 0;
-
foreach my $sid (keys %$ss) {
my $sd = $ss->{$sid};
next if !$sd->{node};
my $req_state = $sd->{state};
next if !defined($req_state);
next if $req_state eq 'stopped';
+ # NOTE: 'ignored' services are already dropped from service_status by the manager
next if $req_state eq 'freeze';
# erroneous services are not managed by HA, don't count them as active
next if $req_state eq 'error';
return $res;
}
+# NOTE: this disables the self-fence mechanism, so it must NOT be called with active services
+# It's normally *only* OK on graceful shutdown (with no services, or all services frozen)
+my sub give_up_watchdog_protection {
+ my ($self) = @_;
+
+ if ($self->{ha_agent_wd}) {
+ $self->{haenv}->watchdog_close($self->{ha_agent_wd});
+ delete $self->{ha_agent_wd}; # only delete after close!
+ }
+}
+
sub work {
my ($self) = @_;
$self->update_service_status();
- my $fence_request = PVE::HA::Tools::count_fenced_services($self->{service_status}, $haenv->nodename());
+ my $fence_request = $self->is_fence_requested();
# do state changes first
$self->set_local_status({ state => 'lost_agent_lock'});
} elsif ($self->{mode} eq 'maintenance') {
$self->set_local_status({ state => 'maintenance'});
+ } else {
+ if (!$self->has_configured_service_on_local_node() && !$self->run_workers()) {
+ # no active service configured for this node and all (old) workers are done
+ $self->{active_idle_rounds}++;
+ if ($self->{active_idle_rounds} > $max_active_idle_rounds) {
+ $haenv->log('info', "node had no service configured for $max_active_idle_rounds rounds, going idle.\n");
+ # safety: no active service & no running worker for quite some time -> OK
+ $haenv->release_ha_agent_lock();
+ give_up_watchdog_protection($self);
+ $self->set_local_status({ state => 'wait_for_agent_lock'});
+ $self->{active_idle_rounds} = 0;
+ }
+ } elsif ($self->{active_idle_rounds}) {
+ $self->{active_idle_rounds} = 0;
+ }
}
} elsif ($state eq 'maintenance') {
my $service_count = $self->active_service_count();
if ($service_count == 0) {
-
if ($self->run_workers() == 0) {
- if ($self->{ha_agent_wd}) {
- $haenv->watchdog_close($self->{ha_agent_wd});
- delete $self->{ha_agent_wd};
- }
-
+ # safety: no active services or workers -> OK
+ give_up_watchdog_protection($self);
$shutdown = 1;
# restart with no or frozen services, release the lock
if ($self->run_workers() == 0) {
if ($self->{shutdown_errors} == 0) {
- if ($self->{ha_agent_wd}) {
- $haenv->watchdog_close($self->{ha_agent_wd});
- delete $self->{ha_agent_wd};
- }
+ # safety: no active services and LRM shutdown -> OK
+ give_up_watchdog_protection($self);
# shutdown with all services stopped thus release the lock
$haenv->release_ha_agent_lock();
} elsif ($state eq 'lost_agent_lock') {
- # Note: watchdog is active an will triger soon!
-
+ # NOTE: watchdog is active and will trigger soon!
# so we hope to get the lock back soon!
-
if ($self->{shutdown_request}) {
my $service_count = $self->active_service_count();
}
}
} else {
-
- # all services are stopped, so we can close the watchdog
-
- if ($self->{ha_agent_wd}) {
- $haenv->watchdog_close($self->{ha_agent_wd});
- delete $self->{ha_agent_wd};
- }
+ # safety: all services are stopped, so we can close the watchdog
+ give_up_watchdog_protection($self);
return 0;
}
if ($self->{shutdown_request}) {
if ($service_count == 0 && $self->run_workers() == 0) {
- if ($self->{ha_agent_wd}) {
- $haenv->watchdog_close($self->{ha_agent_wd});
- delete $self->{ha_agent_wd};
- }
+ # safety: going into maintenance and all active services got moved -> OK
+ give_up_watchdog_protection($self);
$exit_lrm = 1;
my $sc = $haenv->read_service_config();
while (($haenv->get_time() - $starttime) < 5) {
- my $count = $self->check_active_workers();
+ my $count = $self->check_active_workers();
foreach my $sid (sort keys %{$self->{workers}}) {
last if $count >= $max_workers && $max_workers > 0;
my $w = $self->{workers}->{$sid};
- if (!$w->{pid}) {
- # only fork if we may else call exec_resource_agent
- # directly (e.g. for regression tests)
- if ($max_workers > 0) {
- my $pid = fork();
- if (!defined($pid)) {
- $haenv->log('err', "fork worker failed");
- $count = 0; last; # abort, try later
- } elsif ($pid == 0) {
- $haenv->after_fork(); # cleanup
-
- # do work
- my $res = -1;
- eval {
- $res = $self->exec_resource_agent($sid, $sc->{$sid}, $w->{state}, $w->{params});
- };
- if (my $err = $@) {
- $haenv->log('err', $err);
- POSIX::_exit(-1);
- }
- POSIX::_exit($res);
- } else {
- $count++;
- $w->{pid} = $pid;
- }
- } else {
+ next if $w->{pid};
+
+ # only fork if we may, else call exec_resource_agent directly (e.g. for tests)
+ if ($max_workers > 0) {
+ my $pid = fork();
+ if (!defined($pid)) {
+ $haenv->log('err', "forking worker failed - $!");
+ $count = 0; last; # abort, try later
+ } elsif ($pid == 0) {
+ $haenv->after_fork(); # cleanup
+
+ # do work
my $res = -1;
eval {
$res = $self->exec_resource_agent($sid, $sc->{$sid}, $w->{state}, $w->{params});
- $res = $res << 8 if $res > 0;
};
if (my $err = $@) {
$haenv->log('err', $err);
+ POSIX::_exit(-1);
}
- if (defined($w->{uid})) {
- $self->resource_command_finished($sid, $w->{uid}, $res);
- } else {
- $self->stop_command_finished($sid, $res);
- }
+ POSIX::_exit($res);
+ } else {
+ $count++;
+ $w->{pid} = $pid;
+ }
+ } else {
+ my $res = -1;
+ eval {
+ $res = $self->exec_resource_agent($sid, $sc->{$sid}, $w->{state}, $w->{params});
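+ # shift the exit code into the high byte to match the waitpid() status ($?) handling of the forked case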
+ $res = $res << 8 if $res > 0;
+ };
+ if (my $err = $@) {
+ $haenv->log('err', $err);
+ }
+ if (defined($w->{uid})) {
+ $self->resource_command_finished($sid, $w->{uid}, $res);
+ } else {
+ $self->stop_command_finished($sid, $res);
}
}
}
foreach my $sid (keys %$ss) {
my $sd = $ss->{$sid};
- next if !$sd->{node};
- next if !$sd->{uid};
+ next if !$sd->{node} || !$sd->{uid};
next if $sd->{node} ne $nodename;
- my $req_state = $sd->{state};
- next if !defined($req_state);
- next if $req_state eq 'freeze';
- $self->queue_resource_command($sid, $sd->{uid}, $req_state, {'target' => $sd->{target}, 'timeout' => $sd->{timeout}});
+ my $request_state = $sd->{state};
+ next if !defined($request_state);
+ # This can only happen for restricted groups where the failed node itself needs to be the
+ # recovery target. Always let the master handle that first; the service will then be marked as
+ # 'stopped' and we can continue normally. But we must NOT touch it while it is still in recovery.
+ next if $request_state eq 'recovery';
+ next if $request_state eq 'freeze';
+
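+ # queue the state change requested by the CRM, passing through the optional target/timeout parameters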
+ $self->queue_resource_command($sid, $sd->{uid}, $request_state, {
+ 'target' => $sd->{target},
+ 'timeout' => $sd->{timeout},
+ });
}
return $self->run_workers();
sub queue_resource_command {
my ($self, $sid, $uid, $state, $params) = @_;
- # do not queue the excatly same command twice as this may lead to
- # an inconsistent HA state when the first command fails but the CRM
- # does not process its failure right away and the LRM starts a second
- # try, without the CRM knowing of it (race condition)
- # The 'stopped' command is an exception as we do not process its result
- # in the CRM and we want to execute it always (even with no active CRM)
+ # do not queue the exact same command twice, as this may lead to an inconsistent HA state when
+ # the first command fails but the CRM does not process its failure right away and the LRM starts
+ # a second try without the CRM knowing of it (race condition). The 'stopped' command is an
+ # exception, as we do not process its result in the CRM and we want to always execute it (even
+ # with no active CRM).
return if $state ne 'stopped' && $uid && defined($self->{results}->{$uid});
if (my $w = $self->{workers}->{$sid}) {
my $count = 0;
foreach my $sid (keys %{$self->{workers}}) {
my $w = $self->{workers}->{$sid};
- if (my $pid = $w->{pid}) {
- # check status
- my $waitpid = waitpid($pid, WNOHANG);
- if (defined($waitpid) && ($waitpid == $pid)) {
- if (defined($w->{uid})) {
- $self->resource_command_finished($sid, $w->{uid}, $?);
- } else {
- $self->stop_command_finished($sid, $?);
- }
+ my $pid = $w->{pid} || next;
+
+ my $waitpid = waitpid($pid, WNOHANG); # check status
+ if (defined($waitpid) && ($waitpid == $pid)) {
+ if (defined($w->{uid})) {
+ $self->resource_command_finished($sid, $w->{uid}, $?);
} else {
- $count++;
+ $self->stop_command_finished($sid, $?);
}
+ } else {
+ $count++; # still active
}
}
# process error state early
if ($cmd eq 'error') {
-
$haenv->log('err', "service $sid is in an error state and needs manual " .
"intervention. Look up 'ERROR RECOVERY' in the documentation.");