};
# we sleep ~10s per 'active' round, so if no services are available for >= 10 min we'd go into wait
-# state givining up the watchdog and the LRM lock acquire voluntary, ensuring the WD can do no harm
+# state, giving up the watchdog and the LRM lock voluntarily, ensuring the WD can do no harm
my $max_active_idle_rounds = 60;
sub new {
my ($shutdown, $reboot) = $haenv->is_node_shutdown();
- my $dc_ha_cfg = $haenv->get_ha_settings();
- my $shutdown_policy = $dc_ha_cfg->{shutdown_policy} // 'conditional';
+ my $dc_cfg = $haenv->get_datacenter_settings();
+ my $shutdown_policy = $dc_cfg->{ha}->{shutdown_policy} // 'conditional';
if ($shutdown) { # don't log this on service restart, only on node shutdown
$haenv->log('info', "got shutdown request with shutdown policy '$shutdown_policy'");
return undef;
} else {
$self->{service_status} = $ms->{service_status} || {};
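+ # also cache the manager's status for this node; is_fence_requested() checks it for a node-level 'fence' request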
+ my $nodename = $haenv->nodename();
+ $self->{node_status} = $ms->{node_status}->{$nodename} || 'unknown';
return 1;
}
}
return 0;
}
+sub is_fence_requested {
+ my ($self) = @_;
+
+ my $haenv = $self->{haenv};
+
+ my $nodename = $haenv->nodename();
+ my $ss = $self->{service_status};
+
+ my $fenced_services = PVE::HA::Tools::count_fenced_services($ss, $nodename);
+
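+ # the node must be fenced if any local service is in 'fence' state or the CRM marked the node itself as 'fence'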
+ return $fenced_services || $self->{node_status} eq 'fence';
+}
+
+sub is_maintenance_requested {
+ my ($self) = @_;
+
+ # shutdown maintenance or manual request
+ return $self->{mode} eq 'maintenance';
+}
+
sub active_service_count {
my ($self) = @_;
next if $req_state eq 'freeze';
# erroneous services are not managed by HA, don't count them as active
next if $req_state eq 'error';
+ # request_start is for (optional) better node selection on the stop -> started transition
+ next if $req_state eq 'request_start';
$count++;
}
$self->update_service_status();
- my $fence_request = PVE::HA::Tools::count_fenced_services($self->{service_status}, $haenv->nodename());
+ my $fence_request = $self->is_fence_requested();
# do state changes first
$self->set_local_status({ state => 'lost_agent_lock'});
} elsif (!$self->get_protected_ha_agent_lock()) {
$self->set_local_status({ state => 'lost_agent_lock'});
- } elsif ($self->{mode} eq 'maintenance') {
+ } elsif ($self->is_maintenance_requested()) {
$self->set_local_status({ state => 'maintenance'});
} else {
if (!$self->has_configured_service_on_local_node() && !$self->run_workers()) {
if ($fence_request) {
$haenv->log('err', "node needs to be fenced during maintenance mode - releasing agent_lock\n");
$self->set_local_status({ state => 'lost_agent_lock'});
- } elsif (!$self->get_protected_ha_agent_lock()) {
- $self->set_local_status({ state => 'lost_agent_lock'});
+ } elsif ($self->active_service_count() || $self->run_workers()) {
+ # keep the lock and watchdog until all services have cleared the node
+ if (!$self->get_protected_ha_agent_lock()) {
+ $self->set_local_status({ state => 'lost_agent_lock'});
+ }
}
}
if ($self->{shutdown_request}) {
if ($self->{mode} eq 'restart') {
-
+ # catch exited workers to update service state
+ my $workers = $self->run_workers();
my $service_count = $self->active_service_count();
- if ($service_count == 0) {
- if ($self->run_workers() == 0) {
- # safety: no active services or workers -> OK
- give_up_watchdog_protection($self);
- $shutdown = 1;
+ if ($service_count == 0 && $workers == 0) {
+ # safety: no active services or workers -> OK
+ give_up_watchdog_protection($self);
+ $shutdown = 1;
- # restart with no or freezed services, release the lock
- $haenv->release_ha_agent_lock();
- }
+ # restart with no (or only frozen) services, release the lock
+ $haenv->release_ha_agent_lock();
}
} else {
my $exit_lrm = 0;
- if ($self->{shutdown_request}) {
- if ($service_count == 0 && $self->run_workers() == 0) {
- # safety: going into maintenance and all active services got moved -> OK
- give_up_watchdog_protection($self);
+ if ($service_count == 0 && $self->run_workers() == 0) {
+ # safety: going into maintenance and all active services got moved -> OK
+ give_up_watchdog_protection($self);
+ if ($self->{shutdown_request}) {
$exit_lrm = 1;
-
- # restart with no or freezed services, release the lock
- $haenv->release_ha_agent_lock();
}
+
+ # in maintenance mode with no services left (or only frozen ones), release the lock
+ $haenv->release_ha_agent_lock();
}
$self->manage_resources() if !$exit_lrm;
# number of workers to start; if 0 we exec the command directly without forking
my $max_workers = $haenv->get_max_workers();
-
my $sc = $haenv->read_service_config();
- while (($haenv->get_time() - $starttime) < 5) {
- my $count = $self->check_active_workers();
-
- foreach my $sid (sort keys %{$self->{workers}}) {
- last if $count >= $max_workers && $max_workers > 0;
-
- my $w = $self->{workers}->{$sid};
- if (!$w->{pid}) {
- # only fork if we may else call exec_resource_agent
- # directly (e.g. for regression tests)
- if ($max_workers > 0) {
- my $pid = fork();
- if (!defined($pid)) {
- $haenv->log('err', "fork worker failed");
- $count = 0; last; # abort, try later
- } elsif ($pid == 0) {
- $haenv->after_fork(); # cleanup
-
- # do work
- my $res = -1;
- eval {
- $res = $self->exec_resource_agent($sid, $sc->{$sid}, $w->{state}, $w->{params});
- };
- if (my $err = $@) {
- $haenv->log('err', $err);
- POSIX::_exit(-1);
- }
- POSIX::_exit($res);
- } else {
- $count++;
- $w->{pid} = $pid;
- }
- } else {
+ my $worker = $self->{workers};
+ # we only have limited time but want to ensure that every queued worker is scheduled
+ # eventually, so sort by how often a worker has already been seen in this loop
+ my $fair_sorter = sub {
+ $worker->{$b}->{start_tries} <=> $worker->{$a}->{start_tries} || $a cmp $b
+ };
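+ # note: orders descending by start_tries (most passed-over first), with the service ID as a stable tie-breaker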
+
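+ # cap this scheduling pass at ~8 seconds, presumably to leave headroom within the ~10 second 'active' round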
+ while (($haenv->get_time() - $starttime) <= 8) {
+ my $count = $self->check_active_workers();
+
+ for my $sid (sort $fair_sorter grep { !$worker->{$_}->{pid} } keys %$worker) {
+ my $w = $worker->{$sid};
+ # a higher try-count means higher priority, especially compared to newly queued jobs, so
+ # count every try to avoid starvation
+ $w->{start_tries}++;
+ # FIXME: should be last and ensure that check_active_workers is called sooner
+ next if $count >= $max_workers && $max_workers > 0;
+
+ # only fork if we may, else call exec_resource_agent directly (e.g. for tests)
+ if ($max_workers > 0) {
+ my $pid = fork();
+ if (!defined($pid)) {
+ $haenv->log('err', "forking worker failed - $!");
+ $count = 0; last; # abort, try later
+ } elsif ($pid == 0) {
+ $haenv->after_fork(); # cleanup
+
+ # do work
my $res = -1;
eval {
$res = $self->exec_resource_agent($sid, $sc->{$sid}, $w->{state}, $w->{params});
- $res = $res << 8 if $res > 0;
};
if (my $err = $@) {
$haenv->log('err', $err);
+ POSIX::_exit(-1);
}
- if (defined($w->{uid})) {
- $self->resource_command_finished($sid, $w->{uid}, $res);
- } else {
- $self->stop_command_finished($sid, $res);
- }
+ POSIX::_exit($res);
+ } else {
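+ # parent: count the new worker and remember its PID so check_active_workers() can reap it later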
+ $count++;
+ $w->{pid} = $pid;
+ }
+ } else {
+ my $res = -1;
+ eval {
+ $res = $self->exec_resource_agent($sid, $sc->{$sid}, $w->{state}, $w->{params});
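+ # shift a plain exit code into the high byte to match the wait()-style status of the forked path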
+ $res = $res << 8 if $res > 0;
+ };
+ if (my $err = $@) {
+ $haenv->log('err', $err);
+ }
+ if (defined($w->{uid})) {
+ $self->resource_command_finished($sid, $w->{uid}, $res);
+ } else {
+ $self->stop_command_finished($sid, $res);
}
}
}
foreach my $sid (keys %$ss) {
my $sd = $ss->{$sid};
- next if !$sd->{node};
- next if !$sd->{uid};
+ next if !$sd->{node} || !$sd->{uid};
next if $sd->{node} ne $nodename;
- my $req_state = $sd->{state};
- next if !defined($req_state);
+ my $request_state = $sd->{state};
+ next if !defined($request_state);
# can only happen for restricted groups where the failed node itself needs to be the
# recovery target. Always let the master do that first; the service will then be marked as
# 'stopped' and we can just continue normally. But we must NOT do anything with it while
# still in recovery
- next if $req_state eq 'recovery';
- next if $req_state eq 'freeze';
+ next if $request_state eq 'recovery';
+ next if $request_state eq 'freeze';
+ # intermediate step for (optional) better node selection on the stop -> start request state change
+ next if $request_state eq 'request_start';
- $self->queue_resource_command($sid, $sd->{uid}, $req_state, {
+ $self->queue_resource_command($sid, $sd->{uid}, $request_state, {
'target' => $sd->{target},
'timeout' => $sd->{timeout},
});
sub queue_resource_command {
my ($self, $sid, $uid, $state, $params) = @_;
- # do not queue the excatly same command twice as this may lead to
- # an inconsistent HA state when the first command fails but the CRM
- # does not process its failure right away and the LRM starts a second
- # try, without the CRM knowing of it (race condition)
- # The 'stopped' command is an exception as we do not process its result
- # in the CRM and we want to execute it always (even with no active CRM)
+ # do not queue the exact same command twice as this may lead to an inconsistent HA state when
+ # the first command fails but the CRM does not process its failure right away and the LRM starts
+ # a second try, without the CRM knowing of it (race condition). The 'stopped' command is an
+ # exception, as we do not process its result in the CRM and we want to always execute it (even
+ # with no active CRM)
return if $state ne 'stopped' && $uid && defined($self->{results}->{$uid});
if (my $w = $self->{workers}->{$sid}) {
sid => $sid,
uid => $uid,
state => $state,
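+ # counts how often run_workers() has tried to schedule this worker, for fair, starvation-free ordering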
+ start_tries => 0,
};
$self->{workers}->{$sid}->{params} = $params if $params;
my $count = 0;
foreach my $sid (keys %{$self->{workers}}) {
my $w = $self->{workers}->{$sid};
- if (my $pid = $w->{pid}) {
- # check status
- my $waitpid = waitpid($pid, WNOHANG);
- if (defined($waitpid) && ($waitpid == $pid)) {
- if (defined($w->{uid})) {
- $self->resource_command_finished($sid, $w->{uid}, $?);
- } else {
- $self->stop_command_finished($sid, $?);
- }
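+ # workers still waiting in the queue have no PID yet and cannot be reaped; skip them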
+ my $pid = $w->{pid} || next;
+
+ my $waitpid = waitpid($pid, WNOHANG); # check status
+ if (defined($waitpid) && ($waitpid == $pid)) {
+ if (defined($w->{uid})) {
+ $self->resource_command_finished($sid, $w->{uid}, $?);
} else {
- $count++;
+ $self->stop_command_finished($sid, $?);
}
+ } else {
+ $count++; # still active
}
}
return ERROR;
}
- } elsif ($cmd eq 'migrate' || $cmd eq 'relocate') {
+ } elsif ($cmd eq 'migrate' || $cmd eq 'relocate' || $cmd eq 'request_start_balance') {
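+ # 'request_start_balance' is handled like a migration here: it likewise requires a target node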
my $target = $params->{target};
if (!defined($target)) {