my ($shutdown, $reboot) = $haenv->is_node_shutdown();
- my $dc_ha_cfg = $haenv->get_ha_settings();
- my $shutdown_policy = $dc_ha_cfg->{shutdown_policy} // 'conditional';
+ my $dc_cfg = $haenv->get_datacenter_settings();
+ my $shutdown_policy = $dc_cfg->{ha}->{shutdown_policy} // 'conditional';
if ($shutdown) { # don't log this on service restart, only on node shutdown
$haenv->log('info', "got shutdown request with shutdown policy '$shutdown_policy'");
next if $req_state eq 'freeze';
# erroneous services are not managed by HA, don't count them as active
next if $req_state eq 'error';
+ # request_start is for (optional) better node selection for stop -> started transition
+ next if $req_state eq 'request_start';
$count++;
}
if ($self->{shutdown_request}) {
if ($self->{mode} eq 'restart') {
-
+ # catch exited workers to update service state
+ my $workers = $self->run_workers();
my $service_count = $self->active_service_count();
- if ($service_count == 0) {
- if ($self->run_workers() == 0) {
- # safety: no active services or workers -> OK
- give_up_watchdog_protection($self);
- $shutdown = 1;
+ if ($service_count == 0 && $workers == 0) {
+ # safety: no active services or workers -> OK
+ give_up_watchdog_protection($self);
+ $shutdown = 1;
- # restart with no or freezed services, release the lock
- $haenv->release_ha_agent_lock();
- }
+ # restart with no or frozen services, release the lock
+ $haenv->release_ha_agent_lock();
}
} else {
# number of workers to start, if 0 we exec the command directly witouth forking
my $max_workers = $haenv->get_max_workers();
-
my $sc = $haenv->read_service_config();
- while (($haenv->get_time() - $starttime) < 5) {
- my $count = $self->check_active_workers();
+ my $worker = $self->{workers};
+ # we've only got limited time but want to ensure that every queued worker is scheduled
+ # eventually, so sort by the count a worker was seen here in this loop
+ my $fair_sorter = sub {
+ $worker->{$b}->{start_tries} <=> $worker->{$a}->{start_tries} || $a cmp $b
+ };
- foreach my $sid (sort keys %{$self->{workers}}) {
- last if $count >= $max_workers && $max_workers > 0;
+ while (($haenv->get_time() - $starttime) <= 8) {
+ my $count = $self->check_active_workers();
- my $w = $self->{workers}->{$sid};
- next if $w->{pid};
+ for my $sid (sort $fair_sorter grep { !$worker->{$_}->{pid} } keys %$worker) {
+ my $w = $worker->{$sid};
+ # higher try-count means higher priority especially compared to newly queued jobs, so
+ # count every try to avoid starvation
+ $w->{start_tries}++;
+ # FIXME: should be last and ensure that check_active_workers is called sooner
+ next if $count >= $max_workers && $max_workers > 0;
# only fork if we may, else call exec_resource_agent directly (e.g. for tests)
if ($max_workers > 0) {
foreach my $sid (keys %$ss) {
my $sd = $ss->{$sid};
- next if !$sd->{node};
- next if !$sd->{uid};
+ next if !$sd->{node} || !$sd->{uid};
next if $sd->{node} ne $nodename;
- my $req_state = $sd->{state};
- next if !defined($req_state);
+ my $request_state = $sd->{state};
+ next if !defined($request_state);
# can only happen for restricted groups where the failed node itself needs to be the
# reocvery target. Always let the master first do so, it will then marked as 'stopped' and
# we can just continue normally. But we must NOT do anything with it while still in recovery
- next if $req_state eq 'recovery';
- next if $req_state eq 'freeze';
+ next if $request_state eq 'recovery';
+ next if $request_state eq 'freeze';
+ # intermediate step for optional better node selection on stop -> start request state change
+ next if $request_state eq 'request_start';
- $self->queue_resource_command($sid, $sd->{uid}, $req_state, {
+ $self->queue_resource_command($sid, $sd->{uid}, $request_state, {
'target' => $sd->{target},
'timeout' => $sd->{timeout},
});
sub queue_resource_command {
my ($self, $sid, $uid, $state, $params) = @_;
- # do not queue the excatly same command twice as this may lead to
- # an inconsistent HA state when the first command fails but the CRM
- # does not process its failure right away and the LRM starts a second
- # try, without the CRM knowing of it (race condition)
- # The 'stopped' command is an exception as we do not process its result
- # in the CRM and we want to execute it always (even with no active CRM)
+ # do not queue the exact same command twice as this may lead to an inconsistent HA state when
+ # the first command fails but the CRM does not process its failure right away and the LRM starts
+ # a second try, without the CRM knowing of it (race condition). The 'stopped' command is an
+ # exception as we do not process its result in the CRM and we want to execute it always (even
+ # with no active CRM)
return if $state ne 'stopped' && $uid && defined($self->{results}->{$uid});
if (my $w = $self->{workers}->{$sid}) {
sid => $sid,
uid => $uid,
state => $state,
+ start_tries => 0,
};
$self->{workers}->{$sid}->{params} = $params if $params;
my $count = 0;
foreach my $sid (keys %{$self->{workers}}) {
my $w = $self->{workers}->{$sid};
- if (my $pid = $w->{pid}) {
- # check status
- my $waitpid = waitpid($pid, WNOHANG);
- if (defined($waitpid) && ($waitpid == $pid)) {
- if (defined($w->{uid})) {
- $self->resource_command_finished($sid, $w->{uid}, $?);
- } else {
- $self->stop_command_finished($sid, $?);
- }
+ my $pid = $w->{pid} || next;
+
+ my $waitpid = waitpid($pid, WNOHANG); # check status
+ if (defined($waitpid) && ($waitpid == $pid)) {
+ if (defined($w->{uid})) {
+ $self->resource_command_finished($sid, $w->{uid}, $?);
} else {
- $count++;
+ $self->stop_command_finished($sid, $?);
}
+ } else {
+ $count++; # still active
}
}
return ERROR;
}
- } elsif ($cmd eq 'migrate' || $cmd eq 'relocate') {
+ } elsif ($cmd eq 'migrate' || $cmd eq 'relocate' || $cmd eq 'request_start_balance') {
my $target = $params->{target};
if (!defined($target)) {