use PVE::HA::Tools ':exit_codes';
use PVE::HA::NodeStatus;
-my $fence_delay = 60;
-
# Create a new HA manager instance bound to the HA environment $haenv.
# Reads back the persisted manager status, but keeps only the state that
# cannot be regenerated automatically (node status, service status);
# the master status itself is rebuilt from scratch.
# NOTE(review): this span is a unified-diff hunk; '-' lines are removals
# and '+' lines are additions of the patch.
sub new {
my ($this, $haenv) = @_;
my $class = ref($this) || $this;
- my $ms = $haenv->read_manager_status();
+ my $self = bless { haenv => $haenv }, $class;
+
+ my $old_ms = $haenv->read_manager_status();
- $ms->{master_node} = $haenv->nodename();
+ # we only copy the state part of the manager which cannot be auto generated
- my $ns = PVE::HA::NodeStatus->new($haenv, $ms->{node_status} || {});
+ $self->{ns} = PVE::HA::NodeStatus->new($haenv, $old_ms->{node_status} || {})
$haenv->write_manager_status($ms);
}
-sub select_service_node {
- my ($groups, $online_node_usage, $service_conf, $current_node, $try_next) = @_;
+sub get_service_group {
+ my ($groups, $online_node_usage, $service_conf) = @_;
- my $group = { 'nodes' => { $service_conf->{node} => 1 } }; # default group
+ my $group = {};
+ # add all online nodes to default group to allow try_next when no group set
+ foreach my $node (keys %$online_node_usage) {
+ $group->{nodes}->{$node} = 1;
+ }
+ # overwrite default if service is bound to a specific group
$group = $groups->{ids}->{$service_conf->{group}} if $service_conf->{group} &&
$groups->{ids}->{$service_conf->{group}};
+ return $group;
+}
+
+# groups available nodes with their priority as group index
+sub get_node_priority_groups {
+ my ($group, $online_node_usage) = @_;
+
my $pri_groups = {};
my $group_members = {};
foreach my $entry (keys %{$group->{nodes}}) {
$group_members->{$node} = $pri;
}
-
# add non-group members to unrestricted groups (priority -1)
if (!$group->{restricted}) {
my $pri = -1;
}
}
+ return ($pri_groups, $group_members);
+}
+
+sub select_service_node {
+ my ($groups, $online_node_usage, $service_conf, $current_node, $try_next, $tried_nodes) = @_;
+
+ my $group = get_service_group($groups, $online_node_usage, $service_conf);
+
+ my ($pri_groups, $group_members) = get_node_priority_groups($group, $online_node_usage);
my @pri_list = sort {$b <=> $a} keys %$pri_groups;
return undef if !scalar(@pri_list);
-
+
+ # stay on current node if possible (avoids random migrations)
if (!$try_next && $group->{nofailback} && defined($group_members->{$current_node})) {
return $current_node;
}
my $top_pri = $pri_list[0];
+ # try to avoid nodes where the service failed already if we want to relocate
+ if ($try_next) {
+ foreach my $node (@$tried_nodes) {
+ delete $pri_groups->{$top_pri}->{$node};
+ }
+ }
+
my @nodes = sort {
$online_node_usage->{$a} <=> $online_node_usage->{$b} || $a cmp $b
} keys %{$pri_groups->{$top_pri}};
my $old_state = $sd->{state};
my $old_node = $sd->{node};
+ my $old_failed_nodes = $sd->{failed_nodes};
die "no state change" if $old_state eq $new_state; # just to be sure
$sd->{state} = $new_state;
$sd->{node} = $old_node;
+ $sd->{failed_nodes} = $old_failed_nodes;
my $text_state = '';
foreach my $k (sort keys %params) {
$self->recompute_online_node_usage();
$sd->{uid} = compute_new_uuid($new_state);
-
- $text_state = " ($text_state)" if $text_state;
- $haenv->log('info', "service '$sid': state changed from '${old_state}' to '${new_state}' $text_state");
+ $text_state = " ($text_state)" if $text_state;
+ $haenv->log('info', "service '$sid': state changed from '${old_state}'" .
+ " to '${new_state}'$text_state");
+};
+
+# clean up a possible bad state from a recovered service to allow its start
+my $fence_recovery_cleanup = sub {
+ my ($self, $sid, $fenced_node) = @_;
+
+ my $haenv = $self->{haenv};
+
+ my (undef, $type, $id) = PVE::HA::Tools::parse_sid($sid);
+ my $plugin = PVE::HA::Resources->lookup($type);
+
+ # should not happen
+ die "unknown resource type '$type'" if !$plugin;
+
+ # locks may block recovery, cleanup those which are safe to remove after fencing
+ my $removable_locks = ['backup', 'mounted'];
+ if (my $removed_lock = $plugin->remove_locks($haenv, $id, $removable_locks, $fenced_node)) {
+ $haenv->log('warning', "removed leftover lock '$removed_lock' from recovered " .
+ "service '$sid' to allow its start.");
+ }
};
# after a node was fenced this recovers the service to a new node
$haenv->log('info', "recover service '$sid' from fenced node " .
"'$fenced_node' to node '$recovery_node'");
+ &$fence_recovery_cleanup($self, $sid, $fenced_node);
+
$haenv->steal_service($sid, $sd->{node}, $recovery_node);
# $sd *is normally read-only*, fencing is the exception
foreach my $sid (keys %$ss) {
next if $sc->{$sid};
$haenv->log('info', "removing stale service '$sid' (no config)");
+ # remove all service related state information
delete $ss->{$sid};
}
-
+
$self->update_crm_commands();
for (;;) {
$self->recompute_online_node_usage();
- foreach my $sid (keys %$ss) {
+ foreach my $sid (sort keys %$ss) {
my $sd = $ss->{$sid};
my $cd = $sc->{$sid} || { state => 'disabled' };
# handle fencing
my $fenced_nodes = {};
- foreach my $sid (keys %$ss) {
+ foreach my $sid (sort keys %$ss) {
my $sd = $ss->{$sid};
next if $sd->{state} ne 'fence';
}
}
- if ($ns->node_is_offline_delayed($sd->{node}, $fence_delay)) {
+ if ($ns->node_is_offline_delayed($sd->{node})) {
&$change_service_state($self, $sid, 'fence');
return;
}
# check result from LRM daemon
if ($lrm_res) {
my $exit_code = $lrm_res->{exit_code};
+ my $req_state = $cd->{state} eq 'enabled' ? 'started' : 'request_stop';
if ($exit_code == SUCCESS) {
- &$change_service_state($self, $sid, 'started', node => $sd->{target});
+ &$change_service_state($self, $sid, $req_state, node => $sd->{target});
return;
+ } elsif ($exit_code == EWRONG_NODE) {
+ $haenv->log('err', "service '$sid' - migration failed: service" .
+ " registered on wrong node!");
+ &$change_service_state($self, $sid, 'error');
} else {
$haenv->log('err', "service '$sid' - migration failed (exit code $exit_code)");
- &$change_service_state($self, $sid, 'started', node => $sd->{node});
+ &$change_service_state($self, $sid, $req_state, node => $sd->{node});
return;
}
}
- if ($ns->node_is_offline_delayed($sd->{node}, $fence_delay)) {
+ if ($ns->node_is_offline_delayed($sd->{node})) {
&$change_service_state($self, $sid, 'fence');
return;
}
if ($sd->{node} ne $cd->{node}) {
# this can happen if we fence a node with active migrations
# hack: modify $sd (normally this should be considered read-only)
- $haenv->log('info', "fixup service '$sid' location ($sd->{node} => $cd->{node}");
+ $haenv->log('info', "fixup service '$sid' location ($sd->{node} => $cd->{node})");
$sd->{node} = $cd->{node};
}
} elsif ($sd->{node} eq $target) {
$haenv->log('info', "ignore service '$sid' $cmd request - service already on node '$target'");
} else {
- &$change_service_state($self, $sid, $cmd, node => $target);
+ &$change_service_state($self, $sid, $cmd, node => $sd->{node},
+ target => $target);
return;
}
} else {
$haenv->log('err', "unknown command '$cmd' for service '$sid'");
}
- }
+ }
if ($cd->{state} eq 'disabled') {
- # do nothing
+ # NOTE: do nothing here, the stop state is an exception as we do not
+ # process the LRM result here, thus the LRM always tries to stop the
+ # service (protection for the case no CRM is active)
return;
- }
+ }
if ($cd->{state} eq 'enabled') {
# simply mark it started, if it's on the wrong node
$haenv->log('err', "service '$sid' - unknown state '$cd->{state}' in service configuration");
}
+sub record_service_failed_on_node {
+ my ($self, $sid, $node) = @_;
+
+ if (!defined($self->{ss}->{$sid}->{failed_nodes})) {
+ $self->{ss}->{$sid}->{failed_nodes} = [];
+ }
+
+ push @{$self->{ss}->{$sid}->{failed_nodes}}, $node;
+}
+
sub next_state_started {
my ($self, $sid, $cd, $sd, $lrm_res) = @_;
my $ns = $self->{ns};
if (!$ns->node_is_online($sd->{node})) {
- if ($ns->node_is_offline_delayed($sd->{node}, $fence_delay)) {
+ if ($ns->node_is_offline_delayed($sd->{node})) {
&$change_service_state($self, $sid, 'fence');
}
return;
} else {
my $try_next = 0;
+
if ($lrm_res) {
+
my $ec = $lrm_res->{exit_code};
if ($ec == SUCCESS) {
- $master_status->{relocate_trial}->{$sid} = 0;
-
- } elsif ($ec == ETRY_AGAIN) {
+ if (defined($sd->{failed_nodes})) {
+ $haenv->log('info', "relocation policy successful for '$sid' on node '$sd->{node}'," .
+ " failed nodes: " . join(', ', @{$sd->{failed_nodes}}) );
+ }
- # do nothing, the LRM wants to try again
+ delete $sd->{failed_nodes};
} elsif ($ec == ERROR) {
# apply our relocate policy if we got ERROR from the LRM
+ $self->record_service_failed_on_node($sid, $sd->{node});
- my $try = $master_status->{relocate_trial}->{$sid} || 0;
-
- if ($try < $cd->{max_relocate}) {
+ if (scalar(@{$sd->{failed_nodes}}) <= $cd->{max_relocate}) {
- $try++;
# tell select_service_node to relocate if possible
$try_next = 1;
$haenv->log('warning', "starting service $sid on node".
" '$sd->{node}' failed, relocating service.");
- $master_status->{relocate_trial}->{$sid} = $try;
} else {
- $haenv->log('err', "recovery policy for service".
- " $sid failed, entering error state!");
+ $haenv->log('err', "recovery policy for service $sid " .
+ "failed, entering error state. Failed nodes: ".
+ join(', ', @{$sd->{failed_nodes}}));
&$change_service_state($self, $sid, 'error');
return;
}
} else {
+ $self->record_service_failed_on_node($sid, $sd->{node});
+
$haenv->log('err', "service '$sid' got unrecoverable error" .
" (exit code $ec))");
# we have no save way out (yet) for other errors
&$change_service_state($self, $sid, 'error');
+ return;
}
}
- my $node = select_service_node($self->{groups}, $self->{online_node_usage},
- $cd, $sd->{node}, $try_next);
+ my $node = select_service_node($self->{groups}, $self->{online_node_usage},
+ $cd, $sd->{node}, $try_next, $sd->{failed_nodes});
if ($node && ($sd->{node} ne $node)) {
if ($cd->{type} eq 'vm') {
&$change_service_state($self, $sid, 'relocate', node => $sd->{node}, target => $node);
}
} else {
- # do nothing
+ if ($try_next && !defined($node)) {
+ $haenv->log('warning', "Start Error Recovery: Tried all available " .
+ " nodes for service '$sid', retry start on current node. " .
+ "Tried nodes: " . join(', ', @{$sd->{failed_nodes}}));
+ }
+ # ensure service get started again if it went unexpected down
+ # but ensure also no LRM result gets lost
+ $sd->{uid} = compute_new_uuid($sd->{state}) if defined($lrm_res);
}
}
return;
- }
+ }
$haenv->log('err', "service '$sid' - unknown state '$cd->{state}' in service configuration");
}
my ($self, $sid, $cd, $sd, $lrm_res) = @_;
my $ns = $self->{ns};
+ my $ms = $self->{ms};
if ($cd->{state} eq 'disabled') {
- &$change_service_state($self, $sid, 'stopped');
- return;
- }
+ # clean up on error recovery
+ delete $sd->{failed_nodes};
- if ($ns->node_is_offline_delayed($sd->{node}, $fence_delay)) {
- &$change_service_state($self, $sid, 'fence');
+ &$change_service_state($self, $sid, 'stopped');
return;
}