use warnings;
use Digest::MD5 qw(md5_base64);
-use Data::Dumper;
use PVE::Tools;
+use PVE::HA::Tools ':exit_codes';
use PVE::HA::NodeStatus;
-my $fence_delay = 60;
-
# Constructor: create a new HA manager instance bound to the given HA
# environment ($haenv).
#
# Only the state parts of the previous manager status which cannot be
# auto-generated are carried over: the per-node status (wrapped in a
# PVE::HA::NodeStatus object) and the per-service status hash. The master
# status itself is rebuilt from scratch, claiming mastership for this node.
sub new {
    my ($this, $haenv) = @_;

    my $class = ref($this) || $this;

    my $self = bless { haenv => $haenv }, $class;

    my $old_ms = $haenv->read_manager_status();

    # we only copy the state part of the manager which cannot be auto generated

    $self->{ns} = PVE::HA::NodeStatus->new($haenv, $old_ms->{node_status} || {});

    # fixme: use separate class PVE::HA::ServiceStatus
    $self->{ss} = $old_ms->{service_status} || {};

    $self->{ms} = { master_node => $haenv->nodename() };

    return $self;
}
$haenv->write_manager_status($ms);
}
-sub select_service_node {
- my ($groups, $online_node_usage, $service_conf, $current_node, $try_next) = @_;
# Return the group configuration a service is bound to.
#
# Parameters:
#   $groups            - parsed groups configuration ({ ids => { name => {...} } })
#   $online_node_usage - hash of currently online nodes (node name => usage count)
#   $service_conf      - the service configuration (may reference a group)
#
# If the service references a known group, that group is returned verbatim.
# Otherwise a synthetic default group containing all currently online nodes
# is returned, so that try_next relocation can still pick any online node.
sub get_service_group {
    my ($groups, $online_node_usage, $service_conf) = @_;

    my $group = {};
    # add all online nodes to default group to allow try_next when no group set
    foreach my $node (keys %$online_node_usage) {
        $group->{nodes}->{$node} = 1;
    }

    # overwrite default if service is bound to a specific group
    $group = $groups->{ids}->{$service_conf->{group}}
        if $service_conf->{group} && $groups->{ids}->{$service_conf->{group}};

    return $group;
}
+
+# groups available nodes with their priority as group index
+sub get_node_priority_groups {
+ my ($group, $online_node_usage) = @_;
+
my $pri_groups = {};
my $group_members = {};
foreach my $entry (keys %{$group->{nodes}}) {
$group_members->{$node} = $pri;
}
-
# add non-group members to unrestricted groups (priority -1)
if (!$group->{restricted}) {
my $pri = -1;
}
}
+ return ($pri_groups, $group_members);
+}
+
+sub select_service_node {
+ my ($groups, $online_node_usage, $service_conf, $current_node, $try_next, $tried_nodes) = @_;
+
+ my $group = get_service_group($groups, $online_node_usage, $service_conf);
+
+ my ($pri_groups, $group_members) = get_node_priority_groups($group, $online_node_usage);
my @pri_list = sort {$b <=> $a} keys %$pri_groups;
return undef if !scalar(@pri_list);
-
+
+ # stay on current node if possible (avoids random migrations)
if (!$try_next && $group->{nofailback} && defined($group_members->{$current_node})) {
return $current_node;
}
my $top_pri = $pri_list[0];
+ # try to avoid nodes where the service failed already if we want to relocate
+ if ($try_next) {
+ foreach my $node (@$tried_nodes) {
+ delete $pri_groups->{$top_pri}->{$node};
+ }
+ }
+
my @nodes = sort {
$online_node_usage->{$a} <=> $online_node_usage->{$b} || $a cmp $b
} keys %{$pri_groups->{$top_pri}};
my $old_state = $sd->{state};
my $old_node = $sd->{node};
+ my $old_failed_nodes = $sd->{failed_nodes};
die "no state change" if $old_state eq $new_state; # just to be sure
$sd->{state} = $new_state;
$sd->{node} = $old_node;
+ $sd->{failed_nodes} = $old_failed_nodes;
my $text_state = '';
foreach my $k (sort keys %params) {
$self->recompute_online_node_usage();
$sd->{uid} = compute_new_uuid($new_state);
-
- $text_state = " ($text_state)" if $text_state;
- $haenv->log('info', "service '$sid': state changed from '${old_state}' to '${new_state}' $text_state");
+ $text_state = " ($text_state)" if $text_state;
+ $haenv->log('info', "service '$sid': state changed from '${old_state}'" .
+ " to '${new_state}'$text_state");
+};
+
# clean up a possible bad state from a recovered service to allow its start
#
# Parameters:
#   $self        - the manager instance (uses $self->{haenv})
#   $sid         - service ID ("type:id")
#   $fenced_node - the node the service was fenced on (passed to the plugin
#                  so it only removes locks left behind by that node)
my $fence_recovery_cleanup = sub {
    my ($self, $sid, $fenced_node) = @_;

    my $haenv = $self->{haenv};

    my (undef, $type, $id) = PVE::HA::Tools::parse_sid($sid);
    my $plugin = PVE::HA::Resources->lookup($type);

    # should not happen
    die "unknown resource type '$type'" if !$plugin;

    # locks may block recovery, cleanup those which are safe to remove after fencing
    my $removable_locks = ['backup', 'mounted'];
    if (my $removed_lock = $plugin->remove_locks($haenv, $id, $removable_locks, $fenced_node)) {
        $haenv->log('warning', "removed leftover lock '$removed_lock' from recovered " .
                    "service '$sid' to allow its start.");
    }
};
+
# after a node was fenced this recovers the service to a new node
#
# Parameters:
#   $self - the manager instance
#   $sid  - service ID
#   $cd   - the service's configuration data
#
# Picks a recovery node via select_service_node(), cleans up leftover locks,
# steals the service from the fenced node and transitions it to its requested
# state. If no recovery node is available the service enters the error state.
my $recover_fenced_service = sub {
    my ($self, $sid, $cd) = @_;

    my ($haenv, $ss) = ($self->{haenv}, $self->{ss});

    my $sd = $ss->{$sid};

    if ($sd->{state} ne 'fence') { # should not happen
        $haenv->log('err', "cannot recover service '$sid' from fencing," .
                    " wrong state '$sd->{state}'");
        return;
    }

    my $fenced_node = $sd->{node}; # for logging purpose

    $self->recompute_online_node_usage(); # we want the most current node state

    my $recovery_node = select_service_node($self->{groups},
                                            $self->{online_node_usage},
                                            $cd, $sd->{node});

    if ($recovery_node) {
        $haenv->log('info', "recover service '$sid' from fenced node " .
                    "'$fenced_node' to node '$recovery_node'");

        &$fence_recovery_cleanup($self, $sid, $fenced_node);

        $haenv->steal_service($sid, $sd->{node}, $recovery_node);

        # $sd *is normally read-only*, fencing is the exception
        $cd->{node} = $sd->{node} = $recovery_node;
        my $new_state = ($cd->{state} eq 'started') ? 'started' : 'request_stop';
        &$change_service_state($self, $sid, $new_state, node => $recovery_node);
    } else {
        # no possible node found, cannot recover
        $haenv->log('err', "recovering service '$sid' from fenced node " .
                    "'$fenced_node' failed, no recovery node found");
        &$change_service_state($self, $sid, 'error');
    }
};
# read LRM status for all nodes
# add new service
foreach my $sid (sort keys %$sc) {
next if $ss->{$sid}; # already there
- $haenv->log('info', "adding new service '$sid' on node '$sc->{$sid}->{node}'");
+ my $cd = $sc->{$sid};
+ $haenv->log('info', "adding new service '$sid' on node '$cd->{node}'");
# assume we are running to avoid relocate running service at add
- $ss->{$sid} = { state => 'started', node => $sc->{$sid}->{node},
+ my $state = ($cd->{state} eq 'started') ? 'started' : 'request_stop';
+ $ss->{$sid} = { state => $state, node => $cd->{node},
uid => compute_new_uuid('started') };
}
foreach my $sid (keys %$ss) {
next if $sc->{$sid};
$haenv->log('info', "removing stale service '$sid' (no config)");
+ # remove all service related state information
delete $ss->{$sid};
}
-
+
$self->update_crm_commands();
for (;;) {
$self->recompute_online_node_usage();
- foreach my $sid (keys %$ss) {
+ foreach my $sid (sort keys %$ss) {
my $sd = $ss->{$sid};
my $cd = $sc->{$sid} || { state => 'disabled' };
my $lrm_mode = $sd->{node} ? $lrm_modes->{$sd->{node}} : undef;
# unfreeze
- &$change_service_state($self, $sid, 'started')
+ my $state = ($cd->{state} eq 'started') ? 'started' : 'request_stop';
+ &$change_service_state($self, $sid, $state)
if $lrm_mode && $lrm_mode eq 'active';
} elsif ($last_state eq 'error') {
die "unknown service state '$last_state'";
}
-
my $lrm_mode = $sd->{node} ? $lrm_modes->{$sd->{node}} : undef;
- $lrm_mode = 'unknown'if !$lrm_mode;
- if (($sd->{state} eq 'started' || $sd->{state} eq 'stopped' ||
- $sd->{state} eq 'request_stop') && ($lrm_mode ne 'active')) {
- &$change_service_state($self, $sid, 'freeze');
+ if ($lrm_mode && $lrm_mode eq 'restart') {
+ if (($sd->{state} eq 'started' || $sd->{state} eq 'stopped' ||
+ $sd->{state} eq 'request_stop')) {
+ &$change_service_state($self, $sid, 'freeze');
+ }
}
-
+
$repeat = 1 if $sd->{state} ne $last_state;
}
# handle fencing
my $fenced_nodes = {};
- foreach my $sid (keys %$ss) {
+ foreach my $sid (sort keys %$ss) {
my $sd = $ss->{$sid};
next if $sd->{state} ne 'fence';
next if !$fenced_nodes->{$sd->{node}};
- # node fence was successful - mark service as stopped
- &$change_service_state($self, $sid, 'stopped');
+ # node fence was successful - recover service
+ &$recover_fenced_service($self, $sid, $sc->{$sid});
}
last if !$repeat;
# check result from LRM daemon
if ($lrm_res) {
my $exit_code = $lrm_res->{exit_code};
- if ($exit_code == 0) {
+ if ($exit_code == SUCCESS) {
&$change_service_state($self, $sid, 'stopped');
return;
} else {
+ $haenv->log('err', "service '$sid' stop failed (exit code $exit_code)");
&$change_service_state($self, $sid, 'error'); # fixme: what state?
return;
}
}
- if ($ns->node_is_offline_delayed($sd->{node}, $fence_delay)) {
+ if ($ns->node_is_offline_delayed($sd->{node})) {
&$change_service_state($self, $sid, 'fence');
return;
}
# check result from LRM daemon
if ($lrm_res) {
my $exit_code = $lrm_res->{exit_code};
- if ($exit_code == 0) {
- &$change_service_state($self, $sid, 'started', node => $sd->{target});
+ my $req_state = $cd->{state} eq 'started' ? 'started' : 'request_stop';
+ if ($exit_code == SUCCESS) {
+ &$change_service_state($self, $sid, $req_state, node => $sd->{target});
return;
+ } elsif ($exit_code == EWRONG_NODE) {
+ $haenv->log('err', "service '$sid' - migration failed: service" .
+ " registered on wrong node!");
+ &$change_service_state($self, $sid, 'error');
} else {
$haenv->log('err', "service '$sid' - migration failed (exit code $exit_code)");
- &$change_service_state($self, $sid, 'started', node => $sd->{node});
+ &$change_service_state($self, $sid, $req_state, node => $sd->{node});
return;
}
}
- if ($ns->node_is_offline_delayed($sd->{node}, $fence_delay)) {
+ if ($ns->node_is_offline_delayed($sd->{node})) {
&$change_service_state($self, $sid, 'fence');
return;
}
if ($sd->{node} ne $cd->{node}) {
# this can happen if we fence a node with active migrations
# hack: modify $sd (normally this should be considered read-only)
- $haenv->log('info', "fixup service '$sid' location ($sd->{node} => $cd->{node}");
+ $haenv->log('info', "fixup service '$sid' location ($sd->{node} => $cd->{node})");
$sd->{node} = $cd->{node};
}
} elsif ($sd->{node} eq $target) {
$haenv->log('info', "ignore service '$sid' $cmd request - service already on node '$target'");
} else {
- eval {
- $haenv->change_service_location($sid, $sd->{node}, $target);
- $cd->{node} = $sd->{node} = $target; # fixme: $sd is read-only??!!
- $haenv->log('info', "$cmd service '$sid' to node '$target' (stopped)");
- };
- if (my $err = $@) {
- $haenv->log('err', "$cmd service '$sid' to node '$target' failed - $err");
- }
+ &$change_service_state($self, $sid, $cmd, node => $sd->{node},
+ target => $target);
+ return;
}
} else {
$haenv->log('err', "unknown command '$cmd' for service '$sid'");
}
- }
+ }
if ($cd->{state} eq 'disabled') {
- # do nothing
+ # NOTE: do nothing here, the stop state is an exception as we do not
+ # process the LRM result here, thus the LRM always tries to stop the
+ # service (protection for the case no CRM is active)
return;
- }
+ }
- if ($cd->{state} eq 'enabled') {
- if (my $node = select_service_node($self->{groups}, $self->{online_node_usage}, $cd, $sd->{node})) {
- if ($node && ($sd->{node} ne $node)) {
- eval {
- $haenv->change_service_location($sid, $sd->{node}, $node);
- $cd->{node} = $sd->{node} = $node; # fixme: $sd is read-only??!!
- };
- if (my $err = $@) {
- $haenv->log('err', "move service '$sid' to node '$node' failed - $err");
- } else {
- &$change_service_state($self, $sid, 'started', node => $node);
- }
- } else {
- &$change_service_state($self, $sid, 'started', node => $node);
- }
- } else {
- # fixme: warn
- }
+ if ($ns->node_is_offline_delayed($sd->{node})) {
+ &$change_service_state($self, $sid, 'fence');
+ return;
+ }
+ if ($cd->{state} eq 'stopped') {
+ # almost the same as 'disabled' state but the service will also get recovered
+ return;
+ }
+
+ if ($cd->{state} eq 'started') {
+ # simply mark it started, if it's on the wrong node
+ # next_state_started will fix that for us
+ &$change_service_state($self, $sid, 'started', node => $sd->{node});
return;
}
$haenv->log('err', "service '$sid' - unknown state '$cd->{state}' in service configuration");
}
# Remember that service $sid failed to start on $node.
#
# The per-service 'failed_nodes' list feeds the relocation policy:
# select_service_node() avoids the recorded nodes on a try_next relocation
# attempt, and its length is compared against the service's max_relocate
# setting.
sub record_service_failed_on_node {
    my ($self, $sid, $node) = @_;

    # push autovivifies the 'failed_nodes' array reference, so no explicit
    # initialization to [] is required beforehand
    push @{$self->{ss}->{$sid}->{failed_nodes}}, $node;
}
+
sub next_state_started {
my ($self, $sid, $cd, $sd, $lrm_res) = @_;
my $ns = $self->{ns};
if (!$ns->node_is_online($sd->{node})) {
- if ($ns->node_is_offline_delayed($sd->{node}, $fence_delay)) {
+ if ($ns->node_is_offline_delayed($sd->{node})) {
&$change_service_state($self, $sid, 'fence');
}
return;
}
- if ($cd->{state} eq 'disabled') {
+ if ($cd->{state} eq 'disabled' || $cd->{state} eq 'stopped') {
&$change_service_state($self, $sid, 'request_stop');
return;
}
- if ($cd->{state} eq 'enabled') {
+ if ($cd->{state} eq 'started') {
if ($sd->{cmd}) {
my ($cmd, $target) = @{$sd->{cmd}};
} elsif ($sd->{node} eq $target) {
$haenv->log('info', "ignore service '$sid' $cmd request - service already on node '$target'");
} else {
- $haenv->log('info', "$cmd service '$sid' to node '$target' (running)");
+ $haenv->log('info', "$cmd service '$sid' to node '$target'");
&$change_service_state($self, $sid, $cmd, node => $sd->{node}, target => $target);
}
} else {
} else {
my $try_next = 0;
+
if ($lrm_res) {
- if ($lrm_res->{exit_code} == 1) {
- my $try = $master_status->{relocate_trial}->{$sid} || 0;
+ my $ec = $lrm_res->{exit_code};
+ if ($ec == SUCCESS) {
+
+ if (defined($sd->{failed_nodes})) {
+ $haenv->log('info', "relocation policy successful for '$sid' on node '$sd->{node}'," .
+ " failed nodes: " . join(', ', @{$sd->{failed_nodes}}) );
+ }
+
+ delete $sd->{failed_nodes};
+
+ # store flag to indicate successful start - only valid while state == 'started'
+ $sd->{running} = 1;
+
+ } elsif ($ec == ERROR) {
- if ($try < $cd->{max_relocate}) {
+ delete $sd->{running};
- $try++;
- $try_next = 1; # tell select_service_node to relocate
+ # apply our relocate policy if we got ERROR from the LRM
+ $self->record_service_failed_on_node($sid, $sd->{node});
+
+ if (scalar(@{$sd->{failed_nodes}}) <= $cd->{max_relocate}) {
+
+ # tell select_service_node to relocate if possible
+ $try_next = 1;
$haenv->log('warning', "starting service $sid on node".
" '$sd->{node}' failed, relocating service.");
- $master_status->{relocate_trial}->{$sid} = $try;
} else {
- $haenv->log('err', "recovery policy for service".
- " $sid failed, entering error state!");
+ $haenv->log('err', "recovery policy for service $sid " .
+ "failed, entering error state. Failed nodes: ".
+ join(', ', @{$sd->{failed_nodes}}));
&$change_service_state($self, $sid, 'error');
return;
}
- } elsif ($lrm_res->{exit_code} == 0) {
- $master_status->{relocate_trial}->{$sid} = 0;
+ } else {
+ $self->record_service_failed_on_node($sid, $sd->{node});
+
+ $haenv->log('err', "service '$sid' got unrecoverable error" .
+ " (exit code $ec))");
+ # we have no save way out (yet) for other errors
+ &$change_service_state($self, $sid, 'error');
+ return;
}
}
- my $node = select_service_node($self->{groups}, $self->{online_node_usage},
- $cd, $sd->{node}, $try_next);
+ my $node = select_service_node($self->{groups}, $self->{online_node_usage},
+ $cd, $sd->{node}, $try_next, $sd->{failed_nodes});
if ($node && ($sd->{node} ne $node)) {
- $haenv->log('info', "migrate service '$sid' to node '$node' (running)");
- &$change_service_state($self, $sid, 'migrate', node => $sd->{node}, target => $node);
+ if ($cd->{type} eq 'vm') {
+ $haenv->log('info', "migrate service '$sid' to node '$node' (running)");
+ &$change_service_state($self, $sid, 'migrate', node => $sd->{node}, target => $node);
+ } else {
+ $haenv->log('info', "relocate service '$sid' to node '$node'");
+ &$change_service_state($self, $sid, 'relocate', node => $sd->{node}, target => $node);
+ }
} else {
- # do nothing
+ if ($try_next && !defined($node)) {
+ $haenv->log('warning', "Start Error Recovery: Tried all available " .
+ " nodes for service '$sid', retry start on current node. " .
+ "Tried nodes: " . join(', ', @{$sd->{failed_nodes}}));
+ }
+ # ensure service get started again if it went unexpected down
+ # but ensure also no LRM result gets lost
+ $sd->{uid} = compute_new_uuid($sd->{state}) if defined($lrm_res);
}
}
return;
- }
+ }
$haenv->log('err', "service '$sid' - unknown state '$cd->{state}' in service configuration");
}
my ($self, $sid, $cd, $sd, $lrm_res) = @_;
my $ns = $self->{ns};
+ my $ms = $self->{ms};
if ($cd->{state} eq 'disabled') {
- &$change_service_state($self, $sid, 'stopped');
- return;
- }
+ # clean up on error recovery
+ delete $sd->{failed_nodes};
- if ($ns->node_is_offline_delayed($sd->{node}, $fence_delay)) {
- &$change_service_state($self, $sid, 'fence');
+ &$change_service_state($self, $sid, 'stopped');
return;
}