diff --git a/src/PVE/HA/Manager.pm b/src/PVE/HA/Manager.pm
index a482ef2..9e46f19 100644
--- a/src/PVE/HA/Manager.pm
+++ b/src/PVE/HA/Manager.pm
@@ -4,33 +4,27 @@ use strict;
 use warnings;
 use Digest::MD5 qw(md5_base64);
 
-use Data::Dumper;
 use PVE::Tools;
 use PVE::HA::Tools ':exit_codes';
 use PVE::HA::NodeStatus;
 
-my $fence_delay = 60;
-
 sub new {
     my ($this, $haenv) = @_;
 
     my $class = ref($this) || $this;
 
-    my $ms = $haenv->read_manager_status();
+    my $self = bless { haenv => $haenv }, $class;
+
+    my $old_ms = $haenv->read_manager_status();
 
-    $ms->{master_node} = $haenv->nodename();
+    # we only copy the state part of the manager which cannot be auto-generated
 
-    my $ns = PVE::HA::NodeStatus->new($haenv, $ms->{node_status} || {});
+    $self->{ns} = PVE::HA::NodeStatus->new($haenv, $old_ms->{node_status} || {});
 
     # fixme: use separate class PVE::HA::ServiceStatus
-    my $ss = $ms->{service_status} || {};
+    $self->{ss} = $old_ms->{service_status} || {};
 
-    my $self = bless {
-	haenv => $haenv,
-	ms => $ms, # master status
-	ns => $ns, # PVE::HA::NodeStatus
-	ss => $ss, # service status
-    }, $class;
+    $self->{ms} = { master_node => $haenv->nodename() };
 
     return $self;
 }
@@ -49,18 +43,30 @@ sub flush_master_status {
     $ms->{node_status} = $ns->{status};
     $ms->{service_status} = $ss;
     $ms->{timestamp} = $haenv->get_time();
-    
+
     $haenv->write_manager_status($ms);
-} 
+}
 
-sub select_service_node {
-    my ($groups, $online_node_usage, $service_conf, $current_node, $try_next) = @_;
+sub get_service_group {
+    my ($groups, $online_node_usage, $service_conf) = @_;
 
-    my $group = { 'nodes' => { $service_conf->{node} => 1 } }; # default group
+    my $group = {};
+    # add all online nodes to the default group to allow try_next when no group is set
+    foreach my $node (keys %$online_node_usage) {
+	$group->{nodes}->{$node} = 1;
+    }
 
-    $group = $groups->{ids}->{$service_conf->{group}} if $service_conf->{group} &&
+    # overwrite the default if the service is bound to a specific group
+    $group = $groups->{ids}->{$service_conf->{group}} if $service_conf->{group} &&
 	$groups->{ids}->{$service_conf->{group}};
 
+    return $group;
+}
+
+# groups available nodes with their priority as group index
+sub get_node_priority_groups {
+    my ($group, $online_node_usage) = @_;
+
     my $pri_groups = {};
     my $group_members = {};
     foreach my $entry (keys %{$group->{nodes}}) {
@@ -73,7 +79,6 @@ sub select_service_node {
 	$group_members->{$node} = $pri;
     }
 
-
     # add non-group members to unrestricted groups (priority -1)
     if (!$group->{restricted}) {
 	my $pri = -1;
@@ -84,10 +89,20 @@ sub select_service_node {
 	}
     }
 
+    return ($pri_groups, $group_members);
+}
+
+sub select_service_node {
+    my ($groups, $online_node_usage, $service_conf, $current_node, $try_next, $tried_nodes, $maintenance_fallback) = @_;
+
+    my $group = get_service_group($groups, $online_node_usage, $service_conf);
+
+    my ($pri_groups, $group_members) = get_node_priority_groups($group, $online_node_usage);
 
     my @pri_list = sort {$b <=> $a} keys %$pri_groups;
     return undef if !scalar(@pri_list);
-    
+
+    # stay on the current node if possible (avoids random migrations)
     if (!$try_next && $group->{nofailback} && defined($group_members->{$current_node})) {
 	return $current_node;
     }
@@ -96,33 +111,43 @@ sub select_service_node {
 
     my $top_pri = $pri_list[0];
 
-    my @nodes = sort {
+    # try to avoid nodes where the service already failed if we want to relocate
+    if ($try_next) {
+	foreach my $node (@$tried_nodes) {
+	    delete $pri_groups->{$top_pri}->{$node};
+	}
+    }
+
+    my @nodes = sort {
 	$online_node_usage->{$a} <=> $online_node_usage->{$b} || $a cmp $b
     } keys %{$pri_groups->{$top_pri}};
 
     my $found;
+    my $found_maintenance_fallback;
     for (my $i = scalar(@nodes) - 1; $i >= 0; $i--) {
 	my $node = $nodes[$i];
 	if ($node eq $current_node) {
 	    $found = $i;
-	    last;
+	}
+	if (defined($maintenance_fallback) && $node eq $maintenance_fallback) {
+	    $found_maintenance_fallback = $i;
 	}
     }
 
-    if ($try_next) {
+    if (defined($found_maintenance_fallback)) {
+	return $nodes[$found_maintenance_fallback];
+    }
 
+    if ($try_next) {
 	if (defined($found) && ($found < (scalar(@nodes) - 1))) {
 	    return $nodes[$found + 1];
 	} else {
 	    return $nodes[0];
 	}
-
+    } elsif (defined($found)) {
+	return $nodes[$found];
     } else {
-
-	return $nodes[$found] if defined($found);
-
 	return $nodes[0];
-
     }
 }
 
@@ -130,7 +155,7 @@ my $uid_counter = 0;
 sub compute_new_uuid {
     my ($state) = @_;
-    
+
     $uid_counter++;
     return md5_base64($state . $$ . time() . $uid_counter);
 }
@@ -161,10 +186,12 @@ sub recompute_online_node_usage {
 	    my $sd = $self->{ss}->{$sid};
 	    my $state = $sd->{state};
 	    if (defined($online_node_usage->{$sd->{node}})) {
-		if (($state eq 'started') || ($state eq 'request_stop') ||
+		if (($state eq 'started') || ($state eq 'request_stop') || ($state eq 'fence') ||
 		    ($state eq 'freeze') || ($state eq 'error')) {
 		    $online_node_usage->{$sd->{node}}++;
 		} elsif (($state eq 'migrate') || ($state eq 'relocate')) {
+		    # count it for both source and target, as load is put on both
+		    $online_node_usage->{$sd->{node}}++;
 		    $online_node_usage->{$sd->{target}}++;
 		} elsif ($state eq 'stopped') {
 		    # do nothing
@@ -186,6 +213,8 @@ my $change_service_state = sub {
 
     my $old_state = $sd->{state};
     my $old_node = $sd->{node};
+    my $old_failed_nodes = $sd->{failed_nodes};
+    my $old_maintenance_node = $sd->{maintenance_node};
 
     die "no state change" if $old_state eq $new_state; # just to be sure
 
@@ -195,6 +224,8 @@ my $change_service_state = sub {
 
     $sd->{state} = $new_state;
     $sd->{node} = $old_node;
+    $sd->{failed_nodes} = $old_failed_nodes if defined($old_failed_nodes);
+    $sd->{maintenance_node} = $old_maintenance_node if defined($old_maintenance_node);
 
     my $text_state = '';
     foreach my $k (sort keys %params) {
@@ -213,6 +244,27 @@ my $change_service_state = sub {
 	" to '${new_state}'$text_state");
 };
 
+# clean up a possible bad state from a recovered service to allow its start
+my $fence_recovery_cleanup = sub {
+    my ($self, $sid, $fenced_node) = @_;
+
+    my $haenv = $self->{haenv};
+
+    my (undef, $type, $id) = $haenv->parse_sid($sid);
+    my $plugin = PVE::HA::Resources->lookup($type);
+
+    # should not happen
+    die "unknown resource type '$type'" if !$plugin;
+
+    # locks may block recovery; clean up those which are safe to remove after fencing,
+    # i.e., after the original node was reset and thus all of its state is gone
+    my $removable_locks = ['backup', 'mounted', 'migrate', 'clone', 'rollback', 'snapshot', 'snapshot-delete', 'suspending', 'suspended'];
+    if (my $removed_lock = $plugin->remove_locks($haenv, $id, $removable_locks, $fenced_node)) {
+	$haenv->log('warning', "removed leftover lock '$removed_lock' from recovered " .
+ "service '$sid' to allow its start."); + } +}; + # after a node was fenced this recovers the service to a new node my $recover_fenced_service = sub { my ($self, $sid, $cd) = @_; @@ -239,19 +291,24 @@ my $recover_fenced_service = sub { $haenv->log('info', "recover service '$sid' from fenced node " . "'$fenced_node' to node '$recovery_node'"); + &$fence_recovery_cleanup($self, $sid, $fenced_node); + $haenv->steal_service($sid, $sd->{node}, $recovery_node); + $self->{online_node_usage}->{$recovery_node}++; # $sd *is normally read-only*, fencing is the exception $cd->{node} = $sd->{node} = $recovery_node; - &$change_service_state($self, $sid, 'started', node => $recovery_node); + my $new_state = ($cd->{state} eq 'started') ? 'started' : 'request_stop'; + &$change_service_state($self, $sid, $new_state, node => $recovery_node); } else { - # no node found, let the service in 'fence' state and try again + # no possible node found, cannot recover $haenv->log('err', "recovering service '$sid' from fenced node " . "'$fenced_node' failed, no recovery node found"); + &$change_service_state($self, $sid, 'error'); } }; -# read LRM status for all nodes +# read LRM status for all nodes sub read_lrm_status { my ($self) = @_; @@ -269,7 +326,7 @@ sub read_lrm_status { } } - + return ($results, $modes); } @@ -280,27 +337,35 @@ sub update_crm_commands { my ($haenv, $ms, $ns, $ss) = ($self->{haenv}, $self->{ms}, $self->{ns}, $self->{ss}); my $cmdlist = $haenv->read_crm_commands(); - + foreach my $cmd (split(/\n/, $cmdlist)) { chomp $cmd; if ($cmd =~ m/^(migrate|relocate)\s+(\S+)\s+(\S+)$/) { - my ($task, $sid, $node) = ($1, $2, $3); + my ($task, $sid, $node) = ($1, $2, $3); if (my $sd = $ss->{$sid}) { if (!$ns->node_is_online($node)) { $haenv->log('err', "crm command error - node not online: $cmd"); } else { if ($node eq $sd->{node}) { $haenv->log('info', "ignore crm command - service already on target node: $cmd"); - } else { + } else { $haenv->log('info', "got crm command: $cmd"); - $ss->{$sid}->{cmd} = [ $task, $node]; + $ss->{$sid}->{cmd} = [ $task, $node ]; } } } else { $haenv->log('err', "crm command error - no such service: $cmd"); } + } elsif ($cmd =~ m/^stop\s+(\S+)\s+(\S+)$/) { + my ($sid, $timeout) = ($1, $2); + if (my $sd = $ss->{$sid}) { + $haenv->log('info', "got crm command: $cmd"); + $ss->{$sid}->{cmd} = [ 'stop', $timeout ]; + } else { + $haenv->log('err', "crm command error - no such service: $cmd"); + } } else { $haenv->log('err', "unable to parse crm command: $cmd"); } @@ -313,15 +378,16 @@ sub manage { my ($haenv, $ms, $ns, $ss) = ($self->{haenv}, $self->{ms}, $self->{ns}, $self->{ss}); - $ns->update($haenv->get_node_info()); + my ($node_info) = $haenv->get_node_info(); + my ($lrm_results, $lrm_modes) = $self->read_lrm_status(); + + $ns->update($node_info, $lrm_modes); - if (!$ns->node_is_online($haenv->nodename())) { + if (!$ns->node_is_operational($haenv->nodename())) { $haenv->log('info', "master seems offline"); return; } - my ($lrm_results, $lrm_modes) = $self->read_lrm_status(); - my $sc = $haenv->read_service_config(); $self->{groups} = $haenv->read_group_config(); # update @@ -331,27 +397,35 @@ sub manage { # add new service foreach my $sid (sort keys %$sc) { next if $ss->{$sid}; # already there - $haenv->log('info', "adding new service '$sid' on node '$sc->{$sid}->{node}'"); + my $cd = $sc->{$sid}; + next if $cd->{state} eq 'ignored'; + + $haenv->log('info', "adding new service '$sid' on node '$cd->{node}'"); # assume we are running to avoid relocate running service at add - 
$ss->{$sid} = { state => 'started', node => $sc->{$sid}->{node}, + my $state = ($cd->{state} eq 'started') ? 'started' : 'request_stop'; + $ss->{$sid} = { state => $state, node => $cd->{node}, uid => compute_new_uuid('started') }; } - # remove stale service from manager state + # remove stale or ignored services from manager state foreach my $sid (keys %$ss) { - next if $sc->{$sid}; - $haenv->log('info', "removing stale service '$sid' (no config)"); + next if $sc->{$sid} && $sc->{$sid}->{state} ne 'ignored'; + + my $reason = defined($sc->{$sid}) ? 'ignored state requested' : 'no config'; + $haenv->log('info', "removing stale service '$sid' ($reason)"); + + # remove all service related state information delete $ss->{$sid}; } - + $self->update_crm_commands(); for (;;) { my $repeat = 0; - + $self->recompute_online_node_usage(); - foreach my $sid (keys %$ss) { + foreach my $sid (sort keys %$ss) { my $sd = $ss->{$sid}; my $cd = $sc->{$sid} || { state => 'disabled' }; @@ -383,7 +457,8 @@ sub manage { my $lrm_mode = $sd->{node} ? $lrm_modes->{$sd->{node}} : undef; # unfreeze - &$change_service_state($self, $sid, 'started') + my $state = ($cd->{state} eq 'started') ? 'started' : 'request_stop'; + &$change_service_state($self, $sid, $state) if $lrm_mode && $lrm_mode eq 'active'; } elsif ($last_state eq 'error') { @@ -408,7 +483,7 @@ sub manage { # handle fencing my $fenced_nodes = {}; - foreach my $sid (keys %$ss) { + foreach my $sid (sort keys %$ss) { my $sd = $ss->{$sid}; next if $sd->{state} ne 'fence'; @@ -454,7 +529,7 @@ sub next_state_request_stop { } } - if ($ns->node_is_offline_delayed($sd->{node}, $fence_delay)) { + if ($ns->node_is_offline_delayed($sd->{node})) { &$change_service_state($self, $sid, 'fence'); return; } @@ -469,23 +544,27 @@ sub next_state_migrate_relocate { # check result from LRM daemon if ($lrm_res) { my $exit_code = $lrm_res->{exit_code}; + my $req_state = $cd->{state} eq 'started' ? 'started' : 'request_stop'; if ($exit_code == SUCCESS) { - &$change_service_state($self, $sid, 'started', node => $sd->{target}); + &$change_service_state($self, $sid, $req_state, node => $sd->{target}); return; + } elsif ($exit_code == EWRONG_NODE) { + $haenv->log('err', "service '$sid' - migration failed: service" . 
+ " registered on wrong node!"); + &$change_service_state($self, $sid, 'error'); } else { $haenv->log('err', "service '$sid' - migration failed (exit code $exit_code)"); - &$change_service_state($self, $sid, 'started', node => $sd->{node}); + &$change_service_state($self, $sid, $req_state, node => $sd->{node}); return; } } - if ($ns->node_is_offline_delayed($sd->{node}, $fence_delay)) { + if ($ns->node_is_offline_delayed($sd->{node})) { &$change_service_state($self, $sid, 'fence'); return; } } - sub next_state_stopped { my ($self, $sid, $cd, $sd, $lrm_res) = @_; @@ -496,33 +575,49 @@ sub next_state_stopped { # this can happen if we fence a node with active migrations # hack: modify $sd (normally this should be considered read-only) $haenv->log('info', "fixup service '$sid' location ($sd->{node} => $cd->{node})"); - $sd->{node} = $cd->{node}; + $sd->{node} = $cd->{node}; } if ($sd->{cmd}) { - my ($cmd, $target) = @{$sd->{cmd}}; - delete $sd->{cmd}; + my $cmd = shift @{$sd->{cmd}}; if ($cmd eq 'migrate' || $cmd eq 'relocate') { + my $target = shift @{$sd->{cmd}}; if (!$ns->node_is_online($target)) { $haenv->log('err', "ignore service '$sid' $cmd request - node '$target' not online"); } elsif ($sd->{node} eq $target) { $haenv->log('info', "ignore service '$sid' $cmd request - service already on node '$target'"); } else { - &$change_service_state($self, $sid, $cmd, node => $target); + &$change_service_state($self, $sid, $cmd, node => $sd->{node}, + target => $target); return; } + } elsif ($cmd eq 'stop') { + $haenv->log('info', "ignore service '$sid' $cmd request - service already stopped"); } else { - $haenv->log('err', "unknown command '$cmd' for service '$sid'"); + $haenv->log('err', "unknown command '$cmd' for service '$sid'"); } - } + delete $sd->{cmd}; + } if ($cd->{state} eq 'disabled') { - # do nothing + # NOTE: do nothing here, the stop state is an exception as we do not + # process the LRM result here, thus the LRM always tries to stop the + # service (protection for the case no CRM is active) return; - } + } + + if ($ns->node_is_offline_delayed($sd->{node})) { + &$change_service_state($self, $sid, 'fence'); + return; + } - if ($cd->{state} eq 'enabled') { + if ($cd->{state} eq 'stopped') { + # almost the same as 'disabled' state but the service will also get recovered + return; + } + + if ($cd->{state} eq 'started') { # simply mark it started, if it's on the wrong node # next_state_started will fix that for us &$change_service_state($self, $sid, 'started', node => $sd->{node}); @@ -532,6 +627,16 @@ sub next_state_stopped { $haenv->log('err', "service '$sid' - unknown state '$cd->{state}' in service configuration"); } +sub record_service_failed_on_node { + my ($self, $sid, $node) = @_; + + if (!defined($self->{ss}->{$sid}->{failed_nodes})) { + $self->{ss}->{$sid}->{failed_nodes} = []; + } + + push @{$self->{ss}->{$sid}->{failed_nodes}}, $node; +} + sub next_state_started { my ($self, $sid, $cd, $sd, $lrm_res) = @_; @@ -540,24 +645,30 @@ sub next_state_started { my $ns = $self->{ns}; if (!$ns->node_is_online($sd->{node})) { - if ($ns->node_is_offline_delayed($sd->{node}, $fence_delay)) { + if ($ns->node_is_offline_delayed($sd->{node})) { &$change_service_state($self, $sid, 'fence'); } - return; + if ($ns->get_node_state($sd->{node}) ne 'maintenance') { + return; + } else { + # save current node as fallback for when it comes out of + # maintenance + $sd->{maintenance_node} = $sd->{node}; + } } - - if ($cd->{state} eq 'disabled') { + + if ($cd->{state} eq 'disabled' || $cd->{state} eq 
'stopped') { &$change_service_state($self, $sid, 'request_stop'); return; } - if ($cd->{state} eq 'enabled') { + if ($cd->{state} eq 'started') { if ($sd->{cmd}) { - my ($cmd, $target) = @{$sd->{cmd}}; - delete $sd->{cmd}; + my $cmd = shift @{$sd->{cmd}}; if ($cmd eq 'migrate' || $cmd eq 'relocate') { + my $target = shift @{$sd->{cmd}}; if (!$ns->node_is_online($target)) { $haenv->log('err', "ignore service '$sid' $cmd request - node '$target' not online"); } elsif ($sd->{node} eq $target) { @@ -566,57 +677,98 @@ sub next_state_started { $haenv->log('info', "$cmd service '$sid' to node '$target'"); &$change_service_state($self, $sid, $cmd, node => $sd->{node}, target => $target); } + } elsif ($cmd eq 'stop') { + my $timeout = shift @{$sd->{cmd}}; + if ($timeout == 0) { + $haenv->log('info', "request immediate service hard-stop for service '$sid'"); + } else { + $haenv->log('info', "request graceful stop with timeout '$timeout' for service '$sid'"); + } + &$change_service_state($self, $sid, 'request_stop', timeout => $timeout); + $haenv->update_service_config($sid, {'state' => 'stopped'}); } else { - $haenv->log('err', "unknown command '$cmd' for service '$sid'"); + $haenv->log('err', "unknown command '$cmd' for service '$sid'"); } + + delete $sd->{cmd}; + } else { my $try_next = 0; + if ($lrm_res) { + my $ec = $lrm_res->{exit_code}; if ($ec == SUCCESS) { - $master_status->{relocate_trial}->{$sid} = 0; + if (defined($sd->{failed_nodes})) { + $haenv->log('info', "relocation policy successful for '$sid' on node '$sd->{node}'," . + " failed nodes: " . join(', ', @{$sd->{failed_nodes}}) ); + } - } elsif ($ec == ETRY_AGAIN) { + delete $sd->{failed_nodes}; - # do nothing, the LRM wants to try again + # store flag to indicate successful start - only valid while state == 'started' + $sd->{running} = 1; } elsif ($ec == ERROR) { - # apply our relocate policy if we got ERROR from the LRM - my $try = $master_status->{relocate_trial}->{$sid} || 0; + delete $sd->{running}; + + # apply our relocate policy if we got ERROR from the LRM + $self->record_service_failed_on_node($sid, $sd->{node}); - if ($try < $cd->{max_relocate}) { + if (scalar(@{$sd->{failed_nodes}}) <= $cd->{max_relocate}) { - $try++; # tell select_service_node to relocate if possible $try_next = 1; $haenv->log('warning', "starting service $sid on node". " '$sd->{node}' failed, relocating service."); - $master_status->{relocate_trial}->{$sid} = $try; } else { - $haenv->log('err', "recovery policy for service". - " $sid failed, entering error state!"); + $haenv->log('err', "recovery policy for service $sid " . + "failed, entering error state. Failed nodes: ". + join(', ', @{$sd->{failed_nodes}})); &$change_service_state($self, $sid, 'error'); return; } } else { + $self->record_service_failed_on_node($sid, $sd->{node}); + $haenv->log('err', "service '$sid' got unrecoverable error" . 
" (exit code $ec))"); # we have no save way out (yet) for other errors &$change_service_state($self, $sid, 'error'); + return; } } - my $node = select_service_node($self->{groups}, $self->{online_node_usage}, - $cd, $sd->{node}, $try_next); + my $node = select_service_node( + $self->{groups}, + $self->{online_node_usage}, + $cd, + $sd->{node}, + $try_next, + $sd->{failed_nodes}, + $sd->{maintenance_node}, + ); if ($node && ($sd->{node} ne $node)) { + $self->{online_node_usage}->{$node}++; + + if (defined(my $fallback = $sd->{maintenance_node})) { + if ($node eq $fallback) { + $haenv->log('info', "moving service '$sid' back to '$fallback', node came back from maintenance."); + delete $sd->{maintenance_node}; + } elsif ($sd->{node} ne $fallback) { + $haenv->log('info', "dropping maintenance fallback node '$fallback' for '$sid'"); + delete $sd->{maintenance_node}; + } + } + if ($cd->{type} eq 'vm') { $haenv->log('info', "migrate service '$sid' to node '$node' (running)"); &$change_service_state($self, $sid, 'migrate', node => $sd->{node}, target => $node); @@ -625,12 +777,19 @@ sub next_state_started { &$change_service_state($self, $sid, 'relocate', node => $sd->{node}, target => $node); } } else { - # do nothing + if ($try_next && !defined($node)) { + $haenv->log('warning', "Start Error Recovery: Tried all available " . + " nodes for service '$sid', retry start on current node. " . + "Tried nodes: " . join(', ', @{$sd->{failed_nodes}})); + } + # ensure service get started again if it went unexpected down + # but ensure also no LRM result gets lost + $sd->{uid} = compute_new_uuid($sd->{state}) if defined($lrm_res); } } return; - } + } $haenv->log('err', "service '$sid' - unknown state '$cd->{state}' in service configuration"); } @@ -639,14 +798,13 @@ sub next_state_error { my ($self, $sid, $cd, $sd, $lrm_res) = @_; my $ns = $self->{ns}; + my $ms = $self->{ms}; if ($cd->{state} eq 'disabled') { - &$change_service_state($self, $sid, 'stopped'); - return; - } + # clean up on error recovery + delete $sd->{failed_nodes}; - if ($ns->node_is_offline_delayed($sd->{node}, $fence_delay)) { - &$change_service_state($self, $sid, 'fence'); + &$change_service_state($self, $sid, 'stopped'); return; }