package PVE::HA::Manager;

# Cluster resource manager (CRM) state machine for HA services.
# NOTE(review): source text was extraction-mangled; strictures restored
# per standard module conventions — verify against upstream layout.

use strict;
use warnings;

use Digest::MD5 qw(md5_base64);

use PVE::HA::Tools ':exit_codes';
use PVE::HA::NodeStatus;
# Constructor: binds the manager to an HA environment handle and restores
# the persisted manager state (node status + service status).
sub new {
    my ($this, $haenv) = @_;

    my $class = ref($this) || $this;

    my $self = bless { haenv => $haenv }, $class;

    my $old_ms = $haenv->read_manager_status();

    # we only copy the state part of the manager which cannot be auto generated

    $self->{ns} = PVE::HA::NodeStatus->new($haenv, $old_ms->{node_status} || {});

    # fixme: use separate class PVE::HA::ServiceStatus
    $self->{ss} = $old_ms->{service_status} || {};

    # master_node records which node currently runs the manager
    $self->{ms} = { master_node => $haenv->nodename() };

    return $self;
}
# Persist the current in-memory manager state (node status, service status,
# and a timestamp) through the HA environment.
sub flush_master_status {
    my ($self) = @_;

    my ($haenv, $ms, $ns, $ss) = ($self->{haenv}, $self->{ms}, $self->{ns}, $self->{ss});

    $ms->{node_status} = $ns->{status};
    $ms->{service_status} = $ss;
    $ms->{timestamp} = $haenv->get_time();

    $haenv->write_manager_status($ms);
}
# Resolve the node group a service belongs to. Defaults to an ad-hoc group
# containing all online nodes; a configured group (if it exists) wins.
sub get_service_group {
    my ($groups, $online_node_usage, $service_conf) = @_;

    my $group = {};
    # add all online nodes to default group to allow try_next when no group set
    foreach my $node (keys %$online_node_usage) {
        $group->{nodes}->{$node} = 1;
    }

    # overwrite default if service is bound to a specific group
    $group = $groups->{ids}->{$service_conf->{group}} if $service_conf->{group} &&
        $groups->{ids}->{$service_conf->{group}};

    return $group;
}
# groups available nodes with their priority as group index
# Returns ($pri_groups, $group_members): priority => { node => 1 } map and a
# node => priority map. Group entries may be "node:prio"; plain entries get
# priority 0; non-members of unrestricted groups get priority -1.
sub get_node_priority_groups {
    my ($group, $online_node_usage) = @_;

    my $pri_groups = {};
    my $group_members = {};
    foreach my $entry (keys %{$group->{nodes}}) {
        my ($node, $pri) = ($entry, 0);
        if ($entry =~ m/^(\S+):(\d+)$/) {
            ($node, $pri) = ($1, $2);
        }
        next if !defined($online_node_usage->{$node}); # offline
        $pri_groups->{$pri}->{$node} = 1;
        $group_members->{$node} = $pri;
    }

    # add non-group members to unrestricted groups (priority -1)
    if (!$group->{restricted}) {
        my $pri = -1;
        foreach my $node (keys %$online_node_usage) {
            next if defined($group_members->{$node});
            $pri_groups->{$pri}->{$node} = 1;
            $group_members->{$node} = -1;
        }
    }

    return ($pri_groups, $group_members);
}
# Pick the best node for a service: highest-priority group, least used node
# first; sticks to $current_node when possible unless $try_next requests a
# different node (relocation after failure).
# NOTE(review): missing lines ($found scan, try_next guard, return paths)
# restored to match the visible control flow — verify against upstream.
sub select_service_node {
    my ($groups, $online_node_usage, $service_conf, $current_node, $try_next, $tried_nodes) = @_;

    my $group = get_service_group($groups, $online_node_usage, $service_conf);

    my ($pri_groups, $group_members) = get_node_priority_groups($group, $online_node_usage);

    my @pri_list = sort {$b <=> $a} keys %$pri_groups;
    return undef if !scalar(@pri_list);

    # stay on current node if possible (avoids random migrations)
    if (!$try_next && $group->{nofailback} && defined($group_members->{$current_node})) {
        return $current_node;
    }

    # select node from top priority node list
    my $top_pri = $pri_list[0];

    # try to avoid nodes where the service failed already if we want to relocate
    if ($try_next) {
        foreach my $node (@$tried_nodes) {
            delete $pri_groups->{$top_pri}->{$node};
        }
    }

    # least loaded first, node name as tie breaker
    my @nodes = sort {
        $online_node_usage->{$a} <=> $online_node_usage->{$b} || $a cmp $b
    } keys %{$pri_groups->{$top_pri}};

    # locate the current node in the candidate list (if present)
    my $found;
    for (my $i = scalar(@nodes) - 1; $i >= 0; $i--) {
        my $node = $nodes[$i];
        if ($node eq $current_node) {
            $found = $i;
            last;
        }
    }

    if ($try_next) {
        if (defined($found) && ($found < (scalar(@nodes) - 1))) {
            return $nodes[$found + 1];
        } else {
            return $nodes[0];
        }
    } else {
        return $nodes[$found] if defined($found);
        return $nodes[0];
    }

    return undef;
}
# Monotonic counter makes ids unique even within one second.
my $uid_counter = 0;

# Return a fresh id for a service state transition; LRM results are matched
# back to the state change via this uid.
sub compute_new_uuid {
    my ($state) = @_;

    $uid_counter++;
    return md5_base64($state . $$ . time() . $uid_counter);
}
# All CRM service states the state machine may enter; used as a sanity
# check in $change_service_state.
# NOTE(review): hash contents were lost in extraction and restored from the
# states referenced throughout this file — verify against upstream.
my $valid_service_states = {
    stopped => 1,
    request_stop => 1,
    started => 1,
    fence => 1,
    freeze => 1,
    migrate => 1,
    relocate => 1,
    error => 1,
};
# Recount how many services each online node carries; the count steers
# select_service_node towards the least loaded node. Migrating/relocating
# services are charged to their target node.
sub recompute_online_node_usage {
    my ($self) = @_;

    my $online_node_usage = {};

    my $online_nodes = $self->{ns}->list_online_nodes();

    foreach my $node (@$online_nodes) {
        $online_node_usage->{$node} = 0;
    }

    foreach my $sid (keys %{$self->{ss}}) {
        my $sd = $self->{ss}->{$sid};
        my $state = $sd->{state};
        # only count services on nodes we consider online
        if (defined($online_node_usage->{$sd->{node}})) {
            if (($state eq 'started') || ($state eq 'request_stop') ||
                ($state eq 'fence') || ($state eq 'freeze') || ($state eq 'error')) {
                $online_node_usage->{$sd->{node}}++;
            } elsif (($state eq 'migrate') || ($state eq 'relocate')) {
                $online_node_usage->{$sd->{target}}++;
            } elsif ($state eq 'stopped') {
                # stopped services consume no capacity
            } else {
                die "should not be reached";
            }
        }
    }

    $self->{online_node_usage} = $online_node_usage;
}
# Transition a service to $new_state, preserving node/failed_nodes, applying
# extra %params to the state record, and assigning a fresh uid.
# Fix: the "no such service" die message had an unterminated inner quote.
# NOTE(review): $text_state accumulation and param application restored from
# the visible fragments — verify against upstream.
my $change_service_state = sub {
    my ($self, $sid, $new_state, %params) = @_;

    my ($haenv, $ss) = ($self->{haenv}, $self->{ss});

    my $sd = $ss->{$sid} || die "no such service '$sid'";

    my $old_state = $sd->{state};
    my $old_node = $sd->{node};
    my $old_failed_nodes = $sd->{failed_nodes};

    die "no state change" if $old_state eq $new_state; # just to be sure

    die "invalid CRM service state '$new_state'\n" if !$valid_service_states->{$new_state};

    # reset the record, keeping only the fields that survive a transition
    foreach my $k (keys %$sd) { delete $sd->{$k}; };

    $sd->{state} = $new_state;
    $sd->{node} = $old_node;
    $sd->{failed_nodes} = $old_failed_nodes;

    my $text_state = '';
    foreach my $k (sort keys %params) {
        my $v = $params{$k};
        $text_state .= ", " if $text_state;
        $text_state .= "$k = $v";
        $sd->{$k} = $v;
    }

    $self->recompute_online_node_usage();

    $sd->{uid} = compute_new_uuid($new_state);

    $text_state = " ($text_state)" if $text_state;
    $haenv->log('info', "service '$sid': state changed from '${old_state}'" .
                " to '${new_state}'$text_state");
};
# clean up a possible bad state from a recovered service to allow its start
my $fence_recovery_cleanup = sub {
    my ($self, $sid, $fenced_node) = @_;

    my $haenv = $self->{haenv};

    my (undef, $type, $id) = PVE::HA::Tools::parse_sid($sid);
    my $plugin = PVE::HA::Resources->lookup($type);

    die "unknown resource type '$type'" if !$plugin;

    # locks may block recovery, cleanup those which are safe to remove after fencing
    my $removable_locks = ['backup', 'mounted'];
    if (my $removed_lock = $plugin->remove_locks($haenv, $id, $removable_locks, $fenced_node)) {
        $haenv->log('warning', "removed leftover lock '$removed_lock' from recovered " .
                    "service '$sid' to allow its start.");
    }
};
# after a node was fenced this recovers the service to a new node
# NOTE(review): select_service_node argument list and the error-return were
# partially lost in extraction; restored as ($groups, usage, $cd, current
# node) — verify against upstream.
my $recover_fenced_service = sub {
    my ($self, $sid, $cd) = @_;

    my ($haenv, $ss) = ($self->{haenv}, $self->{ss});

    my $sd = $ss->{$sid};

    if ($sd->{state} ne 'fence') { # should not happen
        $haenv->log('err', "cannot recover service '$sid' from fencing," .
                    " wrong state '$sd->{state}'");
        return;
    }

    my $fenced_node = $sd->{node}; # for logging purpose

    $self->recompute_online_node_usage(); # we want the most current node state

    my $recovery_node = select_service_node($self->{groups},
                                            $self->{online_node_usage},
                                            $cd, $sd->{node});

    if ($recovery_node) {
        $haenv->log('info', "recover service '$sid' from fenced node " .
                    "'$fenced_node' to node '$recovery_node'");

        &$fence_recovery_cleanup($self, $sid, $fenced_node);

        $haenv->steal_service($sid, $sd->{node}, $recovery_node);

        # $sd *is normally read-only*, fencing is the exception
        $cd->{node} = $sd->{node} = $recovery_node;
        &$change_service_state($self, $sid, 'started', node => $recovery_node);
    } else {
        # no node found, let the service in 'fence' state and try again
        $haenv->log('err', "recovering service '$sid' from fenced node " .
                    "'$fenced_node' failed, no recovery node found");
    }
};
# read LRM status for all nodes
# Returns ($results, $modes): uid => LRM result record, node => LRM mode
# (defaults to 'active' when the LRM reports none).
sub read_lrm_status {
    my ($self) = @_;

    my $nodes = $self->{ns}->list_nodes();
    my $haenv = $self->{haenv};

    my $results = {};
    my $modes = {};
    foreach my $node (@$nodes) {
        my $lrm_status = $haenv->read_lrm_status($node);
        $modes->{$node} = $lrm_status->{mode} || 'active';
        foreach my $uid (keys %{$lrm_status->{results}}) {
            next if $results->{$uid}; # should not happen
            $results->{$uid} = $lrm_status->{results}->{$uid};
        }
    }

    return ($results, $modes);
}
# read new crm commands and save them into crm master status
# Parses "migrate|relocate <sid> <node>" commands; invalid or pointless
# requests are logged and dropped, valid ones are queued on the service.
sub update_crm_commands {
    my ($self) = @_;

    my ($haenv, $ms, $ns, $ss) = ($self->{haenv}, $self->{ms}, $self->{ns}, $self->{ss});

    my $cmdlist = $haenv->read_crm_commands();

    foreach my $cmd (split(/\n/, $cmdlist)) {
        chomp $cmd;

        if ($cmd =~ m/^(migrate|relocate)\s+(\S+)\s+(\S+)$/) {
            my ($task, $sid, $node) = ($1, $2, $3);
            if (my $sd = $ss->{$sid}) {
                if (!$ns->node_is_online($node)) {
                    $haenv->log('err', "crm command error - node not online: $cmd");
                } else {
                    if ($node eq $sd->{node}) {
                        $haenv->log('info', "ignore crm command - service already on target node: $cmd");
                    } else {
                        $haenv->log('info', "got crm command: $cmd");
                        $ss->{$sid}->{cmd} = [ $task, $node];
                    }
                }
            } else {
                $haenv->log('err', "crm command error - no such service: $cmd");
            }
        } else {
            $haenv->log('err', "unable to parse crm command: $cmd");
        }
    }
}
# Main manager loop iteration: refresh node info, sync service list with the
# config, process queued commands, then run the per-service state machine
# until no more transitions happen; finally handle fencing and persist state.
# NOTE(review): the sub header, the repeat loop and the stale-service removal
# were partially lost in extraction and restored — verify against upstream.
sub manage {
    my ($self) = @_;

    my ($haenv, $ms, $ns, $ss) = ($self->{haenv}, $self->{ms}, $self->{ns}, $self->{ss});

    $ns->update($haenv->get_node_info());

    if (!$ns->node_is_online($haenv->nodename())) {
        $haenv->log('info', "master seems offline");
        return;
    }

    my ($lrm_results, $lrm_modes) = $self->read_lrm_status();

    my $sc = $haenv->read_service_config();

    $self->{groups} = $haenv->read_group_config(); # update

    # compute new service status

    # add new services
    foreach my $sid (sort keys %$sc) {
        next if $ss->{$sid}; # already there
        $haenv->log('info', "adding new service '$sid' on node '$sc->{$sid}->{node}'");
        # assume we are running to avoid relocate running service at add
        $ss->{$sid} = { state => 'started', node => $sc->{$sid}->{node},
                        uid => compute_new_uuid('started') };
    }

    # remove stale service from manager state
    foreach my $sid (keys %$ss) {
        next if $sc->{$sid};
        $haenv->log('info', "removing stale service '$sid' (no config)");
        # remove all service related state information
        delete $ss->{$sid};
    }

    $self->update_crm_commands();

    for (;;) {
        my $repeat = 0;

        $self->recompute_online_node_usage();

        foreach my $sid (sort keys %$ss) {
            my $sd = $ss->{$sid};
            my $cd = $sc->{$sid} || { state => 'disabled' };

            my $lrm_res = $sd->{uid} ? $lrm_results->{$sd->{uid}} : undef;

            my $last_state = $sd->{state};

            if ($last_state eq 'stopped') {

                $self->next_state_stopped($sid, $cd, $sd, $lrm_res);

            } elsif ($last_state eq 'started') {

                $self->next_state_started($sid, $cd, $sd, $lrm_res);

            } elsif ($last_state eq 'migrate' || $last_state eq 'relocate') {

                $self->next_state_migrate_relocate($sid, $cd, $sd, $lrm_res);

            } elsif ($last_state eq 'fence') {

                # do nothing here - wait until fenced

            } elsif ($last_state eq 'request_stop') {

                $self->next_state_request_stop($sid, $cd, $sd, $lrm_res);

            } elsif ($last_state eq 'freeze') {

                # unfreeze as soon as the node's LRM is active again
                my $lrm_mode = $sd->{node} ? $lrm_modes->{$sd->{node}} : undef;
                &$change_service_state($self, $sid, 'started')
                    if $lrm_mode && $lrm_mode eq 'active';

            } elsif ($last_state eq 'error') {

                $self->next_state_error($sid, $cd, $sd, $lrm_res);

            } else {

                die "unknown service state '$last_state'";
            }

            # freeze services whose node's LRM is restarting
            my $lrm_mode = $sd->{node} ? $lrm_modes->{$sd->{node}} : undef;
            if ($lrm_mode && $lrm_mode eq 'restart') {
                if (($sd->{state} eq 'started' || $sd->{state} eq 'stopped' ||
                     $sd->{state} eq 'request_stop')) {
                    &$change_service_state($self, $sid, 'freeze');
                }
            }

            $repeat = 1 if $sd->{state} ne $last_state;
        }

        # handle fencing
        my $fenced_nodes = {};
        foreach my $sid (sort keys %$ss) {
            my $sd = $ss->{$sid};
            next if $sd->{state} ne 'fence';

            if (!defined($fenced_nodes->{$sd->{node}})) {
                $fenced_nodes->{$sd->{node}} = $ns->fence_node($sd->{node}) || 0;
            }

            next if !$fenced_nodes->{$sd->{node}};

            # node fence was successful - recover service
            &$recover_fenced_service($self, $sid, $sc->{$sid});
        }

        last if !$repeat;
    }

    $self->flush_master_status();
}
479 # functions to compute next service states
480 # $cd: service configuration data (read only)
481 # $sd: service status data (read only)
483 # Note: use change_service_state() to alter state
# State handler for 'request_stop': consume the LRM stop result, or fence
# the service's node when it stayed offline too long.
sub next_state_request_stop {
    my ($self, $sid, $cd, $sd, $lrm_res) = @_;

    my $haenv = $self->{haenv};
    my $ns = $self->{ns};

    # check result from LRM daemon
    if ($lrm_res) {
        my $exit_code = $lrm_res->{exit_code};
        if ($exit_code == SUCCESS) {
            &$change_service_state($self, $sid, 'stopped');
            return;
        } else {
            $haenv->log('err', "service '$sid' stop failed (exit code $exit_code)");
            &$change_service_state($self, $sid, 'error'); # fixme: what state?
            return;
        }
    }

    if ($ns->node_is_offline_delayed($sd->{node})) {
        &$change_service_state($self, $sid, 'fence');
        return;
    }
}
# State handler for 'migrate'/'relocate': on success adopt the target node,
# on failure fall back to the source node (or 'error' when the service ended
# up registered on the wrong node); fence offline source nodes.
sub next_state_migrate_relocate {
    my ($self, $sid, $cd, $sd, $lrm_res) = @_;

    my $haenv = $self->{haenv};
    my $ns = $self->{ns};

    # check result from LRM daemon
    if ($lrm_res) {
        my $exit_code = $lrm_res->{exit_code};
        my $req_state = $cd->{state} eq 'enabled' ? 'started' : 'request_stop';
        if ($exit_code == SUCCESS) {
            &$change_service_state($self, $sid, $req_state, node => $sd->{target});
            return;
        } elsif ($exit_code == EWRONG_NODE) {
            $haenv->log('err', "service '$sid' - migration failed: service" .
                        " registered on wrong node!");
            &$change_service_state($self, $sid, 'error');
        } else {
            $haenv->log('err', "service '$sid' - migration failed (exit code $exit_code)");
            &$change_service_state($self, $sid, $req_state, node => $sd->{node});
            return;
        }
    }

    if ($ns->node_is_offline_delayed($sd->{node})) {
        &$change_service_state($self, $sid, 'fence');
        return;
    }
}
# State handler for 'stopped': fix up the recorded node after fencing,
# process queued migrate/relocate commands, and start the service again
# when its configured state is 'enabled'.
sub next_state_stopped {
    my ($self, $sid, $cd, $sd, $lrm_res) = @_;

    my $haenv = $self->{haenv};
    my $ns = $self->{ns};

    if ($sd->{node} ne $cd->{node}) {
        # this can happen if we fence a node with active migrations
        # hack: modify $sd (normally this should be considered read-only)
        $haenv->log('info', "fixup service '$sid' location ($sd->{node} => $cd->{node})");
        $sd->{node} = $cd->{node};
    }

    if ($sd->{cmd}) {
        my ($cmd, $target) = @{$sd->{cmd}};
        delete $sd->{cmd};

        if ($cmd eq 'migrate' || $cmd eq 'relocate') {
            if (!$ns->node_is_online($target)) {
                $haenv->log('err', "ignore service '$sid' $cmd request - node '$target' not online");
            } elsif ($sd->{node} eq $target) {
                $haenv->log('info', "ignore service '$sid' $cmd request - service already on node '$target'");
            } else {
                &$change_service_state($self, $sid, $cmd, node => $sd->{node},
                                       target => $target);
                return;
            }
        } else {
            $haenv->log('err', "unknown command '$cmd' for service '$sid'");
        }
    }

    if ($cd->{state} eq 'disabled') {
        # NOTE: do nothing here, the stop state is an exception as we do not
        # process the LRM result here, thus the LRM always tries to stop the
        # service (protection for the case no CRM is active)
        return;
    }

    if ($cd->{state} eq 'enabled') {
        # simply mark it started, if it's on the wrong node
        # next_state_started will fix that for us
        &$change_service_state($self, $sid, 'started', node => $sd->{node});
        return;
    }

    $haenv->log('err', "service '$sid' - unknown state '$cd->{state}' in service configuration");
}
# Append $node to the service's failed_nodes list (creating the list on
# first failure); consumed by the relocation policy in next_state_started.
sub record_service_failed_on_node {
    my ($self, $sid, $node) = @_;

    if (!defined($self->{ss}->{$sid}->{failed_nodes})) {
        $self->{ss}->{$sid}->{failed_nodes} = [];
    }

    push @{$self->{ss}->{$sid}->{failed_nodes}}, $node;
}
# State handler for 'started': fence offline nodes, honor disable requests
# and queued commands, apply the relocation policy on LRM errors, and move
# the service to a better node when select_service_node suggests one.
# NOTE(review): the $try_next/$lrm_res branch skeleton was partially lost in
# extraction and restored from the visible fragments — verify upstream.
sub next_state_started {
    my ($self, $sid, $cd, $sd, $lrm_res) = @_;

    my $haenv = $self->{haenv};
    my $master_status = $self->{ms};
    my $ns = $self->{ns};

    if (!$ns->node_is_online($sd->{node})) {
        if ($ns->node_is_offline_delayed($sd->{node})) {
            &$change_service_state($self, $sid, 'fence');
        }
        return;
    }

    if ($cd->{state} eq 'disabled') {
        &$change_service_state($self, $sid, 'request_stop');
        return;
    }

    if ($cd->{state} eq 'enabled') {

        if ($sd->{cmd}) {
            my ($cmd, $target) = @{$sd->{cmd}};
            delete $sd->{cmd};

            if ($cmd eq 'migrate' || $cmd eq 'relocate') {
                if (!$ns->node_is_online($target)) {
                    $haenv->log('err', "ignore service '$sid' $cmd request - node '$target' not online");
                } elsif ($sd->{node} eq $target) {
                    $haenv->log('info', "ignore service '$sid' $cmd request - service already on node '$target'");
                } else {
                    $haenv->log('info', "$cmd service '$sid' to node '$target'");
                    &$change_service_state($self, $sid, $cmd, node => $sd->{node},
                                           target => $target);
                }
            } else {
                $haenv->log('err', "unknown command '$cmd' for service '$sid'");
            }
        } else {

            my $try_next = 0;

            if ($lrm_res) {

                my $ec = $lrm_res->{exit_code};
                if ($ec == SUCCESS) {

                    if (defined($sd->{failed_nodes})) {
                        $haenv->log('info', "relocation policy successful for '$sid' on node '$sd->{node}'," .
                                    " failed nodes: " . join(', ', @{$sd->{failed_nodes}}) );
                    }

                    delete $sd->{failed_nodes};

                } elsif ($ec == ERROR) {
                    # apply our relocate policy if we got ERROR from the LRM
                    $self->record_service_failed_on_node($sid, $sd->{node});

                    if (scalar(@{$sd->{failed_nodes}}) <= $cd->{max_relocate}) {

                        # tell select_service_node to relocate if possible
                        $try_next = 1;

                        $haenv->log('warning', "starting service $sid on node".
                                    " '$sd->{node}' failed, relocating service.");

                    } else {

                        $haenv->log('err', "recovery policy for service $sid " .
                                    "failed, entering error state. Failed nodes: ".
                                    join(', ', @{$sd->{failed_nodes}}));
                        &$change_service_state($self, $sid, 'error');
                        return;

                    }
                } else {
                    $self->record_service_failed_on_node($sid, $sd->{node});

                    $haenv->log('err', "service '$sid' got unrecoverable error" .
                                " (exit code $ec))");
                    # we have no save way out (yet) for other errors
                    &$change_service_state($self, $sid, 'error');
                    return;
                }
            }

            my $node = select_service_node($self->{groups}, $self->{online_node_usage},
                                           $cd, $sd->{node}, $try_next, $sd->{failed_nodes});

            if ($node && ($sd->{node} ne $node)) {
                if ($cd->{type} eq 'vm') {
                    $haenv->log('info', "migrate service '$sid' to node '$node' (running)");
                    &$change_service_state($self, $sid, 'migrate', node => $sd->{node},
                                           target => $node);
                } else {
                    $haenv->log('info', "relocate service '$sid' to node '$node'");
                    &$change_service_state($self, $sid, 'relocate', node => $sd->{node},
                                           target => $node);
                }
            } else {
                if ($try_next && !defined($node)) {
                    $haenv->log('warning', "Start Error Recovery: Tried all available " .
                                " nodes for service '$sid', retry start on current node. " .
                                "Tried nodes: " . join(', ', @{$sd->{failed_nodes}}));
                }
                # ensure service get started again if it went unexpected down
                # but ensure also no LRM result gets lost
                $sd->{uid} = compute_new_uuid($sd->{state}) if defined($lrm_res);
            }
        }

        return;
    }

    $haenv->log('err', "service '$sid' - unknown state '$cd->{state}' in service configuration");
}
715 sub next_state_error
{
716 my ($self, $sid, $cd, $sd, $lrm_res) = @_;
718 my $ns = $self->{ns
};
719 my $ms = $self->{ms
};
721 if ($cd->{state} eq 'disabled') {
722 # clean up on error recovery
723 delete $sd->{failed_nodes
};
725 &$change_service_state($self, $sid, 'stopped');