package PVE::HA::Manager;

use strict;
use warnings;

use Digest::MD5 qw(md5_base64);

use PVE::Tools;
use PVE::HA::Tools ':exit_codes';
use PVE::HA::NodeStatus;
use PVE::HA::Usage::Basic;
use PVE::HA::Usage::Static;
## Variable Name & Abbreviation Conventions

# The HA stack uses some variables so frequently that they are abbreviated in a way that may be
# confusing for new readers. Here's a short list of the most commonly used ones.
#
# NOTE: variables should be assumed to be read-only unless stated otherwise; only use the
# dedicated methods to re-compute/read/alter them.
#
# - $haenv -> HA environment, the main interface to the simulator/test/real world
# - $sid   -> Service ID, unique identifier for a service, `type:vmid` is common
#
# - $ms -> Master/Manager Status, contains runtime info from the current active manager
# - $ns -> Node Status, hash holding online/offline status about all nodes
#
# - $ss -> Service Status, hash holding the current state (last LRM cmd result, failed starts
#          or migrates, maintenance fallback node, ...) for *all* services
# - $sd -> Service Data, the service status of a *single* service, iow. $ss->{$sid}
#
# - $sc -> Service Configuration, hash for all services including target state, group, ...
# - $cd -> Configuration Data, the service config of a *single* service, iow. $sc->{$sid}
#
# Try to avoid adding new two-letter (or similarly over-abbreviated) names, but also don't send
# patches to change the set above, as it is mostly sensible and should be easy to remember once
# you have spent a bit of time in the HA code base.
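#
# As a quick illustration of how the global and per-service views relate (illustrative $sid
# value, any service ID works the same way):
#
#   my $sd = $ss->{$sid};    # status data of one service
#   my $cd = $sc->{$sid};    # config data of the same service
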
sub new {
    my ($this, $haenv) = @_;

    my $class = ref($this) || $this;

    my $self = bless { haenv => $haenv, crs => {} }, $class;

    my $old_ms = $haenv->read_manager_status();

    # we only copy the state part of the manager which cannot be auto-generated

    $self->{ns} = PVE::HA::NodeStatus->new($haenv, $old_ms->{node_status} || {});

    # fixme: use separate class PVE::HA::ServiceStatus
    $self->{ss} = $old_ms->{service_status} || {};

    $self->{ms} = { master_node => $haenv->nodename() };

    $self->update_crs_scheduler_mode(); # initial set, we update it once every loop

    return $self;
}
sub update_crs_scheduler_mode {
    my ($self) = @_;

    my $haenv = $self->{haenv};
    my $dc_cfg = $haenv->get_datacenter_settings();

    $self->{crs}->{rebalance_on_request_start} = !!$dc_cfg->{crs}->{'ha-rebalance-on-start'};

    my $old_mode = $self->{crs}->{scheduler};
    my $new_mode = $dc_cfg->{crs}->{ha} || 'basic';

    if (!defined($old_mode)) {
        $haenv->log('info', "using scheduler mode '$new_mode'") if $new_mode ne 'basic';
    } elsif ($new_mode eq $old_mode) {
        return; # nothing to do
    } else {
        $haenv->log('info', "switching scheduler mode from '$old_mode' to '$new_mode'");
    }

    $self->{crs}->{scheduler} = $new_mode;

    return;
}
sub flush_master_status {
    my ($self) = @_;

    my ($haenv, $ms, $ns, $ss) = ($self->{haenv}, $self->{ms}, $self->{ns}, $self->{ss});

    $ms->{node_status} = $ns->{status};
    $ms->{service_status} = $ss;
    $ms->{timestamp} = $haenv->get_time();

    $haenv->write_manager_status($ms);
}
sub get_service_group {
    my ($groups, $online_node_usage, $service_conf) = @_;

    my $group = {};
    # add all online nodes to default group to allow try_next when no group set
    $group->{nodes}->{$_} = 1 for $online_node_usage->list_nodes();

    # overwrite default if service is bound to a specific group
    if (my $group_id = $service_conf->{group}) {
        $group = $groups->{ids}->{$group_id} if $groups->{ids}->{$group_id};
    }

    return $group;
}
# groups available nodes with their priority as group index
sub get_node_priority_groups {
    my ($group, $online_node_usage) = @_;

    my $pri_groups = {};
    my $group_members = {};
    foreach my $entry (keys %{$group->{nodes}}) {
        my ($node, $pri) = ($entry, 0);
        if ($entry =~ m/^(\S+):(\d+)$/) {
            ($node, $pri) = ($1, $2);
        }
        next if !$online_node_usage->contains_node($node); # offline
        $pri_groups->{$pri}->{$node} = 1;
        $group_members->{$node} = $pri;
    }

    # add non-group members to unrestricted groups (priority -1)
    if (!$group->{restricted}) {
        my $pri = -1;
        for my $node ($online_node_usage->list_nodes()) {
            next if defined($group_members->{$node});
            $pri_groups->{$pri}->{$node} = 1;
            $group_members->{$node} = -1;
        }
    }

    return ($pri_groups, $group_members);
}
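
# For example (illustrative values): a group with nodes => { 'node1:2' => 1, 'node2' => 1 }
# and both nodes online yields
#   $pri_groups    = { 2 => { node1 => 1 }, 0 => { node2 => 1 } }
#   $group_members = { node1 => 2, node2 => 0 }
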
sub select_service_node {
    my (
        $groups,
        $online_node_usage,
        $sid,
        $service_conf,
        $current_node,
        $try_next,
        $tried_nodes,
        $maintenance_fallback,
        $best_scored,
    ) = @_;

    my $group = get_service_group($groups, $online_node_usage, $service_conf);

    my ($pri_groups, $group_members) = get_node_priority_groups($group, $online_node_usage);

    my @pri_list = sort { $b <=> $a } keys %$pri_groups;
    return undef if !scalar(@pri_list);

    # stay on current node if possible (avoids random migrations)
    if ((!$try_next && !$best_scored) && $group->{nofailback} && defined($group_members->{$current_node})) {
        return $current_node;
    }

    # select node from top priority node list

    my $top_pri = $pri_list[0];

    # try to avoid nodes where the service failed already if we want to relocate
    if ($try_next) {
        foreach my $node (@$tried_nodes) {
            delete $pri_groups->{$top_pri}->{$node};
        }
    }

    return $maintenance_fallback
        if defined($maintenance_fallback) && $pri_groups->{$top_pri}->{$maintenance_fallback};

    return $current_node if (!$try_next && !$best_scored) && $pri_groups->{$top_pri}->{$current_node};

    my $scores = $online_node_usage->score_nodes_to_start_service($sid, $current_node);
    my @nodes = sort {
        $scores->{$a} <=> $scores->{$b} || $a cmp $b
    } keys %{$pri_groups->{$top_pri}};

    my $found;
    for (my $i = scalar(@nodes) - 1; $i >= 0; $i--) {
        my $node = $nodes[$i];
        if ($node eq $current_node) {
            $found = $i;
        }
    }

    if ($try_next) {
        if (!$best_scored && defined($found) && ($found < (scalar(@nodes) - 1))) {
            return $nodes[$found + 1];
        } else {
            return $nodes[0];
        }
    }

    return $nodes[0];
}
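
# In short (behavior sketch of the code above): only nodes from the highest-priority group are
# considered; with $try_next set, nodes recorded in @$tried_nodes are skipped; a usable
# maintenance fallback or the current node win early; otherwise the candidates are ordered by
# ascending usage score and the cheapest node is picked.
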
my $uid_counter = 0;

sub compute_new_uuid {
    my ($state) = @_;

    $uid_counter++;
    return md5_base64($state . $$ . time() . $uid_counter);
}
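
# The uid identifies one specific CRM command instance: it is stored in $sd->{uid} on each
# state change, and the LRMs report their results keyed by this uid (see read_lrm_status()
# and the $lrm_results lookups in manage()).
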
my $valid_service_states = {
    stopped => 1,
    request_stop => 1,
    request_start => 1,
    request_start_balance => 1,
    started => 1,
    fence => 1,
    recovery => 1,
    migrate => 1,
    relocate => 1,
    freeze => 1,
    error => 1,
};
# FIXME with 'static' mode and thousands of services, the overhead can be noticeable and the fact
# that this function is called for each state change and upon recovery doesn't help.
sub recompute_online_node_usage {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    my $online_nodes = $self->{ns}->list_online_nodes();

    my $online_node_usage;

    if (my $mode = $self->{crs}->{scheduler}) {
        if ($mode eq 'static') {
            $online_node_usage = eval {
                my $scheduler = PVE::HA::Usage::Static->new($haenv);
                $scheduler->add_node($_) for $online_nodes->@*;
                return $scheduler;
            };
            $haenv->log('warning', "fallback to 'basic' scheduler mode, init for 'static' failed - $@")
                if $@;
        } elsif ($mode eq 'basic') {
            # handled below in the general fall-back case
        } else {
            $haenv->log('warning', "got unknown scheduler mode '$mode', using 'basic'");
        }
    }

    # fallback to the basic algorithm in any case
    if (!$online_node_usage) {
        $online_node_usage = PVE::HA::Usage::Basic->new($haenv);
        $online_node_usage->add_node($_) for $online_nodes->@*;
    }

    foreach my $sid (sort keys %{$self->{ss}}) {
        my $sd = $self->{ss}->{$sid};
        my $state = $sd->{state};
        my $target = $sd->{target}; # optional
        if ($online_node_usage->contains_node($sd->{node})) {
            if (
                $state eq 'started' || $state eq 'request_stop' || $state eq 'fence'
                || $state eq 'freeze' || $state eq 'error' || $state eq 'recovery'
            ) {
                $online_node_usage->add_service_usage_to_node($sd->{node}, $sid, $sd->{node});
            } elsif ($state eq 'migrate' || $state eq 'relocate' || $state eq 'request_start_balance') {
                my $source = $sd->{node};
                # count it for both, source and target as load is put on both
                $online_node_usage->add_service_usage_to_node($source, $sid, $source, $target)
                    if $state ne 'request_start_balance';
                $online_node_usage->add_service_usage_to_node($target, $sid, $source, $target);
            } elsif ($state eq 'stopped' || $state eq 'request_start') {
                # do nothing
            } else {
                die "should not be reached (sid = '$sid', state = '$state')";
            }
        } elsif (defined($target) && $online_node_usage->contains_node($target)) {
            if ($state eq 'migrate' || $state eq 'relocate') {
                # to correctly track maintenance modi and also consider the target as used for the
                # case a node dies, as we cannot really know if the to-be-aborted incoming migration
                # has already cleaned up all used resources
                $online_node_usage->add_service_usage_to_node($target, $sid, $sd->{node}, $target);
            }
        }
    }

    $self->{online_node_usage} = $online_node_usage;
}
my $change_service_state = sub {
    my ($self, $sid, $new_state, %params) = @_;

    my ($haenv, $ss) = ($self->{haenv}, $self->{ss});

    my $sd = $ss->{$sid} || die "no such service '$sid'";

    my $old_state = $sd->{state};
    my $old_node = $sd->{node};
    my $old_failed_nodes = $sd->{failed_nodes};
    my $old_maintenance_node = $sd->{maintenance_node};

    die "no state change" if $old_state eq $new_state; # just to be sure

    die "invalid CRM service state '$new_state'\n" if !$valid_service_states->{$new_state};

    foreach my $k (keys %$sd) { delete $sd->{$k}; };

    $sd->{state} = $new_state;
    $sd->{node} = $old_node;
    $sd->{failed_nodes} = $old_failed_nodes if defined($old_failed_nodes);
    $sd->{maintenance_node} = $old_maintenance_node if defined($old_maintenance_node);

    my $text_state = '';
    foreach my $k (sort keys %params) {
        my $v = $params{$k};
        $text_state .= ", " if $text_state;
        $text_state .= "$k = $v";
        $sd->{$k} = $v;
    }

    $self->recompute_online_node_usage();

    $sd->{uid} = compute_new_uuid($new_state);

    $text_state = " ($text_state)" if $text_state;
    $haenv->log('info', "service '$sid': state changed from '${old_state}' to '${new_state}'$text_state");
};
# clean up a possible bad state from a recovered service to allow its start
my $fence_recovery_cleanup = sub {
    my ($self, $sid, $fenced_node) = @_;

    my $haenv = $self->{haenv};

    my (undef, $type, $id) = $haenv->parse_sid($sid);
    my $plugin = PVE::HA::Resources->lookup($type);

    die "unknown resource type '$type'" if !$plugin;

    # locks may block recovery, cleanup those which are safe to remove after fencing,
    # i.e., after the original node was reset and thus all of its state is gone
    my $removable_locks = [
        'backup',
        'mounted',
        'migrate',
        'clone',
        'rollback',
        'snapshot',
        'snapshot-delete',
        'suspending',
        'suspended',
    ];
    if (my $removed_lock = $plugin->remove_locks($haenv, $id, $removable_locks, $fenced_node)) {
        $haenv->log('warning', "removed leftover lock '$removed_lock' from recovered " .
            "service '$sid' to allow its start.");
    }
};
# read LRM status for all nodes
sub read_lrm_status {
    my ($self) = @_;

    my $nodes = $self->{ns}->list_nodes();
    my $haenv = $self->{haenv};

    my $results = {};
    my $modes = {};
    foreach my $node (@$nodes) {
        my $lrm_status = $haenv->read_lrm_status($node);
        $modes->{$node} = $lrm_status->{mode} || 'active';
        foreach my $uid (keys %{$lrm_status->{results}}) {
            next if $results->{$uid}; # should not happen
            $results->{$uid} = $lrm_status->{results}->{$uid};
        }
    }

    return ($results, $modes);
}
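
# Returned shape (illustrative node names): $modes maps node names to their LRM mode, e.g.
# { node1 => 'active', node2 => 'maintenance' }, while $results maps command uids (see
# compute_new_uuid) to the per-command result hashes reported by the LRMs.
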
# read new crm commands and save them into crm master status
sub update_crm_commands {
    my ($self) = @_;

    my ($haenv, $ms, $ns, $ss) = ($self->{haenv}, $self->{ms}, $self->{ns}, $self->{ss});

    my $cmdlist = $haenv->read_crm_commands();

    foreach my $cmd (split(/\n/, $cmdlist)) {
        chomp $cmd;

        if ($cmd =~ m/^(migrate|relocate)\s+(\S+)\s+(\S+)$/) {
            my ($task, $sid, $node) = ($1, $2, $3);
            if (my $sd = $ss->{$sid}) {
                if (!$ns->node_is_online($node)) {
                    $haenv->log('err', "crm command error - node not online: $cmd");
                } else {
                    if ($node eq $sd->{node}) {
                        $haenv->log('info', "ignore crm command - service already on target node: $cmd");
                    } else {
                        $haenv->log('info', "got crm command: $cmd");
                        $ss->{$sid}->{cmd} = [$task, $node];
                    }
                }
            } else {
                $haenv->log('err', "crm command error - no such service: $cmd");
            }
        } elsif ($cmd =~ m/^stop\s+(\S+)\s+(\S+)$/) {
            my ($sid, $timeout) = ($1, $2);
            if (my $sd = $ss->{$sid}) {
                $haenv->log('info', "got crm command: $cmd");
                $ss->{$sid}->{cmd} = ['stop', $timeout];
            } else {
                $haenv->log('err', "crm command error - no such service: $cmd");
            }
        } elsif ($cmd =~ m/^enable-node-maintenance\s+(\S+)$/) {
            my $node = $1;

            my $state = $ns->get_node_state($node);
            if ($state eq 'online') {
                $ms->{node_request}->{$node}->{maintenance} = 1;
            } elsif ($state eq 'maintenance') {
                $haenv->log('info', "ignoring crm command - node $node is already in maintenance state");
            } else {
                $haenv->log('err', "crm command error - node not online: $cmd");
            }
        } elsif ($cmd =~ m/^disable-node-maintenance\s+(\S+)$/) {
            my $node = $1;

            my $state = $ns->get_node_state($node);
            if ($state ne 'maintenance') {
                $haenv->log(
                    'warn', "clearing maintenance of node $node requested, but it's in state $state");
            }
            delete $ms->{node_request}->{$node}->{maintenance}; # gets flushed out at the end of the CRM loop
        } else {
            $haenv->log('err', "unable to parse crm command: $cmd");
        }
    }
}
sub manage {
    my ($self) = @_;

    my ($haenv, $ms, $ns, $ss) = ($self->{haenv}, $self->{ms}, $self->{ns}, $self->{ss});

    my ($node_info) = $haenv->get_node_info();
    my ($lrm_results, $lrm_modes) = $self->read_lrm_status();

    $ns->update($node_info, $lrm_modes);

    if (!$ns->node_is_operational($haenv->nodename())) {
        $haenv->log('info', "master seems offline");
        return;
    }

    $self->update_crs_scheduler_mode();

    my $sc = $haenv->read_service_config();

    $self->{groups} = $haenv->read_group_config(); # update

    # compute new service status

    # add new services
    foreach my $sid (sort keys %$sc) {
        next if $ss->{$sid}; # already there
        my $cd = $sc->{$sid};
        next if $cd->{state} eq 'ignored';

        $haenv->log('info', "adding new service '$sid' on node '$cd->{node}'");
        # assume we are running to avoid relocating a running service at add time
        my $state = ($cd->{state} eq 'started') ? 'request_start' : 'request_stop';
        $ss->{$sid} = {
            state => $state, node => $cd->{node},
            uid => compute_new_uuid('started'),
        };
    }

    # remove stale or ignored services from manager state
    foreach my $sid (keys %$ss) {
        next if $sc->{$sid} && $sc->{$sid}->{state} ne 'ignored';

        my $reason = defined($sc->{$sid}) ? 'ignored state requested' : 'no config';
        $haenv->log('info', "removing stale service '$sid' ($reason)");

        # remove all service related state information
        delete $ss->{$sid};
    }

    $self->update_crm_commands();

    for (;;) {
        my $repeat = 0;

        $self->recompute_online_node_usage();

        foreach my $sid (sort keys %$ss) {
            my $sd = $ss->{$sid};
            my $cd = $sc->{$sid} || { state => 'disabled' };

            my $lrm_res = $sd->{uid} ? $lrm_results->{$sd->{uid}} : undef;

            my $last_state = $sd->{state};

            if ($last_state eq 'stopped') {

                $self->next_state_stopped($sid, $cd, $sd, $lrm_res);

            } elsif ($last_state eq 'started') {

                $self->next_state_started($sid, $cd, $sd, $lrm_res);

            } elsif ($last_state eq 'request_start') {

                $self->next_state_request_start($sid, $cd, $sd, $lrm_res);

            } elsif ($last_state eq 'migrate' || $last_state eq 'relocate' || $last_state eq 'request_start_balance') {

                $self->next_state_migrate_relocate($sid, $cd, $sd, $lrm_res);

            } elsif ($last_state eq 'fence') {

                # do nothing here - wait until fenced

            } elsif ($last_state eq 'recovery') {

                $self->next_state_recovery($sid, $cd, $sd, $lrm_res);

            } elsif ($last_state eq 'request_stop') {

                $self->next_state_request_stop($sid, $cd, $sd, $lrm_res);

            } elsif ($last_state eq 'freeze') {

                my $lrm_mode = $sd->{node} ? $lrm_modes->{$sd->{node}} : undef;
                if ($lrm_mode && $lrm_mode eq 'active') { # unfreeze if active again
                    my $state = ($cd->{state} eq 'started') ? 'started' : 'request_stop';
                    $change_service_state->($self, $sid, $state);
                }

            } elsif ($last_state eq 'error') {

                $self->next_state_error($sid, $cd, $sd, $lrm_res);

            } else {

                die "unknown service state '$last_state'";
            }

            my $lrm_mode = $sd->{node} ? $lrm_modes->{$sd->{node}} : undef;
            if ($lrm_mode && $lrm_mode eq 'restart') {
                my $state = $sd->{state};
                if ($state eq 'started' || $state eq 'stopped' || $state eq 'request_stop') {
                    $change_service_state->($self, $sid, 'freeze');
                }
            }

            $repeat = 1 if $sd->{state} ne $last_state;
        }

        # handle fencing
        my $fenced_nodes = {};
        foreach my $sid (sort keys %$ss) {
            my ($service_state, $service_node) = $ss->{$sid}->@{'state', 'node'};
            next if $service_state ne 'fence';

            if (!defined($fenced_nodes->{$service_node})) {
                $fenced_nodes->{$service_node} = $ns->fence_node($service_node) || 0;
            }

            next if !$fenced_nodes->{$service_node};

            # node fence was successful - recover service
            $change_service_state->($self, $sid, 'recovery');
            $repeat = 1; # for faster recovery execution
        }

        # Avoid that a node without services in 'fence' state (e.g., removed
        # manually by an admin) is stuck with the 'fence' node state.
        for my $node (sort grep { !defined($fenced_nodes->{$_}) } keys $ns->{status}->%*) {
            next if $ns->get_node_state($node) ne 'fence';

            $haenv->log('notice', "node '$node' in fence state but no services to-fence! admin interference?!");
            $repeat = 1 if $ns->fence_node($node);
        }

        last if !$repeat;
    }

    $self->flush_master_status();
}
# functions to compute next service states
# $cd: service configuration data (read only)
# $sd: service status data (read only)
#
# Note: use change_service_state() to alter state
#
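# For instance, a typical transition inside these functions looks like (illustrative):
#   $change_service_state->($self, $sid, 'migrate', node => $sd->{node}, target => $target);
# which resets $sd, keeps node/failed_nodes/maintenance_node, applies the extra parameters
# and assigns a fresh uid.
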
sub next_state_request_stop {
    my ($self, $sid, $cd, $sd, $lrm_res) = @_;

    my $haenv = $self->{haenv};
    my $ns = $self->{ns};

    # check result from LRM daemon
    if ($lrm_res) {
        my $exit_code = $lrm_res->{exit_code};
        if ($exit_code == SUCCESS) {
            &$change_service_state($self, $sid, 'stopped');
            return;
        } else {
            $haenv->log('err', "service '$sid' stop failed (exit code $exit_code)");
            &$change_service_state($self, $sid, 'error'); # fixme: what state?
            return;
        }
    }

    if ($ns->node_is_offline_delayed($sd->{node})) {
        &$change_service_state($self, $sid, 'fence');
        return;
    }
}
sub next_state_migrate_relocate {
    my ($self, $sid, $cd, $sd, $lrm_res) = @_;

    my $haenv = $self->{haenv};
    my $ns = $self->{ns};

    # check result from LRM daemon
    if ($lrm_res) {
        my $exit_code = $lrm_res->{exit_code};
        my $req_state = $cd->{state} eq 'started' ? 'started' : 'request_stop';
        if ($exit_code == SUCCESS) {
            &$change_service_state($self, $sid, $req_state, node => $sd->{target});
            return;
        } elsif ($exit_code == EWRONG_NODE) {
            $haenv->log('err', "service '$sid' - migration failed: service" .
                " registered on wrong node!");
            &$change_service_state($self, $sid, 'error');
        } else {
            $haenv->log('err', "service '$sid' - migration failed (exit code $exit_code)");
            &$change_service_state($self, $sid, $req_state, node => $sd->{node});
            return;
        }
    }

    if ($ns->node_is_offline_delayed($sd->{node})) {
        &$change_service_state($self, $sid, 'fence');
        return;
    }
}
sub next_state_stopped {
    my ($self, $sid, $cd, $sd, $lrm_res) = @_;

    my $haenv = $self->{haenv};
    my $ns = $self->{ns};

    if ($sd->{node} ne $cd->{node}) {
        # this can happen if we fence a node with active migrations
        # hack: modify $sd (normally this should be considered read-only)
        $haenv->log('info', "fixup service '$sid' location ($sd->{node} => $cd->{node})");
        $sd->{node} = $cd->{node};
    }

    if ($sd->{cmd}) {
        my $cmd = shift @{$sd->{cmd}};

        if ($cmd eq 'migrate' || $cmd eq 'relocate') {
            my $target = shift @{$sd->{cmd}};
            if (!$ns->node_is_online($target)) {
                $haenv->log('err', "ignore service '$sid' $cmd request - node '$target' not online");
            } elsif ($sd->{node} eq $target) {
                $haenv->log('info', "ignore service '$sid' $cmd request - service already on node '$target'");
            } else {
                &$change_service_state($self, $sid, $cmd, node => $sd->{node}, target => $target);
                return;
            }
        } elsif ($cmd eq 'stop') {
            $haenv->log('info', "ignore service '$sid' $cmd request - service already stopped");
        } else {
            $haenv->log('err', "unknown command '$cmd' for service '$sid'");
        }
        delete $sd->{cmd};
    }

    if ($cd->{state} eq 'disabled') {
        # NOTE: do nothing here, the stop state is an exception as we do not
        # process the LRM result here, thus the LRM always tries to stop the
        # service (protection for the case no CRM is active)
        return;
    }

    if ($ns->node_is_offline_delayed($sd->{node}) && $ns->get_node_state($sd->{node}) ne 'maintenance') {
        &$change_service_state($self, $sid, 'fence');
        return;
    }

    if ($cd->{state} eq 'stopped') {
        # almost the same as 'disabled' state but the service will also get recovered
        return;
    }

    if ($cd->{state} eq 'started') {
        # simply mark it started, if it's on the wrong node next_state_started will fix that for us
        $change_service_state->($self, $sid, 'request_start', node => $sd->{node});
        return;
    }

    $haenv->log('err', "service '$sid' - unknown state '$cd->{state}' in service configuration");
}
sub next_state_request_start {
    my ($self, $sid, $cd, $sd, $lrm_res) = @_;

    my $haenv = $self->{haenv};
    my $current_node = $sd->{node};

    if ($self->{crs}->{rebalance_on_request_start}) {
        my $selected_node = select_service_node(
            $self->{groups},
            $self->{online_node_usage},
            $sid,
            $cd,
            $current_node,
            0, # try_next
            $sd->{failed_nodes},
            $sd->{maintenance_node},
            1, # best_score
        );
        my $select_text = $selected_node ne $current_node ? 'new' : 'current';
        $haenv->log('info', "service $sid: re-balance selected $select_text node $selected_node for startup");

        if ($selected_node ne $current_node) {
            $change_service_state->($self, $sid, 'request_start_balance',
                node => $current_node, target => $selected_node);
            return;
        }
    }

    $change_service_state->($self, $sid, 'started', node => $current_node);
}
sub record_service_failed_on_node {
    my ($self, $sid, $node) = @_;

    if (!defined($self->{ss}->{$sid}->{failed_nodes})) {
        $self->{ss}->{$sid}->{failed_nodes} = [];
    }

    push @{$self->{ss}->{$sid}->{failed_nodes}}, $node;
}
sub next_state_started {
    my ($self, $sid, $cd, $sd, $lrm_res) = @_;

    my $haenv = $self->{haenv};
    my $master_status = $self->{ms};
    my $ns = $self->{ns};

    if (!$ns->node_is_online($sd->{node})) {
        if ($ns->node_is_offline_delayed($sd->{node})) {
            &$change_service_state($self, $sid, 'fence');
        }
        if ($ns->get_node_state($sd->{node}) ne 'maintenance') {
            return;
        } else {
            # save current node as fallback for when it comes out of maintenance
            $sd->{maintenance_node} = $sd->{node};
        }
    }

    if ($cd->{state} eq 'disabled' || $cd->{state} eq 'stopped') {
        &$change_service_state($self, $sid, 'request_stop');
        return;
    }

    if ($cd->{state} eq 'started') {

        if ($sd->{cmd}) {
            my $cmd = shift @{$sd->{cmd}};

            if ($cmd eq 'migrate' || $cmd eq 'relocate') {
                my $target = shift @{$sd->{cmd}};
                if (!$ns->node_is_online($target)) {
                    $haenv->log('err', "ignore service '$sid' $cmd request - node '$target' not online");
                } elsif ($sd->{node} eq $target) {
                    $haenv->log('info', "ignore service '$sid' $cmd request - service already on node '$target'");
                } else {
                    $haenv->log('info', "$cmd service '$sid' to node '$target'");
                    &$change_service_state($self, $sid, $cmd, node => $sd->{node}, target => $target);
                }
            } elsif ($cmd eq 'stop') {
                my $timeout = shift @{$sd->{cmd}};
                if ($timeout == 0) {
                    $haenv->log('info', "request immediate service hard-stop for service '$sid'");
                } else {
                    $haenv->log('info', "request graceful stop with timeout '$timeout' for service '$sid'");
                }
                &$change_service_state($self, $sid, 'request_stop', timeout => $timeout);
                $haenv->update_service_config($sid, {'state' => 'stopped'});
            } else {
                $haenv->log('err', "unknown command '$cmd' for service '$sid'");
            }

            delete $sd->{cmd};

        } else {

            my $try_next = 0;

            if ($lrm_res) {
                my $ec = $lrm_res->{exit_code};
                if ($ec == SUCCESS) {

                    if (defined($sd->{failed_nodes})) {
                        $haenv->log('info', "relocation policy successful for '$sid' on node '$sd->{node}'," .
                            " failed nodes: " . join(', ', @{$sd->{failed_nodes}}));
                    }

                    delete $sd->{failed_nodes};

                    # store flag to indicate successful start - only valid while state == 'started'
                    $sd->{running} = 1;

                } elsif ($ec == ERROR || $ec == EWRONG_NODE) {

                    delete $sd->{running};

                    # apply our relocate policy if we got ERROR from the LRM
                    $self->record_service_failed_on_node($sid, $sd->{node});

                    if (scalar(@{$sd->{failed_nodes}}) <= $cd->{max_relocate}) {

                        # tell select_service_node to relocate if possible
                        $try_next = 1;

                        $haenv->log('warning', "starting service $sid on node" .
                            " '$sd->{node}' failed, relocating service.");

                    } else {

                        $haenv->log('err', "recovery policy for service $sid " .
                            "failed, entering error state. Failed nodes: " .
                            join(', ', @{$sd->{failed_nodes}}));
                        &$change_service_state($self, $sid, 'error');
                        return;

                    }
                } else {
                    $self->record_service_failed_on_node($sid, $sd->{node});

                    $haenv->log('err', "service '$sid' got unrecoverable error (exit code $ec)");
                    # we have no safe way out (yet) for other errors
                    &$change_service_state($self, $sid, 'error');
                    return;
                }
            }

            my $node = select_service_node(
                $self->{groups},
                $self->{online_node_usage},
                $sid,
                $cd,
                $sd->{node},
                $try_next,
                $sd->{failed_nodes},
                $sd->{maintenance_node},
            );

            if ($node && ($sd->{node} ne $node)) {
                $self->{online_node_usage}->add_service_usage_to_node($node, $sid, $sd->{node});

                if (defined(my $fallback = $sd->{maintenance_node})) {
                    if ($node eq $fallback) {
                        $haenv->log(
                            'info',
                            "moving service '$sid' back to '$fallback', node came back from maintenance.",
                        );
                        delete $sd->{maintenance_node};
                    } elsif ($sd->{node} ne $fallback) {
                        $haenv->log('info', "dropping maintenance fallback node '$fallback' for '$sid'");
                        delete $sd->{maintenance_node};
                    }
                }

                if ($cd->{type} eq 'vm') {
                    $haenv->log('info', "migrate service '$sid' to node '$node' (running)");
                    &$change_service_state($self, $sid, 'migrate', node => $sd->{node}, target => $node);
                } else {
                    $haenv->log('info', "relocate service '$sid' to node '$node'");
                    &$change_service_state($self, $sid, 'relocate', node => $sd->{node}, target => $node);
                }
            } else {
                if ($try_next && !defined($node)) {
                    $haenv->log(
                        'warning',
                        "Start Error Recovery: Tried all available nodes for service '$sid', retry"
                        . " start on current node. Tried nodes: " . join(', ', @{$sd->{failed_nodes}}),
                    );
                }

                # ensure the service gets started again if it went down unexpectedly,
                # but also ensure no LRM result gets lost
                $sd->{uid} = compute_new_uuid($sd->{state}) if defined($lrm_res);
            }
        }

        return;
    }

    $haenv->log('err', "service '$sid' - unknown state '$cd->{state}' in service configuration");
}
sub next_state_error {
    my ($self, $sid, $cd, $sd, $lrm_res) = @_;

    my $ns = $self->{ns};
    my $ms = $self->{ms};

    if ($cd->{state} eq 'disabled') {
        # clean up on error recovery
        delete $sd->{failed_nodes};

        &$change_service_state($self, $sid, 'stopped');
        return;
    }
}
# after a node was fenced this recovers the service to a new node
sub next_state_recovery {
    my ($self, $sid, $cd, $sd, $lrm_res) = @_;

    my ($haenv, $ss) = ($self->{haenv}, $self->{ss});
    my $ns = $self->{ns};
    my $ms = $self->{ms};

    if ($sd->{state} ne 'recovery') { # should not happen
        $haenv->log('err', "cannot recover service '$sid' from fencing, wrong state '$sd->{state}'");
        return;
    }

    my $fenced_node = $sd->{node}; # for logging purposes

    $self->recompute_online_node_usage(); # we want the most current node state

    my $recovery_node = select_service_node(
        $self->{groups},
        $self->{online_node_usage},
        $sid,
        $cd,
        $sd->{node},
    );

    if ($recovery_node) {
        my $msg = "recover service '$sid' from fenced node '$fenced_node' to node '$recovery_node'";
        if ($recovery_node eq $fenced_node) {
            # can happen if restricted groups are used and the node came up again OK
            $msg = "recover service '$sid' to previous failed and fenced node '$fenced_node' again";
        }
        $haenv->log('info', "$msg");

        $fence_recovery_cleanup->($self, $sid, $fenced_node);

        $haenv->steal_service($sid, $sd->{node}, $recovery_node);
        $self->{online_node_usage}->add_service_usage_to_node($recovery_node, $sid, $recovery_node);

        # NOTE: $sd *is normally read-only*, fencing is the exception
        $cd->{node} = $sd->{node} = $recovery_node;
        my $new_state = ($cd->{state} eq 'started') ? 'started' : 'request_stop';
        $change_service_state->($self, $sid, $new_state, node => $recovery_node);
    } else {
        # no possible node found, cannot recover - but retry later, as we always try to make it available
        $haenv->log('err', "recovering service '$sid' from fenced node '$fenced_node' failed, no recovery node found");

        if ($cd->{state} eq 'disabled') {
            # allow getting a service out of recovery manually if an admin disables it.
            delete $sd->{failed_nodes}; # clean up on recovery to stopped
            $change_service_state->($self, $sid, 'stopped'); # must NOT go through request_stop
            return;
        }
    }
}

1;