use strict;
use warnings;
+
use Digest::MD5 qw(md5_base64);
use PVE::Tools;
$sd->{uid} = compute_new_uuid($new_state);
$text_state = " ($text_state)" if $text_state;
- $haenv->log('info', "service '$sid': state changed from '${old_state}'" .
- " to '${new_state}'$text_state");
+ $haenv->log('info', "service '$sid': state changed from '${old_state}' to '${new_state}'$text_state");
};
# clean up a possible bad state from a recovered service to allow its start
if ($ns->get_node_state($sd->{node}) ne 'maintenance') {
return;
} else {
- # save current node as fallback for when it comes out of
- # maintenance
+ # save current node as fallback for when it comes out of maintenance
$sd->{maintenance_node} = $sd->{node};
}
}
} else {
$self->record_service_failed_on_node($sid, $sd->{node});
- $haenv->log('err', "service '$sid' got unrecoverable error" .
- " (exit code $ec))");
+ $haenv->log('err', "service '$sid' got unrecoverable error (exit code $ec))");
# we have no save way out (yet) for other errors
&$change_service_state($self, $sid, 'error');
return;
if (defined(my $fallback = $sd->{maintenance_node})) {
if ($node eq $fallback) {
- $haenv->log('info', "moving service '$sid' back to '$fallback', node came back from maintenance.");
+ $haenv->log(
+ 'info',
+ "moving service '$sid' back to '$fallback', node came back from maintenance.",
+ );
delete $sd->{maintenance_node};
} elsif ($sd->{node} ne $fallback) {
$haenv->log('info', "dropping maintenance fallback node '$fallback' for '$sid'");
}
} else {
if ($try_next && !defined($node)) {
- $haenv->log('warning', "Start Error Recovery: Tried all available " .
- " nodes for service '$sid', retry start on current node. " .
- "Tried nodes: " . join(', ', @{$sd->{failed_nodes}}));
+ $haenv->log(
+ 'warning',
+ "Start Error Recovery: Tried all available nodes for service '$sid', retry"
+ ." start on current node. Tried nodes: " . join(', ', @{$sd->{failed_nodes}}),
+ );
}
# ensure service get started again if it went unexpected down
# but ensure also no LRM result gets lost
warn 201 node1/lrm: unable to start service fa:130
err 201 node1/lrm: unable to start service fa:130 on local node after 0 retries
warn 220 node1/crm: starting service fa:130 on node 'node1' failed, relocating service.
-warn 220 node1/crm: Start Error Recovery: Tried all available  nodes for service 'fa:130', retry start on current node. Tried nodes: node3, node2, node1
+warn 220 node1/crm: Start Error Recovery: Tried all available nodes for service 'fa:130', retry start on current node. Tried nodes: node3, node2, node1
info 221 node1/lrm: starting service fa:130
info 221 node1/lrm: service status fa:130 started
info 240 node1/crm: relocation policy successful for 'fa:130' on node 'node1', failed nodes: node3, node2, node1
warn 205 node3/lrm: unable to start service fa:130
err 205 node3/lrm: unable to start service fa:130 on local node after 0 retries
warn 220 node1/crm: starting service fa:130 on node 'node3' failed, relocating service.
-warn 220 node1/crm: Start Error Recovery: Tried all available  nodes for service 'fa:130', retry start on current node. Tried nodes: node2, node1, node3
+warn 220 node1/crm: Start Error Recovery: Tried all available nodes for service 'fa:130', retry start on current node. Tried nodes: node2, node1, node3
info 225 node3/lrm: starting service fa:130
info 225 node3/lrm: service status fa:130 started
info 240 node1/crm: relocation policy successful for 'fa:130' on node 'node3', failed nodes: node2, node1, node3