git.proxmox.com Git - pve-ha-manager.git/blob - src/PVE/HA/Manager.pm
service data: only set failed_nodes key if needed
package PVE::HA::Manager;

use strict;
use warnings;
use Digest::MD5 qw(md5_base64);

use PVE::Tools;
use PVE::HA::Tools ':exit_codes';
use PVE::HA::NodeStatus;

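# Create a new CRM manager instance for the given HA environment. The previous
# manager status is read back so that node status and service status survive a
# master change or a restart of the CRM.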
sub new {
    my ($this, $haenv) = @_;

    my $class = ref($this) || $this;

    my $self = bless { haenv => $haenv }, $class;

    my $old_ms = $haenv->read_manager_status();

    # we only copy the state parts of the manager status which cannot be auto-generated

    $self->{ns} = PVE::HA::NodeStatus->new($haenv, $old_ms->{node_status} || {});

    # fixme: use separate class PVE::HA::ServiceStatus
    $self->{ss} = $old_ms->{service_status} || {};

    $self->{ms} = { master_node => $haenv->nodename() };

    return $self;
}

sub cleanup {
    my ($self) = @_;

    # todo: ?
}

sub flush_master_status {
    my ($self) = @_;

    my ($haenv, $ms, $ns, $ss) = ($self->{haenv}, $self->{ms}, $self->{ns}, $self->{ss});

    $ms->{node_status} = $ns->{status};
    $ms->{service_status} = $ss;
    $ms->{timestamp} = $haenv->get_time();

    $haenv->write_manager_status($ms);
}

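# Return the HA group configuration the service is bound to. If no (existing)
# group is configured for the service, a default group containing all online
# nodes is returned, so that try_next can still cycle through all nodes.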
sub get_service_group {
    my ($groups, $online_node_usage, $service_conf) = @_;

    my $group = {};
    # add all online nodes to the default group to allow try_next when no group is set
    foreach my $node (keys %$online_node_usage) {
        $group->{nodes}->{$node} = 1;
    }

    # overwrite default if service is bound to a specific group
    $group = $groups->{ids}->{$service_conf->{group}} if $service_conf->{group} &&
        $groups->{ids}->{$service_conf->{group}};

    return $group;
}

# group available nodes with their priority as the group index
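# Example (illustrative): a group with nodes { 'node1:2' => 1, 'node2' => 1 } and
# online nodes node1..node3 yields $pri_groups = { 2 => { node1 => 1 }, 0 => { node2 => 1 } },
# plus { -1 => { node3 => 1 } } if the group is not restricted.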
sub get_node_priority_groups {
    my ($group, $online_node_usage) = @_;

    my $pri_groups = {};
    my $group_members = {};
    foreach my $entry (keys %{$group->{nodes}}) {
        my ($node, $pri) = ($entry, 0);
        if ($entry =~ m/^(\S+):(\d+)$/) {
            ($node, $pri) = ($1, $2);
        }
        next if !defined($online_node_usage->{$node}); # offline
        $pri_groups->{$pri}->{$node} = 1;
        $group_members->{$node} = $pri;
    }

    # add non-group members to unrestricted groups (priority -1)
    if (!$group->{restricted}) {
        my $pri = -1;
        foreach my $node (keys %$online_node_usage) {
            next if defined($group_members->{$node});
            $pri_groups->{$pri}->{$node} = 1;
            $group_members->{$node} = -1;
        }
    }

    return ($pri_groups, $group_members);
}

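# Select the node a service should run on. Nodes are taken from the service's
# group (highest priority class only) and ordered by their current usage count;
# the service preferably stays on its current node, otherwise the least used
# node wins. With $try_next set, nodes listed in $tried_nodes are skipped and
# the node after the current one is chosen, which implements the relocation
# policy on start failures.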
sub select_service_node {
    my ($groups, $online_node_usage, $service_conf, $current_node, $try_next, $tried_nodes) = @_;

    my $group = get_service_group($groups, $online_node_usage, $service_conf);

    my ($pri_groups, $group_members) = get_node_priority_groups($group, $online_node_usage);

    my @pri_list = sort {$b <=> $a} keys %$pri_groups;
    return undef if !scalar(@pri_list);

    # stay on current node if possible (avoids random migrations)
    if (!$try_next && $group->{nofailback} && defined($group_members->{$current_node})) {
        return $current_node;
    }

    # select node from top priority node list

    my $top_pri = $pri_list[0];

    # try to avoid nodes where the service failed already if we want to relocate
    if ($try_next) {
        foreach my $node (@$tried_nodes) {
            delete $pri_groups->{$top_pri}->{$node};
        }
    }

    my @nodes = sort {
        $online_node_usage->{$a} <=> $online_node_usage->{$b} || $a cmp $b
    } keys %{$pri_groups->{$top_pri}};

    my $found;
    for (my $i = scalar(@nodes) - 1; $i >= 0; $i--) {
        my $node = $nodes[$i];
        if ($node eq $current_node) {
            $found = $i;
            last;
        }
    }

    if ($try_next) {

        if (defined($found) && ($found < (scalar(@nodes) - 1))) {
            return $nodes[$found + 1];
        } else {
            return $nodes[0];
        }

    } else {

        return $nodes[$found] if defined($found);

        return $nodes[0];

    }
}

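# Each CRM command gets a unique identifier so that LRM results can be matched
# to the command that triggered them; the MD5 over state, PID, time and a
# per-process counter is simply a cheap way to obtain such a unique token.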
my $uid_counter = 0;

sub compute_new_uuid {
    my ($state) = @_;

    $uid_counter++;
    return md5_base64($state . $$ . time() . $uid_counter);
}

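# all CRM side service states; the next_state_* functions below implement the
# transitions between them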
my $valid_service_states = {
    stopped => 1,
    request_stop => 1,
    started => 1,
    fence => 1,
    migrate => 1,
    relocate => 1,
    freeze => 1,
    error => 1,
};

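# Count the active services per online node. Services in 'migrate' or
# 'relocate' state are counted on their target node, stopped services are not
# counted at all. The result is used by select_service_node for load-based
# placement.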
sub recompute_online_node_usage {
    my ($self) = @_;

    my $online_node_usage = {};

    my $online_nodes = $self->{ns}->list_online_nodes();

    foreach my $node (@$online_nodes) {
        $online_node_usage->{$node} = 0;
    }

    foreach my $sid (keys %{$self->{ss}}) {
        my $sd = $self->{ss}->{$sid};
        my $state = $sd->{state};
        if (defined($online_node_usage->{$sd->{node}})) {
            if (($state eq 'started') || ($state eq 'request_stop') ||
                ($state eq 'fence') || ($state eq 'freeze') || ($state eq 'error')) {
                $online_node_usage->{$sd->{node}}++;
            } elsif (($state eq 'migrate') || ($state eq 'relocate')) {
                $online_node_usage->{$sd->{target}}++;
            } elsif ($state eq 'stopped') {
                # do nothing
            } else {
                die "should not be reached";
            }
        }
    }

    $self->{online_node_usage} = $online_node_usage;
}

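# Central helper to switch a service to a new CRM state. All per-state data is
# dropped; only the node and the failed_nodes list survive the transition.
# Extra key/value pairs passed in %params (e.g. node or target) become part of
# the new service data, and a fresh command UID is assigned.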
my $change_service_state = sub {
    my ($self, $sid, $new_state, %params) = @_;

    my ($haenv, $ss) = ($self->{haenv}, $self->{ss});

    my $sd = $ss->{$sid} || die "no such service '$sid'";

    my $old_state = $sd->{state};
    my $old_node = $sd->{node};
    my $old_failed_nodes = $sd->{failed_nodes};

    die "no state change" if $old_state eq $new_state; # just to be sure

    die "invalid CRM service state '$new_state'\n" if !$valid_service_states->{$new_state};

    foreach my $k (keys %$sd) { delete $sd->{$k}; };

    $sd->{state} = $new_state;
    $sd->{node} = $old_node;
    $sd->{failed_nodes} = $old_failed_nodes if defined($old_failed_nodes);

    my $text_state = '';
    foreach my $k (sort keys %params) {
        my $v = $params{$k};
        $text_state .= ", " if $text_state;
        $text_state .= "$k = $v";
        $sd->{$k} = $v;
    }

    $self->recompute_online_node_usage();

    $sd->{uid} = compute_new_uuid($new_state);

    $text_state = " ($text_state)" if $text_state;
    $haenv->log('info', "service '$sid': state changed from '${old_state}'" .
                " to '${new_state}'$text_state");
};

# clean up a possible bad state from a recovered service to allow its start
my $fence_recovery_cleanup = sub {
    my ($self, $sid, $fenced_node) = @_;

    my $haenv = $self->{haenv};

    my (undef, $type, $id) = $haenv->parse_sid($sid);
    my $plugin = PVE::HA::Resources->lookup($type);

    # should not happen
    die "unknown resource type '$type'" if !$plugin;

    # locks may block recovery, clean up those which are safe to remove after fencing
    my $removable_locks = ['backup', 'mounted'];
    if (my $removed_lock = $plugin->remove_locks($haenv, $id, $removable_locks, $fenced_node)) {
        $haenv->log('warning', "removed leftover lock '$removed_lock' from recovered " .
                    "service '$sid' to allow its start.");
    }
};

# after a node was fenced this recovers the service to a new node
my $recover_fenced_service = sub {
    my ($self, $sid, $cd) = @_;

    my ($haenv, $ss) = ($self->{haenv}, $self->{ss});

    my $sd = $ss->{$sid};

    if ($sd->{state} ne 'fence') { # should not happen
        $haenv->log('err', "cannot recover service '$sid' from fencing," .
                    " wrong state '$sd->{state}'");
        return;
    }

    my $fenced_node = $sd->{node}; # for logging purposes

    $self->recompute_online_node_usage(); # we want the most current node state

    my $recovery_node = select_service_node($self->{groups},
                                            $self->{online_node_usage},
                                            $cd, $sd->{node});

    if ($recovery_node) {
        $haenv->log('info', "recover service '$sid' from fenced node " .
                    "'$fenced_node' to node '$recovery_node'");

        &$fence_recovery_cleanup($self, $sid, $fenced_node);

        $haenv->steal_service($sid, $sd->{node}, $recovery_node);

        # $sd *is normally read-only*, fencing is the exception
        $cd->{node} = $sd->{node} = $recovery_node;
        my $new_state = ($cd->{state} eq 'started') ? 'started' : 'request_stop';
        &$change_service_state($self, $sid, $new_state, node => $recovery_node);
    } else {
        # no possible node found, cannot recover
        $haenv->log('err', "recovering service '$sid' from fenced node " .
                    "'$fenced_node' failed, no recovery node found");
        &$change_service_state($self, $sid, 'error');
    }
};

# read LRM status for all nodes
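# returns ($results, $modes): LRM command results indexed by command UID, and
# the reported LRM mode per node (defaulting to 'active' if none is set)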
sub read_lrm_status {
    my ($self) = @_;

    my $nodes = $self->{ns}->list_nodes();
    my $haenv = $self->{haenv};

    my $results = {};
    my $modes = {};
    foreach my $node (@$nodes) {
        my $lrm_status = $haenv->read_lrm_status($node);
        $modes->{$node} = $lrm_status->{mode} || 'active';
        foreach my $uid (keys %{$lrm_status->{results}}) {
            next if $results->{$uid}; # should not happen
            $results->{$uid} = $lrm_status->{results}->{$uid};
        }
    }

    return ($results, $modes);
}

# read new crm commands and save them into crm master status
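# Commands are plain text lines of the form '<migrate|relocate> <sid> <node>';
# a parsed command is stored as $ss->{$sid}->{cmd} and picked up later by the
# next_state_* handlers.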
sub update_crm_commands {
    my ($self) = @_;

    my ($haenv, $ms, $ns, $ss) = ($self->{haenv}, $self->{ms}, $self->{ns}, $self->{ss});

    my $cmdlist = $haenv->read_crm_commands();

    foreach my $cmd (split(/\n/, $cmdlist)) {
        chomp $cmd;

        if ($cmd =~ m/^(migrate|relocate)\s+(\S+)\s+(\S+)$/) {
            my ($task, $sid, $node) = ($1, $2, $3);
            if (my $sd = $ss->{$sid}) {
                if (!$ns->node_is_online($node)) {
                    $haenv->log('err', "crm command error - node not online: $cmd");
                } else {
                    if ($node eq $sd->{node}) {
                        $haenv->log('info', "ignore crm command - service already on target node: $cmd");
                    } else {
                        $haenv->log('info', "got crm command: $cmd");
                        $ss->{$sid}->{cmd} = [ $task, $node];
                    }
                }
            } else {
                $haenv->log('err', "crm command error - no such service: $cmd");
            }

        } else {
            $haenv->log('err', "unable to parse crm command: $cmd");
        }
    }

}

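# Main CRM work function, called once per manager loop iteration: update node
# status, collect LRM results, sync the service list with the configuration,
# process new CRM commands, and then drive the per-service state machine until
# no further state changes occur, handling fencing and recovery on the way.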
sub manage {
    my ($self) = @_;

    my ($haenv, $ms, $ns, $ss) = ($self->{haenv}, $self->{ms}, $self->{ns}, $self->{ss});

    $ns->update($haenv->get_node_info());

    if (!$ns->node_is_online($haenv->nodename())) {
        $haenv->log('info', "master seems offline");
        return;
    }

    my ($lrm_results, $lrm_modes) = $self->read_lrm_status();

    my $sc = $haenv->read_service_config();

    $self->{groups} = $haenv->read_group_config(); # update

    # compute new service status

    # add new services
    foreach my $sid (sort keys %$sc) {
        next if $ss->{$sid}; # already there
        my $cd = $sc->{$sid};
        next if $cd->{state} eq 'ignored';

        $haenv->log('info', "adding new service '$sid' on node '$cd->{node}'");
        # assume the service is running to avoid relocating a running service on addition
        my $state = ($cd->{state} eq 'started') ? 'started' : 'request_stop';
        $ss->{$sid} = { state => $state, node => $cd->{node},
                        uid => compute_new_uuid('started') };
    }

    # remove stale or ignored services from manager state
    foreach my $sid (keys %$ss) {
        next if $sc->{$sid} && $sc->{$sid}->{state} ne 'ignored';

        my $reason = defined($sc->{$sid}) ? 'ignored state requested' : 'no config';
        $haenv->log('info', "removing stale service '$sid' ($reason)");

        # remove all service related state information
        delete $ss->{$sid};
    }

    $self->update_crm_commands();

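    # run the state machine until no service changes its state anymore; a state
    # change may enable further transitions within the same manage() call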
    for (;;) {
        my $repeat = 0;

        $self->recompute_online_node_usage();

        foreach my $sid (sort keys %$ss) {
            my $sd = $ss->{$sid};
            my $cd = $sc->{$sid} || { state => 'disabled' };

            my $lrm_res = $sd->{uid} ? $lrm_results->{$sd->{uid}} : undef;

            my $last_state = $sd->{state};

            if ($last_state eq 'stopped') {

                $self->next_state_stopped($sid, $cd, $sd, $lrm_res);

            } elsif ($last_state eq 'started') {

                $self->next_state_started($sid, $cd, $sd, $lrm_res);

            } elsif ($last_state eq 'migrate' || $last_state eq 'relocate') {

                $self->next_state_migrate_relocate($sid, $cd, $sd, $lrm_res);

            } elsif ($last_state eq 'fence') {

                # do nothing here - wait until fenced

            } elsif ($last_state eq 'request_stop') {

                $self->next_state_request_stop($sid, $cd, $sd, $lrm_res);

            } elsif ($last_state eq 'freeze') {

                my $lrm_mode = $sd->{node} ? $lrm_modes->{$sd->{node}} : undef;
                # unfreeze
                my $state = ($cd->{state} eq 'started') ? 'started' : 'request_stop';
                &$change_service_state($self, $sid, $state)
                    if $lrm_mode && $lrm_mode eq 'active';

            } elsif ($last_state eq 'error') {

                $self->next_state_error($sid, $cd, $sd, $lrm_res);

            } else {

                die "unknown service state '$last_state'";
            }

            my $lrm_mode = $sd->{node} ? $lrm_modes->{$sd->{node}} : undef;
            if ($lrm_mode && $lrm_mode eq 'restart') {
                if (($sd->{state} eq 'started' || $sd->{state} eq 'stopped' ||
                     $sd->{state} eq 'request_stop')) {
                    &$change_service_state($self, $sid, 'freeze');
                }
            }

            $repeat = 1 if $sd->{state} ne $last_state;
        }

        # handle fencing
        my $fenced_nodes = {};
        foreach my $sid (sort keys %$ss) {
            my $sd = $ss->{$sid};
            next if $sd->{state} ne 'fence';

            if (!defined($fenced_nodes->{$sd->{node}})) {
                $fenced_nodes->{$sd->{node}} = $ns->fence_node($sd->{node}) || 0;
            }

            next if !$fenced_nodes->{$sd->{node}};

            # node fence was successful - recover service
            &$recover_fenced_service($self, $sid, $sc->{$sid});
        }

        last if !$repeat;
    }

    $self->flush_master_status();
}

# functions to compute next service states
# $cd: service configuration data (read only)
# $sd: service status data (read only)
#
# Note: use change_service_state() to alter state
#

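# 'request_stop': wait for the LRM to report the result of the stop command;
# switch to 'stopped' on success, to 'error' on failure, and to 'fence' if the
# node stays offline for too long.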
sub next_state_request_stop {
    my ($self, $sid, $cd, $sd, $lrm_res) = @_;

    my $haenv = $self->{haenv};
    my $ns = $self->{ns};

    # check result from LRM daemon
    if ($lrm_res) {
        my $exit_code = $lrm_res->{exit_code};
        if ($exit_code == SUCCESS) {
            &$change_service_state($self, $sid, 'stopped');
            return;
        } else {
            $haenv->log('err', "service '$sid' stop failed (exit code $exit_code)");
            &$change_service_state($self, $sid, 'error'); # fixme: what state?
            return;
        }
    }

    if ($ns->node_is_offline_delayed($sd->{node})) {
        &$change_service_state($self, $sid, 'fence');
        return;
    }
}

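# 'migrate'/'relocate': wait for the LRM result of the migration. On success
# the service continues in its requested state on the target node, on failure
# it falls back to its source node; EWRONG_NODE puts it into 'error'.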
sub next_state_migrate_relocate {
    my ($self, $sid, $cd, $sd, $lrm_res) = @_;

    my $haenv = $self->{haenv};
    my $ns = $self->{ns};

    # check result from LRM daemon
    if ($lrm_res) {
        my $exit_code = $lrm_res->{exit_code};
        my $req_state = $cd->{state} eq 'started' ? 'started' : 'request_stop';
        if ($exit_code == SUCCESS) {
            &$change_service_state($self, $sid, $req_state, node => $sd->{target});
            return;
        } elsif ($exit_code == EWRONG_NODE) {
            $haenv->log('err', "service '$sid' - migration failed: service" .
                        " registered on wrong node!");
            &$change_service_state($self, $sid, 'error');
        } else {
            $haenv->log('err', "service '$sid' - migration failed (exit code $exit_code)");
            &$change_service_state($self, $sid, $req_state, node => $sd->{node});
            return;
        }
    }

    if ($ns->node_is_offline_delayed($sd->{node})) {
        &$change_service_state($self, $sid, 'fence');
        return;
    }
}

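# 'stopped': execute pending migrate/relocate commands, fence the node if it
# went offline, and start the service again (state 'started') if the
# configuration requests it.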
sub next_state_stopped {
    my ($self, $sid, $cd, $sd, $lrm_res) = @_;

    my $haenv = $self->{haenv};
    my $ns = $self->{ns};

    if ($sd->{node} ne $cd->{node}) {
        # this can happen if we fence a node with active migrations
        # hack: modify $sd (normally this should be considered read-only)
        $haenv->log('info', "fixup service '$sid' location ($sd->{node} => $cd->{node})");
        $sd->{node} = $cd->{node};
    }

    if ($sd->{cmd}) {
        my ($cmd, $target) = @{$sd->{cmd}};
        delete $sd->{cmd};

        if ($cmd eq 'migrate' || $cmd eq 'relocate') {
            if (!$ns->node_is_online($target)) {
                $haenv->log('err', "ignore service '$sid' $cmd request - node '$target' not online");
            } elsif ($sd->{node} eq $target) {
                $haenv->log('info', "ignore service '$sid' $cmd request - service already on node '$target'");
            } else {
                &$change_service_state($self, $sid, $cmd, node => $sd->{node},
                                       target => $target);
                return;
            }
        } else {
            $haenv->log('err', "unknown command '$cmd' for service '$sid'");
        }
    }

    if ($cd->{state} eq 'disabled') {
        # NOTE: do nothing here, the stop state is an exception as we do not
        # process the LRM result here, thus the LRM always tries to stop the
        # service (protection for the case no CRM is active)
        return;
    }

    if ($ns->node_is_offline_delayed($sd->{node})) {
        &$change_service_state($self, $sid, 'fence');
        return;
    }

    if ($cd->{state} eq 'stopped') {
        # almost the same as 'disabled' state but the service will also get recovered
        return;
    }

    if ($cd->{state} eq 'started') {
        # simply mark it started; if it's on the wrong node,
        # next_state_started will fix that for us
        &$change_service_state($self, $sid, 'started', node => $sd->{node});
        return;
    }

    $haenv->log('err', "service '$sid' - unknown state '$cd->{state}' in service configuration");
}

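# remember that starting the service failed on $node; failed_nodes feeds the
# relocation policy (max_relocate) and lets select_service_node skip those nodes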
sub record_service_failed_on_node {
    my ($self, $sid, $node) = @_;

    if (!defined($self->{ss}->{$sid}->{failed_nodes})) {
        $self->{ss}->{$sid}->{failed_nodes} = [];
    }

    push @{$self->{ss}->{$sid}->{failed_nodes}}, $node;
}

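# 'started': the main state for running services. Fence the node if it went
# offline, honor stop requests from the configuration, execute pending
# migrate/relocate commands, evaluate the LRM start result (applying the
# relocation policy on errors), and finally ask select_service_node whether the
# service should move to a better node.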
sub next_state_started {
    my ($self, $sid, $cd, $sd, $lrm_res) = @_;

    my $haenv = $self->{haenv};
    my $master_status = $self->{ms};
    my $ns = $self->{ns};

    if (!$ns->node_is_online($sd->{node})) {
        if ($ns->node_is_offline_delayed($sd->{node})) {
            &$change_service_state($self, $sid, 'fence');
        }
        return;
    }

    if ($cd->{state} eq 'disabled' || $cd->{state} eq 'stopped') {
        &$change_service_state($self, $sid, 'request_stop');
        return;
    }

    if ($cd->{state} eq 'started') {

        if ($sd->{cmd}) {
            my ($cmd, $target) = @{$sd->{cmd}};
            delete $sd->{cmd};

            if ($cmd eq 'migrate' || $cmd eq 'relocate') {
                if (!$ns->node_is_online($target)) {
                    $haenv->log('err', "ignore service '$sid' $cmd request - node '$target' not online");
                } elsif ($sd->{node} eq $target) {
                    $haenv->log('info', "ignore service '$sid' $cmd request - service already on node '$target'");
                } else {
                    $haenv->log('info', "$cmd service '$sid' to node '$target'");
                    &$change_service_state($self, $sid, $cmd, node => $sd->{node}, target => $target);
                }
            } else {
                $haenv->log('err', "unknown command '$cmd' for service '$sid'");
            }
        } else {

            my $try_next = 0;

            if ($lrm_res) {

                my $ec = $lrm_res->{exit_code};
                if ($ec == SUCCESS) {

                    if (defined($sd->{failed_nodes})) {
                        $haenv->log('info', "relocation policy successful for '$sid' on node '$sd->{node}'," .
                                    " failed nodes: " . join(', ', @{$sd->{failed_nodes}}) );
                    }

                    delete $sd->{failed_nodes};

                    # store flag to indicate successful start - only valid while state == 'started'
                    $sd->{running} = 1;

                } elsif ($ec == ERROR) {

                    delete $sd->{running};

                    # apply our relocate policy if we got ERROR from the LRM
                    $self->record_service_failed_on_node($sid, $sd->{node});

                    if (scalar(@{$sd->{failed_nodes}}) <= $cd->{max_relocate}) {

                        # tell select_service_node to relocate if possible
                        $try_next = 1;

                        $haenv->log('warning', "starting service $sid on node".
                                    " '$sd->{node}' failed, relocating service.");

                    } else {

                        $haenv->log('err', "recovery policy for service $sid " .
                                    "failed, entering error state. Failed nodes: ".
                                    join(', ', @{$sd->{failed_nodes}}));
                        &$change_service_state($self, $sid, 'error');
                        return;

                    }
                } else {
                    $self->record_service_failed_on_node($sid, $sd->{node});

                    $haenv->log('err', "service '$sid' got unrecoverable error" .
                                " (exit code $ec)");
                    # we have no safe way out (yet) for other errors
                    &$change_service_state($self, $sid, 'error');
                    return;
                }
            }

            my $node = select_service_node($self->{groups}, $self->{online_node_usage},
                                           $cd, $sd->{node}, $try_next, $sd->{failed_nodes});

            if ($node && ($sd->{node} ne $node)) {
                if ($cd->{type} eq 'vm') {
                    $haenv->log('info', "migrate service '$sid' to node '$node' (running)");
                    &$change_service_state($self, $sid, 'migrate', node => $sd->{node}, target => $node);
                } else {
                    $haenv->log('info', "relocate service '$sid' to node '$node'");
                    &$change_service_state($self, $sid, 'relocate', node => $sd->{node}, target => $node);
                }
            } else {
                if ($try_next && !defined($node)) {
                    $haenv->log('warning', "Start Error Recovery: Tried all available" .
                                " nodes for service '$sid', retry start on current node. " .
                                "Tried nodes: " . join(', ', @{$sd->{failed_nodes}}));
                }
                # ensure the service gets started again if it went down unexpectedly,
                # but also ensure no LRM result gets lost
                $sd->{uid} = compute_new_uuid($sd->{state}) if defined($lrm_res);
            }
        }

        return;
    }

    $haenv->log('err', "service '$sid' - unknown state '$cd->{state}' in service configuration");
}

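# 'error': the service is stuck; a 'disabled' request in the configuration
# clears the error state (and the failed_nodes list) and moves it back to 'stopped'.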
sub next_state_error {
    my ($self, $sid, $cd, $sd, $lrm_res) = @_;

    my $ns = $self->{ns};
    my $ms = $self->{ms};

    if ($cd->{state} eq 'disabled') {
        # clean up on error recovery
        delete $sd->{failed_nodes};

        &$change_service_state($self, $sid, 'stopped');
        return;
    }

}

1;