# Source: git.proxmox.com - pve-ha-manager.git - src/PVE/HA/Manager.pm
# (commit subject: clean up 'Data::Dumper' usage tree wide)
1 package PVE::HA::Manager;
2
3 use strict;
4 use warnings;
5 use Digest::MD5 qw(md5_base64);
6
7 use PVE::Tools;
8 use PVE::HA::Tools ':exit_codes';
9 use PVE::HA::NodeStatus;
10
# Create a new HA manager instance bound to the given HA environment.
# Restores the parts of the previous manager status which cannot be
# regenerated automatically (node status, service status).
sub new {
    my ($this, $haenv) = @_;

    my $class = ref($this) || $this;

    # we only copy the state part of the manager which cannot be auto generated
    my $previous_status = $haenv->read_manager_status();

    my $self = bless {
	haenv => $haenv,
	# per-node online/offline tracking, seeded from the last run
	ns => PVE::HA::NodeStatus->new($haenv, $previous_status->{node_status} || {}),
	# fixme: use separate class PVE::HA::ServiceStatus
	ss => $previous_status->{service_status} || {},
	# fresh master status record, owned by the current node
	ms => { master_node => $haenv->nodename() },
    }, $class;

    return $self;
}
31
# Tear-down hook called when the manager gives up the master role.
# Currently a no-op placeholder.
sub cleanup {
    my ($self) = @_;

    # todo: ?
}
37
# Assemble the complete manager status from the live sub-states and
# persist it through the environment layer.
sub flush_master_status {
    my ($self) = @_;

    my $haenv = $self->{haenv};
    my $ms = $self->{ms};

    $ms->{node_status} = $self->{ns}->{status};
    $ms->{service_status} = $self->{ss};
    $ms->{timestamp} = $haenv->get_time();

    $haenv->write_manager_status($ms);
}
49
# Resolve the HA group a service belongs to.
# Falls back to a synthetic group containing every online node, so that
# try_next can still relocate services which have no group configured.
sub get_service_group {
    my ($groups, $online_node_usage, $service_conf) = @_;

    # default group: all online nodes
    my $group = {};
    $group->{nodes}->{$_} = 1 for keys %$online_node_usage;

    # a configured (and actually existing) group takes precedence
    my $cfg_group = $service_conf->{group};
    if ($cfg_group && $groups->{ids}->{$cfg_group}) {
	$group = $groups->{ids}->{$cfg_group};
    }

    return $group;
}
65
# Partition the online members of a group by their priority.
# Group entries are either plain node names (priority 0) or
# "<node>:<priority>" pairs; offline nodes are skipped.
# Returns ($pri_groups, $group_members):
#   $pri_groups: priority => { node => 1, ... }
#   $group_members: node => priority
sub get_node_priority_groups {
    my ($group, $online_node_usage) = @_;

    my $pri_groups = {};
    my $group_members = {};

    foreach my $entry (keys %{$group->{nodes}}) {
	my $node = $entry;
	my $pri = 0;
	($node, $pri) = ($1, $2) if $entry =~ m/^(\S+):(\d+)$/;

	next if !defined($online_node_usage->{$node}); # offline

	$pri_groups->{$pri}->{$node} = 1;
	$group_members->{$node} = $pri;
    }

    # for unrestricted groups every remaining online node stays usable as a
    # last resort, collected under the artificial priority -1
    if (!$group->{restricted}) {
	foreach my $node (keys %$online_node_usage) {
	    next if defined($group_members->{$node});
	    $pri_groups->{-1}->{$node} = 1;
	    $group_members->{$node} = -1;
	}
    }

    return ($pri_groups, $group_members);
}
94
# Pick the best node for a service.
#
# $groups: HA group configuration (from read_group_config)
# $online_node_usage: online node name => count of active services
# $service_conf: service configuration entry (read only)
# $current_node: node the service currently lives on
# $try_next: if true, skip already-tried nodes and advance past the
#            current node (used when relocating after a failed start)
# $tried_nodes: array ref of nodes where the service already failed
#
# Returns the chosen node name, or undef if no candidate is left.
sub select_service_node {
    my ($groups, $online_node_usage, $service_conf, $current_node, $try_next, $tried_nodes) = @_;

    my $group = get_service_group($groups, $online_node_usage, $service_conf);

    my ($pri_groups, $group_members) = get_node_priority_groups($group, $online_node_usage);

    # highest priority first
    my @pri_list = sort {$b <=> $a} keys %$pri_groups;
    return undef if !scalar(@pri_list);

    # stay on current node if possible (avoids random migrations)
    if (!$try_next && $group->{nofailback} && defined($group_members->{$current_node})) {
	return $current_node;
    }

    # select node from top priority node list

    my $top_pri = $pri_list[0];

    # try to avoid nodes where the service failed already if we want to relocate
    if ($try_next) {
	foreach my $node (@$tried_nodes) {
	    delete $pri_groups->{$top_pri}->{$node};
	}
    }

    # least used node first; node name breaks ties deterministically
    my @nodes = sort {
	$online_node_usage->{$a} <=> $online_node_usage->{$b} || $a cmp $b
    } keys %{$pri_groups->{$top_pri}};

    # locate the current node in the sorted candidate list (if present)
    my $found;
    for (my $i = scalar(@nodes) - 1; $i >= 0; $i--) {
	my $node = $nodes[$i];
	if ($node eq $current_node) {
	    $found = $i;
	    last;
	}
    }

    if ($try_next) {
	# rotate: take the candidate after the current node, wrapping around
	# to the first (least used) one; returns undef if @nodes is empty
	if (defined($found) && ($found < (scalar(@nodes) - 1))) {
	    return $nodes[$found + 1];
	} else {
	    return $nodes[0];
	}

    } else {

	# prefer the current node when it is still a valid candidate
	return $nodes[$found] if defined($found);

	return $nodes[0];

    }
}
150
# Monotonic counter mixed into generated UIDs so two calls within the same
# second and process still produce distinct values.
my $uid_counter = 0;

# Generate a short, practically unique identifier for a service state
# transition; the LRM echoes it back so results can be matched to commands.
sub compute_new_uuid {
    my ($state) = @_;

    return md5_base64($state . $$ . time() . ++$uid_counter);
}
159
# All CRM-side service states; $change_service_state refuses anything else.
my $valid_service_states = {
    stopped => 1,
    request_stop => 1,
    started => 1,
    fence => 1,
    migrate => 1,
    relocate => 1,
    freeze => 1,
    error => 1,
};
170
# Rebuild $self->{online_node_usage}: for every online node, count the
# services currently consuming resources there. Migrating/relocating
# services are accounted on their target node.
sub recompute_online_node_usage {
    my ($self) = @_;

    my $usage = {};
    $usage->{$_} = 0 for @{$self->{ns}->list_online_nodes()};

    foreach my $sid (keys %{$self->{ss}}) {
	my $sd = $self->{ss}->{$sid};
	my $state = $sd->{state};

	# services assigned to offline nodes do not contribute
	next if !defined($usage->{$sd->{node}});

	if ($state eq 'started' || $state eq 'request_stop' ||
	    $state eq 'fence' || $state eq 'freeze' || $state eq 'error') {
	    $usage->{$sd->{node}}++;
	} elsif ($state eq 'migrate' || $state eq 'relocate') {
	    # moving services count against their destination
	    $usage->{$sd->{target}}++;
	} elsif ($state eq 'stopped') {
	    # stopped services consume nothing
	} else {
	    die "should not be reached";
	}
    }

    $self->{online_node_usage} = $usage;
}
201
# Transition service $sid to $new_state.
# All transient per-state data is wiped; only node and failed_nodes survive.
# Extra %params (e.g. node => ..., target => ...) are stored in the service
# state and included in the log message. Dies on unknown service, no-op
# transitions and invalid target states.
my $change_service_state = sub {
    my ($self, $sid, $new_state, %params) = @_;

    my ($haenv, $ss) = ($self->{haenv}, $self->{ss});

    # fix: message previously read "no such service '$sid" - the closing
    # quote after the interpolated sid was missing
    my $sd = $ss->{$sid} || die "no such service '$sid'";

    my $old_state = $sd->{state};
    my $old_node = $sd->{node};
    my $old_failed_nodes = $sd->{failed_nodes};

    die "no state change" if $old_state eq $new_state; # just to be sure

    die "invalid CRM service state '$new_state'\n" if !$valid_service_states->{$new_state};

    # clear all transient data, then restore the persistent parts
    foreach my $k (keys %$sd) { delete $sd->{$k}; };

    $sd->{state} = $new_state;
    $sd->{node} = $old_node;
    $sd->{failed_nodes} = $old_failed_nodes;

    # apply the extra parameters and collect them for the log line
    my $text_state = '';
    foreach my $k (sort keys %params) {
	my $v = $params{$k};
	$text_state .= ", " if $text_state;
	$text_state .= "$k = $v";
	$sd->{$k} = $v;
    }

    $self->recompute_online_node_usage();

    # fresh uid so the LRM treats this as a new command
    $sd->{uid} = compute_new_uuid($new_state);

    $text_state = " ($text_state)" if $text_state;
    $haenv->log('info', "service '$sid': state changed from '${old_state}'" .
		" to '${new_state}'$text_state");
};
239
# clean up a possible bad state from a recovered service to allow its start
# After fencing, leftover resource locks (which are known to be safe to
# drop in this situation) could otherwise block the service start.
my $fence_recovery_cleanup = sub {
    my ($self, $sid, $fenced_node) = @_;

    my $haenv = $self->{haenv};

    # resolve the resource plugin for this service type
    my (undef, $type, $id) = PVE::HA::Tools::parse_sid($sid);
    my $plugin = PVE::HA::Resources->lookup($type);

    # should not happen
    die "unknown resource type '$type'" if !$plugin;

    # locks may block recovery, cleanup those which are safe to remove after fencing
    my $removable_locks = ['backup', 'mounted'];
    if (my $removed_lock = $plugin->remove_locks($haenv, $id, $removable_locks, $fenced_node)) {
	$haenv->log('warning', "removed leftover lock '$removed_lock' from recovered " .
		    "service '$sid' to allow its start.");
    }
};
259
# after a node was fenced this recovers the service to a new node
# $cd is the service's configuration entry; it is updated in place with the
# new node assignment.
my $recover_fenced_service = sub {
    my ($self, $sid, $cd) = @_;

    my ($haenv, $ss) = ($self->{haenv}, $self->{ss});

    my $sd = $ss->{$sid};

    if ($sd->{state} ne 'fence') { # should not happen
	$haenv->log('err', "cannot recover service '$sid' from fencing," .
		    " wrong state '$sd->{state}'");
	return;
    }

    my $fenced_node = $sd->{node}; # for logging purpose

    $self->recompute_online_node_usage(); # we want the most current node state

    # note: called without $try_next/$tried_nodes, so this simply picks the
    # least loaded node of the top priority group
    my $recovery_node = select_service_node($self->{groups},
					    $self->{online_node_usage},
					    $cd, $sd->{node});

    if ($recovery_node) {
	$haenv->log('info', "recover service '$sid' from fenced node " .
		    "'$fenced_node' to node '$recovery_node'");

	# drop leftover locks which would block the service start
	&$fence_recovery_cleanup($self, $sid, $fenced_node);

	# move cluster-level ownership of the service to the recovery node
	$haenv->steal_service($sid, $sd->{node}, $recovery_node);

	# $sd *is normally read-only*, fencing is the exception
	$cd->{node} = $sd->{node} = $recovery_node;
	my $new_state = ($cd->{state} eq 'started') ? 'started' : 'request_stop';
	&$change_service_state($self, $sid, $new_state, node => $recovery_node);
    } else {
	# no possible node found, cannot recover
	$haenv->log('err', "recovering service '$sid' from fenced node " .
		    "'$fenced_node' failed, no recovery node found");
	&$change_service_state($self, $sid, 'error');
    }
};
301
# read LRM status for all nodes
# Returns ($results, $modes):
#   $results: command uid => LRM result record
#   $modes:   node name => LRM mode (defaults to 'active')
sub read_lrm_status {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    my $results = {};
    my $modes = {};

    foreach my $node (@{$self->{ns}->list_nodes()}) {
	my $lrm_status = $haenv->read_lrm_status($node);
	$modes->{$node} = $lrm_status->{mode} || 'active';

	foreach my $uid (keys %{$lrm_status->{results}}) {
	    next if $results->{$uid}; # uids are unique - should not happen
	    $results->{$uid} = $lrm_status->{results}->{$uid};
	}
    }

    return ($results, $modes);
}
323
# read new crm commands and save them into crm master status
# Each valid "migrate|relocate <sid> <node>" line is queued on the service
# state as $sd->{cmd}; invalid or inapplicable commands are only logged.
sub update_crm_commands {
    my ($self) = @_;

    my ($haenv, $ms, $ns, $ss) = ($self->{haenv}, $self->{ms}, $self->{ns}, $self->{ss});

    my $cmdlist = $haenv->read_crm_commands();

    foreach my $cmd (split(/\n/, $cmdlist)) {
	chomp $cmd;

	if ($cmd =~ m/^(migrate|relocate)\s+(\S+)\s+(\S+)$/) {
	    my ($task, $sid, $node) = ($1, $2, $3);

	    my $sd = $ss->{$sid};
	    if (!$sd) {
		$haenv->log('err', "crm command error - no such service: $cmd");
	    } elsif (!$ns->node_is_online($node)) {
		$haenv->log('err', "crm command error - node not online: $cmd");
	    } elsif ($node eq $sd->{node}) {
		$haenv->log('info', "ignore crm command - service already on target node: $cmd");
	    } else {
		# queue the request; the state machine picks it up later
		$haenv->log('info', "got crm command: $cmd");
		$ss->{$sid}->{cmd} = [ $task, $node];
	    }
	} else {
	    $haenv->log('err', "unable to parse crm command: $cmd");
	}
    }
}
358
# Main CRM work function, called periodically while this node holds the
# manager lock. Updates node states, synchronizes the service list with the
# configuration, processes queued CRM commands, advances every service
# through its state machine (repeating until no more state changes happen),
# handles fencing/recovery and finally persists the manager status.
sub manage {
    my ($self) = @_;

    my ($haenv, $ms, $ns, $ss) = ($self->{haenv}, $self->{ms}, $self->{ns}, $self->{ss});

    $ns->update($haenv->get_node_info());

    if (!$ns->node_is_online($haenv->nodename())) {
	$haenv->log('info', "master seems offline");
	return;
    }

    my ($lrm_results, $lrm_modes) = $self->read_lrm_status();

    my $sc = $haenv->read_service_config();

    $self->{groups} = $haenv->read_group_config(); # update

    # compute new service status

    # add new service
    foreach my $sid (sort keys %$sc) {
	next if $ss->{$sid}; # already there
	my $cd = $sc->{$sid};
	$haenv->log('info', "adding new service '$sid' on node '$cd->{node}'");
	# assume we are running to avoid relocate running service at add
	my $state = ($cd->{state} eq 'started') ? 'started' : 'request_stop';
	$ss->{$sid} = { state => $state, node => $cd->{node},
			uid => compute_new_uuid('started') };
    }

    # remove stale service from manager state
    foreach my $sid (keys %$ss) {
	next if $sc->{$sid};
	$haenv->log('info', "removing stale service '$sid' (no config)");
	# remove all service related state information
	delete $ss->{$sid};
    }

    $self->update_crm_commands();

    # iterate until no service changed state anymore, so chained transitions
    # (e.g. stopped -> started -> migrate) settle within a single manage run
    for (;;) {
	my $repeat = 0;

	$self->recompute_online_node_usage();

	foreach my $sid (sort keys %$ss) {
	    my $sd = $ss->{$sid};
	    my $cd = $sc->{$sid} || { state => 'disabled' };

	    # LRM result for the last command issued for this service, if any
	    my $lrm_res = $sd->{uid} ? $lrm_results->{$sd->{uid}} : undef;

	    my $last_state = $sd->{state};

	    if ($last_state eq 'stopped') {

		$self->next_state_stopped($sid, $cd, $sd, $lrm_res);

	    } elsif ($last_state eq 'started') {

		$self->next_state_started($sid, $cd, $sd, $lrm_res);

	    } elsif ($last_state eq 'migrate' || $last_state eq 'relocate') {

		$self->next_state_migrate_relocate($sid, $cd, $sd, $lrm_res);

	    } elsif ($last_state eq 'fence') {

		# do nothing here - wait until fenced

	    } elsif ($last_state eq 'request_stop') {

		$self->next_state_request_stop($sid, $cd, $sd, $lrm_res);

	    } elsif ($last_state eq 'freeze') {

		my $lrm_mode = $sd->{node} ? $lrm_modes->{$sd->{node}} : undef;
		# unfreeze as soon as the node's LRM reports mode 'active' again
		my $state = ($cd->{state} eq 'started') ? 'started' : 'request_stop';
		&$change_service_state($self, $sid, $state)
		    if $lrm_mode && $lrm_mode eq 'active';

	    } elsif ($last_state eq 'error') {

		$self->next_state_error($sid, $cd, $sd, $lrm_res);

	    } else {

		die "unknown service state '$last_state'";
	    }

	    # freeze services whose LRM is restarting so no command gets lost
	    my $lrm_mode = $sd->{node} ? $lrm_modes->{$sd->{node}} : undef;
	    if ($lrm_mode && $lrm_mode eq 'restart') {
		if (($sd->{state} eq 'started' || $sd->{state} eq 'stopped' ||
		     $sd->{state} eq 'request_stop')) {
		    &$change_service_state($self, $sid, 'freeze');
		}
	    }

	    $repeat = 1 if $sd->{state} ne $last_state;
	}

	# handle fencing
	my $fenced_nodes = {};
	foreach my $sid (sort keys %$ss) {
	    my $sd = $ss->{$sid};
	    next if $sd->{state} ne 'fence';

	    # fence each node at most once per iteration; result is cached
	    if (!defined($fenced_nodes->{$sd->{node}})) {
		$fenced_nodes->{$sd->{node}} = $ns->fence_node($sd->{node}) || 0;
	    }

	    next if !$fenced_nodes->{$sd->{node}};

	    # node fence was successful - recover service
	    &$recover_fenced_service($self, $sid, $sc->{$sid});
	}

	last if !$repeat;
    }

    $self->flush_master_status();
}
482
483 # functions to compute next service states
484 # $cd: service configuration data (read only)
485 # $sd: service status data (read only)
486 #
487 # Note: use change_service_state() to alter state
488 #
489
# State handler for 'request_stop': waits for the LRM stop result and moves
# the service to 'stopped' (or 'error' on failure); triggers fencing when
# the node went away in the meantime.
sub next_state_request_stop {
    my ($self, $sid, $cd, $sd, $lrm_res) = @_;

    my $haenv = $self->{haenv};
    my $ns = $self->{ns};

    # check result from LRM daemon
    if ($lrm_res) {
	my $exit_code = $lrm_res->{exit_code};
	if ($exit_code != SUCCESS) {
	    $haenv->log('err', "service '$sid' stop failed (exit code $exit_code)");
	    &$change_service_state($self, $sid, 'error'); # fixme: what state?
	} else {
	    &$change_service_state($self, $sid, 'stopped');
	}
	return;
    }

    # no result yet - if the node went away, fence it
    if ($ns->node_is_offline_delayed($sd->{node})) {
	&$change_service_state($self, $sid, 'fence');
	return;
    }

    return;
}
514
# State handler for 'migrate'/'relocate': evaluates the LRM result and moves
# the service to its requested state on the target (success), back to the
# source (generic failure) or to 'error' (registered on the wrong node).
# Fences the source node if it went away before a result arrived.
sub next_state_migrate_relocate {
    my ($self, $sid, $cd, $sd, $lrm_res) = @_;

    my $haenv = $self->{haenv};
    my $ns = $self->{ns};

    # check result from LRM daemon
    if ($lrm_res) {
	my $exit_code = $lrm_res->{exit_code};
	my $req_state = $cd->{state} eq 'started' ? 'started' : 'request_stop';
	if ($exit_code == SUCCESS) {
	    # migration worked - service now lives on the target node
	    &$change_service_state($self, $sid, $req_state, node => $sd->{target});
	    return;
	} elsif ($exit_code == EWRONG_NODE) {
	    $haenv->log('err', "service '$sid' - migration failed: service" .
			" registered on wrong node!");
	    &$change_service_state($self, $sid, 'error');
	    # fix: return like the sibling branches do - without this the
	    # offline check below could immediately overwrite the fresh
	    # 'error' state with 'fence'
	    return;
	} else {
	    $haenv->log('err', "service '$sid' - migration failed (exit code $exit_code)");
	    # stay on the source node and retry the requested state there
	    &$change_service_state($self, $sid, $req_state, node => $sd->{node});
	    return;
	}
    }

    # no result yet - if the source node went away, fence it
    if ($ns->node_is_offline_delayed($sd->{node})) {
	&$change_service_state($self, $sid, 'fence');
	return;
    }
}
544
545
# State handler for 'stopped': fixes up a stale node assignment, executes
# queued migrate/relocate commands, and starts the service again when the
# configuration requests it. Fences the node if it went away.
sub next_state_stopped {
    my ($self, $sid, $cd, $sd, $lrm_res) = @_;

    my $haenv = $self->{haenv};
    my $ns = $self->{ns};

    if ($sd->{node} ne $cd->{node}) {
	# this can happen if we fence a node with active migrations
	# hack: modify $sd (normally this should be considered read-only)
	$haenv->log('info', "fixup service '$sid' location ($sd->{node} => $cd->{node})");
	$sd->{node} = $cd->{node};
    }

    # consume a pending crm command, if any
    if (my $pending = delete $sd->{cmd}) {
	my ($cmd, $target) = @$pending;

	if ($cmd ne 'migrate' && $cmd ne 'relocate') {
	    $haenv->log('err', "unknown command '$cmd' for service '$sid'");
	} elsif (!$ns->node_is_online($target)) {
	    $haenv->log('err', "ignore service '$sid' $cmd request - node '$target' not online");
	} elsif ($sd->{node} eq $target) {
	    $haenv->log('info', "ignore service '$sid' $cmd request - service already on node '$target'");
	} else {
	    &$change_service_state($self, $sid, $cmd, node => $sd->{node},
				   target => $target);
	    return;
	}
    }

    if ($cd->{state} eq 'disabled') {
	# NOTE: do nothing here, the stop state is an exception as we do not
	# process the LRM result here, thus the LRM always tries to stop the
	# service (protection for the case no CRM is active)
	return;
    }

    if ($ns->node_is_offline_delayed($sd->{node})) {
	&$change_service_state($self, $sid, 'fence');
	return;
    }

    if ($cd->{state} eq 'stopped') {
	# almost the same as 'disabled' state but the service will also get recovered
	return;
    }

    if ($cd->{state} eq 'started') {
	# simply mark it started, if it's on the wrong node
	# next_state_started will fix that for us
	&$change_service_state($self, $sid, 'started', node => $sd->{node});
	return;
    }

    $haenv->log('err', "service '$sid' - unknown state '$cd->{state}' in service configuration");
}
604
# Remember that service $sid failed to start on $node; the relocation
# policy in next_state_started consults this list to try other nodes.
sub record_service_failed_on_node {
    my ($self, $sid, $node) = @_;

    # push autovivifies the failed_nodes array on first use
    push @{$self->{ss}->{$sid}->{failed_nodes}}, $node;
}
614
# State handler for 'started': triggers fencing when the node went away,
# honors stop/disable requests, executes queued migrate/relocate commands,
# evaluates LRM start results (applying the relocate-on-error policy up to
# $cd->{max_relocate} attempts) and finally re-evaluates node placement.
sub next_state_started {
    my ($self, $sid, $cd, $sd, $lrm_res) = @_;

    my $haenv = $self->{haenv};
    my $master_status = $self->{ms};
    my $ns = $self->{ns};

    if (!$ns->node_is_online($sd->{node})) {
	if ($ns->node_is_offline_delayed($sd->{node})) {
	    &$change_service_state($self, $sid, 'fence');
	}
	# node offline but fence delay not expired yet - just wait
	return;
    }

    if ($cd->{state} eq 'disabled' || $cd->{state} eq 'stopped') {
	&$change_service_state($self, $sid, 'request_stop');
	return;
    }

    if ($cd->{state} eq 'started') {

	if ($sd->{cmd}) {
	    # a queued crm command takes precedence over placement logic
	    my ($cmd, $target) = @{$sd->{cmd}};
	    delete $sd->{cmd};

	    if ($cmd eq 'migrate' || $cmd eq 'relocate') {
		if (!$ns->node_is_online($target)) {
		    $haenv->log('err', "ignore service '$sid' $cmd request - node '$target' not online");
		} elsif ($sd->{node} eq $target) {
		    $haenv->log('info', "ignore service '$sid' $cmd request - service already on node '$target'");
		} else {
		    $haenv->log('info', "$cmd service '$sid' to node '$target'");
		    &$change_service_state($self, $sid, $cmd, node => $sd->{node}, target => $target);
		}
	    } else {
		$haenv->log('err', "unknown command '$cmd' for service '$sid'");
	    }
	} else {

	    # set on LRM start failure; makes select_service_node advance
	    # past the current node
	    my $try_next = 0;

	    if ($lrm_res) {

		my $ec = $lrm_res->{exit_code};
		if ($ec == SUCCESS) {

		    if (defined($sd->{failed_nodes})) {
			$haenv->log('info', "relocation policy successful for '$sid' on node '$sd->{node}'," .
				    " failed nodes: " . join(', ', @{$sd->{failed_nodes}}) );
		    }

		    delete $sd->{failed_nodes};

		    # store flag to indicate successful start - only valid while state == 'started'
		    $sd->{running} = 1;

		} elsif ($ec == ERROR) {

		    delete $sd->{running};

		    # apply our relocate policy if we got ERROR from the LRM
		    $self->record_service_failed_on_node($sid, $sd->{node});

		    if (scalar(@{$sd->{failed_nodes}}) <= $cd->{max_relocate}) {

			# tell select_service_node to relocate if possible
			$try_next = 1;

			$haenv->log('warning', "starting service $sid on node".
				    " '$sd->{node}' failed, relocating service.");

		    } else {

			# relocation attempts exhausted - give up
			$haenv->log('err', "recovery policy for service $sid " .
				    "failed, entering error state. Failed nodes: ".
				    join(', ', @{$sd->{failed_nodes}}));
			&$change_service_state($self, $sid, 'error');
			return;

		    }
		} else {
		    $self->record_service_failed_on_node($sid, $sd->{node});

		    $haenv->log('err', "service '$sid' got unrecoverable error" .
				" (exit code $ec))");
		    # we have no safe way out (yet) for other errors
		    &$change_service_state($self, $sid, 'error');
		    return;
		}
	    }

	    # re-evaluate placement (group priorities, node usage, try_next)
	    my $node = select_service_node($self->{groups}, $self->{online_node_usage},
					   $cd, $sd->{node}, $try_next, $sd->{failed_nodes});

	    if ($node && ($sd->{node} ne $node)) {
		if ($cd->{type} eq 'vm') {
		    $haenv->log('info', "migrate service '$sid' to node '$node' (running)");
		    &$change_service_state($self, $sid, 'migrate', node => $sd->{node}, target => $node);
		} else {
		    # non-vm resources get a stop/start relocation instead
		    $haenv->log('info', "relocate service '$sid' to node '$node'");
		    &$change_service_state($self, $sid, 'relocate', node => $sd->{node}, target => $node);
		}
	    } else {
		if ($try_next && !defined($node)) {
		    $haenv->log('warning', "Start Error Recovery: Tried all available " .
				" nodes for service '$sid', retry start on current node. " .
				"Tried nodes: " . join(', ', @{$sd->{failed_nodes}}));
		}
		# ensure service get started again if it went unexpected down
		# but ensure also no LRM result gets lost
		$sd->{uid} = compute_new_uuid($sd->{state}) if defined($lrm_res);
	    }
	}

	return;
    }

    $haenv->log('err', "service '$sid' - unknown state '$cd->{state}' in service configuration");
}
734
# State handler for 'error': the service stays in error until the admin
# disables it, which resets the relocation history and marks it stopped.
sub next_state_error {
    my ($self, $sid, $cd, $sd, $lrm_res) = @_;

    my $ns = $self->{ns};
    my $ms = $self->{ms};

    # only a 'disabled' request gets a service out of the error state
    return if $cd->{state} ne 'disabled';

    # clean up on error recovery
    delete $sd->{failed_nodes};

    &$change_service_state($self, $sid, 'stopped');
    return;
}
750
751 1;