1 package PVE::HA::Manager;
2
3 use strict;
4 use warnings;
5 use Digest::MD5 qw(md5_base64);
6
7 use PVE::Tools;
8 use PVE::HA::Tools ':exit_codes';
9 use PVE::HA::NodeStatus;
10
11 sub new {
12 my ($this, $haenv) = @_;
13
14 my $class = ref($this) || $this;
15
16 my $self = bless { haenv => $haenv }, $class;
17
18 my $old_ms = $haenv->read_manager_status();
19
20 # we only copy the state part of the manager status, which cannot be auto-generated
21
22 $self->{ns} = PVE::HA::NodeStatus->new($haenv, $old_ms->{node_status} || {});
23
24 # fixme: use separate class PVE::HA::ServiceStatus
25 $self->{ss} = $old_ms->{service_status} || {};
26
27 $self->{ms} = { master_node => $haenv->nodename() };
28
29 return $self;
30 }
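
# Illustrative usage sketch (comment only, not part of the module; assumes the
# CRM daemon drives the manager and that $haenv implements the PVE::HA::Env
# interface):
#
#   my $manager = PVE::HA::Manager->new($haenv);
#   $manager->manage();   # run one scheduling round
#   $manager->cleanup();  # on shutdown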
31
32 sub cleanup {
33 my ($self) = @_;
34
35 # todo: ?
36 }
37
38 sub flush_master_status {
39 my ($self) = @_;
40
41 my ($haenv, $ms, $ns, $ss) = ($self->{haenv}, $self->{ms}, $self->{ns}, $self->{ss});
42
43 $ms->{node_status} = $ns->{status};
44 $ms->{service_status} = $ss;
45 $ms->{timestamp} = $haenv->get_time();
46
47 $haenv->write_manager_status($ms);
48 }
49
50 sub get_service_group {
51 my ($groups, $online_node_usage, $service_conf) = @_;
52
53 my $group = {};
54 # add all online nodes to default group to allow try_next when no group set
55 foreach my $node (keys %$online_node_usage) {
56 $group->{nodes}->{$node} = 1;
57 }
58
59 # overwrite default if service is bound to a specific group
60 if (my $group_id = $service_conf->{group}) {
61 $group = $groups->{ids}->{$group_id} if $groups->{ids}->{$group_id};
62 }
63
64 return $group;
65 }
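
# Example (comment sketch with hypothetical nodes): for a service without a
# 'group' entry and $online_node_usage = { node1 => 0, node2 => 1 }, the
# synthetic default group { nodes => { node1 => 1, node2 => 1 } } is returned;
# if the service references a configured group id, that group's config hash
# from $groups->{ids} is returned instead (including any 'restricted' or
# 'nofailback' flags).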
66
67 # group the available nodes by priority, using the priority as group index
68 sub get_node_priority_groups {
69 my ($group, $online_node_usage) = @_;
70
71 my $pri_groups = {};
72 my $group_members = {};
73 foreach my $entry (keys %{$group->{nodes}}) {
74 my ($node, $pri) = ($entry, 0);
75 if ($entry =~ m/^(\S+):(\d+)$/) {
76 ($node, $pri) = ($1, $2);
77 }
78 next if !defined($online_node_usage->{$node}); # offline
79 $pri_groups->{$pri}->{$node} = 1;
80 $group_members->{$node} = $pri;
81 }
82
83 # add non-group members to unrestricted groups (priority -1)
84 if (!$group->{restricted}) {
85 my $pri = -1;
86 foreach my $node (keys %$online_node_usage) {
87 next if defined($group_members->{$node});
88 $pri_groups->{$pri}->{$node} = 1;
89 $group_members->{$node} = -1;
90 }
91 }
92
93 return ($pri_groups, $group_members);
94 }
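
# Example (comment sketch with hypothetical nodes): for an unrestricted group
# { nodes => { 'node1:2' => 1, 'node2' => 1 } } and online nodes node1..node3,
# the result would be
#
#   $pri_groups    = { 2 => { node1 => 1 }, 0 => { node2 => 1 }, -1 => { node3 => 1 } }
#   $group_members = { node1 => 2, node2 => 0, node3 => -1 }
#
# i.e. explicit priorities win over the implicit 0, and nodes outside the
# group are added with priority -1.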
95
96 sub select_service_node {
97 my ($groups, $online_node_usage, $service_conf, $current_node, $try_next, $tried_nodes, $maintenance_fallback) = @_;
98
99 my $group = get_service_group($groups, $online_node_usage, $service_conf);
100
101 my ($pri_groups, $group_members) = get_node_priority_groups($group, $online_node_usage);
102
103 my @pri_list = sort {$b <=> $a} keys %$pri_groups;
104 return undef if !scalar(@pri_list);
105
106 # stay on current node if possible (avoids random migrations)
107 if (!$try_next && $group->{nofailback} && defined($group_members->{$current_node})) {
108 return $current_node;
109 }
110
111 # select node from top priority node list
112
113 my $top_pri = $pri_list[0];
114
115 # try to avoid nodes where the service failed already if we want to relocate
116 if ($try_next) {
117 foreach my $node (@$tried_nodes) {
118 delete $pri_groups->{$top_pri}->{$node};
119 }
120 }
121
122 my @nodes = sort {
123 $online_node_usage->{$a} <=> $online_node_usage->{$b} || $a cmp $b
124 } keys %{$pri_groups->{$top_pri}};
125
126 my $found;
127 my $found_maintenance_fallback;
128 for (my $i = scalar(@nodes) - 1; $i >= 0; $i--) {
129 my $node = $nodes[$i];
130 if ($node eq $current_node) {
131 $found = $i;
132 }
133 if (defined($maintenance_fallback) && $node eq $maintenance_fallback) {
134 $found_maintenance_fallback = $i;
135 }
136 }
137
138 if (defined($found_maintenance_fallback)) {
139 return $nodes[$found_maintenance_fallback];
140 }
141
142 if ($try_next) {
143 if (defined($found) && ($found < (scalar(@nodes) - 1))) {
144 return $nodes[$found + 1];
145 } else {
146 return $nodes[0];
147 }
148 } elsif (defined($found)) {
149 return $nodes[$found];
150 } else {
151 return $nodes[0];
152 }
153 }
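
# Example call (comment sketch, mirroring how next_state_started uses it): pick
# a target for a service that failed to start on its current node and should be
# relocated if possible:
#
#   my $node = select_service_node(
#       $self->{groups}, $self->{online_node_usage}, $cd,
#       $sd->{node}, 1, $sd->{failed_nodes}, $sd->{maintenance_node});
#
# Nodes of the top priority group are ordered by current usage (ties broken by
# name); with $try_next set, already tried nodes are skipped and the node after
# the current one in that ordering is chosen, wrapping around to the first.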
154
155 my $uid_counter = 0;
156
157 sub compute_new_uuid {
158 my ($state) = @_;
159
160 $uid_counter++;
161 return md5_base64($state . $$ . time() . $uid_counter);
162 }
163
164 my $valid_service_states = {
165 stopped => 1,
166 request_stop => 1,
167 started => 1,
168 fence => 1,
169 recovery => 1,
170 migrate => 1,
171 relocate => 1,
172 freeze => 1,
173 error => 1,
174 };
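
# Rough overview of the transitions implemented by manage() and the
# next_state_* handlers below (comment sketch, not exhaustive):
#
#   stopped          -> started | migrate | relocate | fence
#   request_stop     -> stopped | error | fence
#   started          -> request_stop | migrate | relocate | error | fence
#   migrate/relocate -> started | request_stop | error | fence
#   fence            -> recovery  (once the node was fenced successfully)
#   recovery         -> started | request_stop | stopped (if disabled)
#   freeze           -> started | request_stop  (when the LRM is active again)
#   error            -> stopped  (after the service was disabled)
#
# Services on a node whose LRM is in 'restart' mode are put into 'freeze'.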
175
176 sub recompute_online_node_usage {
177 my ($self) = @_;
178
179 my $online_node_usage = {};
180
181 my $online_nodes = $self->{ns}->list_online_nodes();
182
183 foreach my $node (@$online_nodes) {
184 $online_node_usage->{$node} = 0;
185 }
186
187 foreach my $sid (keys %{$self->{ss}}) {
188 my $sd = $self->{ss}->{$sid};
189 my $state = $sd->{state};
190 if (defined($online_node_usage->{$sd->{node}})) {
191 if (
192 $state eq 'started' || $state eq 'request_stop' || $state eq 'fence' ||
193 $state eq 'freeze' || $state eq 'error' || $state eq 'recovery'
194 ) {
195 $online_node_usage->{$sd->{node}}++;
196 } elsif (($state eq 'migrate') || ($state eq 'relocate')) {
197 # count it for both source and target, as load is put on both
198 $online_node_usage->{$sd->{node}}++;
199 $online_node_usage->{$sd->{target}}++;
200 } elsif ($state eq 'stopped') {
201 # do nothing
202 } else {
203 die "should not be reached (sid = '$sid', state = '$state')";
204 }
205 }
206 }
207
208 $self->{online_node_usage} = $online_node_usage;
209 }
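
# Example result (comment sketch with hypothetical nodes): with three online
# nodes, one service started on node1 and one migrating from node1 to node2,
# the usage map would be { node1 => 2, node2 => 1, node3 => 0 }, since
# migrating/relocating services are counted on both source and target.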
210
211 my $change_service_state = sub {
212 my ($self, $sid, $new_state, %params) = @_;
213
214 my ($haenv, $ss) = ($self->{haenv}, $self->{ss});
215
216 my $sd = $ss->{$sid} || die "no such service '$sid'\n";
217
218 my $old_state = $sd->{state};
219 my $old_node = $sd->{node};
220 my $old_failed_nodes = $sd->{failed_nodes};
221 my $old_maintenance_node = $sd->{maintenance_node};
222
223 die "no state change" if $old_state eq $new_state; # just to be sure
224
225 die "invalid CRM service state '$new_state'\n" if !$valid_service_states->{$new_state};
226
227 foreach my $k (keys %$sd) { delete $sd->{$k}; };
228
229 $sd->{state} = $new_state;
230 $sd->{node} = $old_node;
231 $sd->{failed_nodes} = $old_failed_nodes if defined($old_failed_nodes);
232 $sd->{maintenance_node} = $old_maintenance_node if defined($old_maintenance_node);
233
234 my $text_state = '';
235 foreach my $k (sort keys %params) {
236 my $v = $params{$k};
237 $text_state .= ", " if $text_state;
238 $text_state .= "$k = $v";
239 $sd->{$k} = $v;
240 }
241
242 $self->recompute_online_node_usage();
243
244 $sd->{uid} = compute_new_uuid($new_state);
245
246 $text_state = " ($text_state)" if $text_state;
247 $haenv->log('info', "service '$sid': state changed from '${old_state}'" .
248 " to '${new_state}'$text_state");
249 };
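
# Example call (comment sketch): extra key/value parameters are stored in the
# service status entry and included in the logged message, e.g.
#
#   $change_service_state->($self, $sid, 'migrate',
#       node => $sd->{node}, target => $target);
#
# logs "service '$sid': state changed from 'started' to 'migrate'
# (node = ..., target = ...)" and keeps node/target in $ss->{$sid}.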
250
251 # clean up a possible bad state from a recovered service to allow its start
252 my $fence_recovery_cleanup = sub {
253 my ($self, $sid, $fenced_node) = @_;
254
255 my $haenv = $self->{haenv};
256
257 my (undef, $type, $id) = $haenv->parse_sid($sid);
258 my $plugin = PVE::HA::Resources->lookup($type);
259
260 # should not happen
261 die "unknown resource type '$type'" if !$plugin;
262
263 # locks may block recovery; clean up those which are safe to remove after fencing,
264 # i.e., after the original node was reset and thus all of its state was cleared
265 my $removable_locks = [
266 'backup',
267 'mounted',
268 'migrate',
269 'clone',
270 'rollback',
271 'snapshot',
272 'snapshot-delete',
273 'suspending',
274 'suspended',
275 ];
276 if (my $removed_lock = $plugin->remove_locks($haenv, $id, $removable_locks, $fenced_node)) {
277 $haenv->log('warning', "removed leftover lock '$removed_lock' from recovered " .
278 "service '$sid' to allow its start.");
279 }
280 };
281
282 # read LRM status for all nodes
283 sub read_lrm_status {
284 my ($self) = @_;
285
286 my $nodes = $self->{ns}->list_nodes();
287 my $haenv = $self->{haenv};
288
289 my $results = {};
290 my $modes = {};
291 foreach my $node (@$nodes) {
292 my $lrm_status = $haenv->read_lrm_status($node);
293 $modes->{$node} = $lrm_status->{mode} || 'active';
294 foreach my $uid (keys %{$lrm_status->{results}}) {
295 next if $results->{$uid}; # should not happen
296 $results->{$uid} = $lrm_status->{results}->{$uid};
297 }
298 }
299
300 return ($results, $modes);
301 }
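
# Example return value (comment sketch with hypothetical uids): the first
# element maps result uids to LRM result entries, the second maps node names
# to their LRM mode, e.g.
#
#   $results = { 'aBcD12' => { exit_code => 0, ... } },
#   $modes   = { node1 => 'active', node2 => 'restart' }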
302
303 # read new crm commands and save them into crm master status
304 sub update_crm_commands {
305 my ($self) = @_;
306
307 my ($haenv, $ms, $ns, $ss) = ($self->{haenv}, $self->{ms}, $self->{ns}, $self->{ss});
308
309 my $cmdlist = $haenv->read_crm_commands();
310
311 foreach my $cmd (split(/\n/, $cmdlist)) {
312 chomp $cmd;
313
314 if ($cmd =~ m/^(migrate|relocate)\s+(\S+)\s+(\S+)$/) {
315 my ($task, $sid, $node) = ($1, $2, $3);
316 if (my $sd = $ss->{$sid}) {
317 if (!$ns->node_is_online($node)) {
318 $haenv->log('err', "crm command error - node not online: $cmd");
319 } else {
320 if ($node eq $sd->{node}) {
321 $haenv->log('info', "ignore crm command - service already on target node: $cmd");
322 } else {
323 $haenv->log('info', "got crm command: $cmd");
324 $ss->{$sid}->{cmd} = [ $task, $node ];
325 }
326 }
327 } else {
328 $haenv->log('err', "crm command error - no such service: $cmd");
329 }
330
331 } elsif ($cmd =~ m/^stop\s+(\S+)\s+(\S+)$/) {
332 my ($sid, $timeout) = ($1, $2);
333 if (my $sd = $ss->{$sid}) {
334 $haenv->log('info', "got crm command: $cmd");
335 $ss->{$sid}->{cmd} = [ 'stop', $timeout ];
336 } else {
337 $haenv->log('err', "crm command error - no such service: $cmd");
338 }
339 } else {
340 $haenv->log('err', "unable to parse crm command: $cmd");
341 }
342 }
343
344 }
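
# Example command strings as returned by $haenv->read_crm_commands()
# (comment sketch with hypothetical service ids):
#
#   "migrate vm:100 node2"  -> $ss->{'vm:100'}->{cmd} = [ 'migrate', 'node2' ]
#   "relocate ct:101 node3" -> $ss->{'ct:101'}->{cmd} = [ 'relocate', 'node3' ]
#   "stop vm:100 180"       -> $ss->{'vm:100'}->{cmd} = [ 'stop', '180' ]
#
# Commands for unknown services, offline target nodes, or services already on
# the requested node are only logged and otherwise ignored.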
345
346 sub manage {
347 my ($self) = @_;
348
349 my ($haenv, $ms, $ns, $ss) = ($self->{haenv}, $self->{ms}, $self->{ns}, $self->{ss});
350
351 my ($node_info) = $haenv->get_node_info();
352 my ($lrm_results, $lrm_modes) = $self->read_lrm_status();
353
354 $ns->update($node_info, $lrm_modes);
355
356 if (!$ns->node_is_operational($haenv->nodename())) {
357 $haenv->log('info', "master seems offline");
358 return;
359 }
360
361 my $sc = $haenv->read_service_config();
362
363 $self->{groups} = $haenv->read_group_config(); # update
364
365 # compute new service status
366
367 # add new service
368 foreach my $sid (sort keys %$sc) {
369 next if $ss->{$sid}; # already there
370 my $cd = $sc->{$sid};
371 next if $cd->{state} eq 'ignored';
372
373 $haenv->log('info', "adding new service '$sid' on node '$cd->{node}'");
374 # assume the service is running, to avoid relocating a running service when it is added
375 my $state = ($cd->{state} eq 'started') ? 'started' : 'request_stop';
376 $ss->{$sid} = { state => $state, node => $cd->{node},
377 uid => compute_new_uuid('started') };
378 }
379
380 # remove stale or ignored services from manager state
381 foreach my $sid (keys %$ss) {
382 next if $sc->{$sid} && $sc->{$sid}->{state} ne 'ignored';
383
384 my $reason = defined($sc->{$sid}) ? 'ignored state requested' : 'no config';
385 $haenv->log('info', "removing stale service '$sid' ($reason)");
386
387 # remove all service related state information
388 delete $ss->{$sid};
389 }
390
391 $self->update_crm_commands();
392
393 for (;;) {
394 my $repeat = 0;
395
396 $self->recompute_online_node_usage();
397
398 foreach my $sid (sort keys %$ss) {
399 my $sd = $ss->{$sid};
400 my $cd = $sc->{$sid} || { state => 'disabled' };
401
402 my $lrm_res = $sd->{uid} ? $lrm_results->{$sd->{uid}} : undef;
403
404 my $last_state = $sd->{state};
405
406 if ($last_state eq 'stopped') {
407
408 $self->next_state_stopped($sid, $cd, $sd, $lrm_res);
409
410 } elsif ($last_state eq 'started') {
411
412 $self->next_state_started($sid, $cd, $sd, $lrm_res);
413
414 } elsif ($last_state eq 'migrate' || $last_state eq 'relocate') {
415
416 $self->next_state_migrate_relocate($sid, $cd, $sd, $lrm_res);
417
418 } elsif ($last_state eq 'fence') {
419
420 # do nothing here - wait until fenced
421
422 } elsif ($last_state eq 'recovery') {
423
424 $self->next_state_recovery($sid, $cd, $sd, $lrm_res);
425
426 } elsif ($last_state eq 'request_stop') {
427
428 $self->next_state_request_stop($sid, $cd, $sd, $lrm_res);
429
430 } elsif ($last_state eq 'freeze') {
431
432 my $lrm_mode = $sd->{node} ? $lrm_modes->{$sd->{node}} : undef;
433 # unfreeze
434 my $state = ($cd->{state} eq 'started') ? 'started' : 'request_stop';
435 &$change_service_state($self, $sid, $state)
436 if $lrm_mode && $lrm_mode eq 'active';
437
438 } elsif ($last_state eq 'error') {
439
440 $self->next_state_error($sid, $cd, $sd, $lrm_res);
441
442 } else {
443
444 die "unknown service state '$last_state'";
445 }
446
447 my $lrm_mode = $sd->{node} ? $lrm_modes->{$sd->{node}} : undef;
448 if ($lrm_mode && $lrm_mode eq 'restart') {
449 if (($sd->{state} eq 'started' || $sd->{state} eq 'stopped' ||
450 $sd->{state} eq 'request_stop')) {
451 &$change_service_state($self, $sid, 'freeze');
452 }
453 }
454
455 $repeat = 1 if $sd->{state} ne $last_state;
456 }
457
458 # handle fencing
459 my $fenced_nodes = {};
460 foreach my $sid (sort keys %$ss) {
461 my $sd = $ss->{$sid};
462 next if $sd->{state} ne 'fence';
463
464 my $service_node = $sd->{node};
465
466 if (!defined($fenced_nodes->{$service_node})) {
467 $fenced_nodes->{$service_node} = $ns->fence_node($sd->{node}) || 0;
468 }
469
470 next if !$fenced_nodes->{$service_node};
471
472 # node fence was successful - recover service
473 $change_service_state->($self, $sid, 'recovery');
474 $repeat = 1; # for faster recovery execution
475 }
476
477 last if !$repeat;
478 }
479
480 $self->flush_master_status();
481 }
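
# manage() flow summary (comment sketch): update the node status from quorum
# and LRM info, sync the service list with the configuration, apply queued CRM
# commands, then iterate the per-service state machine until a pass produces
# no further state changes, fencing failed nodes and recovering their services
# along the way; finally the new manager status is written back.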
482
483 # functions to compute next service states
484 # $cd: service configuration data (read only)
485 # $sd: service status data (read only)
486 #
487 # Note: use change_service_state() to alter state
488 #
489
490 sub next_state_request_stop {
491 my ($self, $sid, $cd, $sd, $lrm_res) = @_;
492
493 my $haenv = $self->{haenv};
494 my $ns = $self->{ns};
495
496 # check result from LRM daemon
497 if ($lrm_res) {
498 my $exit_code = $lrm_res->{exit_code};
499 if ($exit_code == SUCCESS) {
500 &$change_service_state($self, $sid, 'stopped');
501 return;
502 } else {
503 $haenv->log('err', "service '$sid' stop failed (exit code $exit_code)");
504 &$change_service_state($self, $sid, 'error'); # fixme: what state?
505 return;
506 }
507 }
508
509 if ($ns->node_is_offline_delayed($sd->{node})) {
510 &$change_service_state($self, $sid, 'fence');
511 return;
512 }
513 }
514
515 sub next_state_migrate_relocate {
516 my ($self, $sid, $cd, $sd, $lrm_res) = @_;
517
518 my $haenv = $self->{haenv};
519 my $ns = $self->{ns};
520
521 # check result from LRM daemon
522 if ($lrm_res) {
523 my $exit_code = $lrm_res->{exit_code};
524 my $req_state = $cd->{state} eq 'started' ? 'started' : 'request_stop';
525 if ($exit_code == SUCCESS) {
526 &$change_service_state($self, $sid, $req_state, node => $sd->{target});
527 return;
528 } elsif ($exit_code == EWRONG_NODE) {
529 $haenv->log('err', "service '$sid' - migration failed: service" .
530 " registered on wrong node!");
531 &$change_service_state($self, $sid, 'error');
532 } else {
533 $haenv->log('err', "service '$sid' - migration failed (exit code $exit_code)");
534 &$change_service_state($self, $sid, $req_state, node => $sd->{node});
535 return;
536 }
537 }
538
539 if ($ns->node_is_offline_delayed($sd->{node})) {
540 &$change_service_state($self, $sid, 'fence');
541 return;
542 }
543 }
544
545 sub next_state_stopped {
546 my ($self, $sid, $cd, $sd, $lrm_res) = @_;
547
548 my $haenv = $self->{haenv};
549 my $ns = $self->{ns};
550
551 if ($sd->{node} ne $cd->{node}) {
552 # this can happen if we fence a node with active migrations
553 # hack: modify $sd (normally this should be considered read-only)
554 $haenv->log('info', "fixup service '$sid' location ($sd->{node} => $cd->{node})");
555 $sd->{node} = $cd->{node};
556 }
557
558 if ($sd->{cmd}) {
559 my $cmd = shift @{$sd->{cmd}};
560
561 if ($cmd eq 'migrate' || $cmd eq 'relocate') {
562 my $target = shift @{$sd->{cmd}};
563 if (!$ns->node_is_online($target)) {
564 $haenv->log('err', "ignore service '$sid' $cmd request - node '$target' not online");
565 } elsif ($sd->{node} eq $target) {
566 $haenv->log('info', "ignore service '$sid' $cmd request - service already on node '$target'");
567 } else {
568 &$change_service_state($self, $sid, $cmd, node => $sd->{node},
569 target => $target);
570 return;
571 }
572 } elsif ($cmd eq 'stop') {
573 $haenv->log('info', "ignore service '$sid' $cmd request - service already stopped");
574 } else {
575 $haenv->log('err', "unknown command '$cmd' for service '$sid'");
576 }
577 delete $sd->{cmd};
578 }
579
580 if ($cd->{state} eq 'disabled') {
581 # NOTE: do nothing here, the stop state is an exception as we do not
582 # process the LRM result here, thus the LRM always tries to stop the
583 # service (protection for the case no CRM is active)
584 return;
585 }
586
587 if ($ns->node_is_offline_delayed($sd->{node}) && $ns->get_node_state($sd->{node}) ne 'maintenance') {
588 &$change_service_state($self, $sid, 'fence');
589 return;
590 }
591
592 if ($cd->{state} eq 'stopped') {
593 # almost the same as 'disabled' state but the service will also get recovered
594 return;
595 }
596
597 if ($cd->{state} eq 'started') {
598 # simply mark it started; if it's on the wrong node,
599 # next_state_started will fix that for us
600 &$change_service_state($self, $sid, 'started', node => $sd->{node});
601 return;
602 }
603
604 $haenv->log('err', "service '$sid' - unknown state '$cd->{state}' in service configuration");
605 }
606
607 sub record_service_failed_on_node {
608 my ($self, $sid, $node) = @_;
609
610 if (!defined($self->{ss}->{$sid}->{failed_nodes})) {
611 $self->{ss}->{$sid}->{failed_nodes} = [];
612 }
613
614 push @{$self->{ss}->{$sid}->{failed_nodes}}, $node;
615 }
616
617 sub next_state_started {
618 my ($self, $sid, $cd, $sd, $lrm_res) = @_;
619
620 my $haenv = $self->{haenv};
621 my $master_status = $self->{ms};
622 my $ns = $self->{ns};
623
624 if (!$ns->node_is_online($sd->{node})) {
625 if ($ns->node_is_offline_delayed($sd->{node})) {
626 &$change_service_state($self, $sid, 'fence');
627 }
628 if ($ns->get_node_state($sd->{node}) ne 'maintenance') {
629 return;
630 } else {
631 # save current node as fallback for when it comes out of
632 # maintenance
633 $sd->{maintenance_node} = $sd->{node};
634 }
635 }
636
637 if ($cd->{state} eq 'disabled' || $cd->{state} eq 'stopped') {
638 &$change_service_state($self, $sid, 'request_stop');
639 return;
640 }
641
642 if ($cd->{state} eq 'started') {
643
644 if ($sd->{cmd}) {
645 my $cmd = shift @{$sd->{cmd}};
646
647 if ($cmd eq 'migrate' || $cmd eq 'relocate') {
648 my $target = shift @{$sd->{cmd}};
649 if (!$ns->node_is_online($target)) {
650 $haenv->log('err', "ignore service '$sid' $cmd request - node '$target' not online");
651 } elsif ($sd->{node} eq $target) {
652 $haenv->log('info', "ignore service '$sid' $cmd request - service already on node '$target'");
653 } else {
654 $haenv->log('info', "$cmd service '$sid' to node '$target'");
655 &$change_service_state($self, $sid, $cmd, node => $sd->{node}, target => $target);
656 }
657 } elsif ($cmd eq 'stop') {
658 my $timeout = shift @{$sd->{cmd}};
659 if ($timeout == 0) {
660 $haenv->log('info', "request immediate service hard-stop for service '$sid'");
661 } else {
662 $haenv->log('info', "request graceful stop with timeout '$timeout' for service '$sid'");
663 }
664 &$change_service_state($self, $sid, 'request_stop', timeout => $timeout);
665 $haenv->update_service_config($sid, {'state' => 'stopped'});
666 } else {
667 $haenv->log('err', "unknown command '$cmd' for service '$sid'");
668 }
669
670 delete $sd->{cmd};
671
672 } else {
673
674 my $try_next = 0;
675
676 if ($lrm_res) {
677
678 my $ec = $lrm_res->{exit_code};
679 if ($ec == SUCCESS) {
680
681 if (defined($sd->{failed_nodes})) {
682 $haenv->log('info', "relocation policy successful for '$sid' on node '$sd->{node}'," .
683 " failed nodes: " . join(', ', @{$sd->{failed_nodes}}) );
684 }
685
686 delete $sd->{failed_nodes};
687
688 # store flag to indicate successful start - only valid while state == 'started'
689 $sd->{running} = 1;
690
691 } elsif ($ec == ERROR) {
692
693 delete $sd->{running};
694
695 # apply our relocate policy if we got ERROR from the LRM
696 $self->record_service_failed_on_node($sid, $sd->{node});
697
698 if (scalar(@{$sd->{failed_nodes}}) <= $cd->{max_relocate}) {
699
700 # tell select_service_node to relocate if possible
701 $try_next = 1;
702
703 $haenv->log('warning', "starting service $sid on node".
704 " '$sd->{node}' failed, relocating service.");
705
706 } else {
707
708 $haenv->log('err', "recovery policy for service $sid " .
709 "failed, entering error state. Failed nodes: ".
710 join(', ', @{$sd->{failed_nodes}}));
711 &$change_service_state($self, $sid, 'error');
712 return;
713
714 }
715 } else {
716 $self->record_service_failed_on_node($sid, $sd->{node});
717
718 $haenv->log('err', "service '$sid' got unrecoverable error" .
719 " (exit code $ec))");
720 # we have no safe way out (yet) for other errors
721 &$change_service_state($self, $sid, 'error');
722 return;
723 }
724 }
725
726 my $node = select_service_node(
727 $self->{groups},
728 $self->{online_node_usage},
729 $cd,
730 $sd->{node},
731 $try_next,
732 $sd->{failed_nodes},
733 $sd->{maintenance_node},
734 );
735
736 if ($node && ($sd->{node} ne $node)) {
737 $self->{online_node_usage}->{$node}++;
738
739 if (defined(my $fallback = $sd->{maintenance_node})) {
740 if ($node eq $fallback) {
741 $haenv->log('info', "moving service '$sid' back to '$fallback', node came back from maintenance.");
742 delete $sd->{maintenance_node};
743 } elsif ($sd->{node} ne $fallback) {
744 $haenv->log('info', "dropping maintenance fallback node '$fallback' for '$sid'");
745 delete $sd->{maintenance_node};
746 }
747 }
748
749 if ($cd->{type} eq 'vm') {
750 $haenv->log('info', "migrate service '$sid' to node '$node' (running)");
751 &$change_service_state($self, $sid, 'migrate', node => $sd->{node}, target => $node);
752 } else {
753 $haenv->log('info', "relocate service '$sid' to node '$node'");
754 &$change_service_state($self, $sid, 'relocate', node => $sd->{node}, target => $node);
755 }
756 } else {
757 if ($try_next && !defined($node)) {
758 $haenv->log('warning', "Start Error Recovery: Tried all available " .
759 " nodes for service '$sid', retry start on current node. " .
760 "Tried nodes: " . join(', ', @{$sd->{failed_nodes}}));
761 }
762 # ensure the service gets started again if it went down unexpectedly,
763 # but also ensure no LRM result gets lost
764 $sd->{uid} = compute_new_uuid($sd->{state}) if defined($lrm_res);
765 }
766 }
767
768 return;
769 }
770
771 $haenv->log('err', "service '$sid' - unknown state '$cd->{state}' in service configuration");
772 }
773
774 sub next_state_error {
775 my ($self, $sid, $cd, $sd, $lrm_res) = @_;
776
777 my $ns = $self->{ns};
778 my $ms = $self->{ms};
779
780 if ($cd->{state} eq 'disabled') {
781 # clean up on error recovery
782 delete $sd->{failed_nodes};
783
784 &$change_service_state($self, $sid, 'stopped');
785 return;
786 }
787
788 }
789
790 # after a node was fenced this recovers the service to a new node
791 sub next_state_recovery {
792 my ($self, $sid, $cd, $sd, $lrm_res) = @_;
793
794 my ($haenv, $ss) = ($self->{haenv}, $self->{ss});
795 my $ns = $self->{ns};
796 my $ms = $self->{ms};
797
798 if ($sd->{state} ne 'recovery') { # should not happen
799 $haenv->log('err', "cannot recover service '$sid' from fencing, wrong state '$sd->{state}'");
800 return;
801 }
802
803 my $fenced_node = $sd->{node}; # for logging purpose
804
805 $self->recompute_online_node_usage(); # we want the most current node state
806
807 my $recovery_node = select_service_node(
808 $self->{groups},
809 $self->{online_node_usage},
810 $cd,
811 $sd->{node},
812 );
813
814 if ($recovery_node) {
815 my $msg = "recover service '$sid' from fenced node '$fenced_node' to node '$recovery_node'";
816 if ($recovery_node eq $fenced_node) {
817 # can happen with restricted groups if the node came up again OK
818 $msg = "recover service '$sid' to previous failed and fenced node '$fenced_node' again";
819 }
820 $haenv->log('info', "$msg");
821
822 $fence_recovery_cleanup->($self, $sid, $fenced_node);
823
824 $haenv->steal_service($sid, $sd->{node}, $recovery_node);
825 $self->{online_node_usage}->{$recovery_node}++;
826
827 # NOTE: $sd *is normally read-only*, fencing is the exception
828 $cd->{node} = $sd->{node} = $recovery_node;
829 my $new_state = ($cd->{state} eq 'started') ? 'started' : 'request_stop';
830 $change_service_state->($self, $sid, $new_state, node => $recovery_node);
831 } else {
832 # no possible node found, cannot recover - but retry later, as we always try to make it available
833 $haenv->log('err', "recovering service '$sid' from fenced node '$fenced_node' failed, no recovery node found");
834
835 if ($cd->{state} eq 'disabled') {
836 # allow getting a service out of recovery manually if an admin disables it.
837 delete $sd->{failed_nodes}; # clean up on recovery to stopped
838 $change_service_state->($self, $sid, 'stopped'); # must NOT go through request_stop
839 return;
840 }
841 }
842 }
843
844 1;