package PVE::HA::Manager;

use strict;
use warnings;
use Digest::MD5 qw(md5_base64);

use PVE::Tools;
use PVE::HA::Tools ':exit_codes';
use PVE::HA::NodeStatus;

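# create a new manager instance for $haenv and restore the node and service status parts of the
# previously persisted manager status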
sub new {
    my ($this, $haenv) = @_;

    my $class = ref($this) || $this;

    my $self = bless { haenv => $haenv }, $class;

    my $old_ms = $haenv->read_manager_status();

    # we only copy the state part of the manager status which cannot be auto-generated

    $self->{ns} = PVE::HA::NodeStatus->new($haenv, $old_ms->{node_status} || {});

    # fixme: use separate class PVE::HA::ServiceStatus
    $self->{ss} = $old_ms->{service_status} || {};

    $self->{ms} = { master_node => $haenv->nodename() };

    return $self;
}

sub cleanup {
    my ($self) = @_;

    # todo: ?
}

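# assemble the current manager status (node status, service status, timestamp) and persist it
# through the environment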
sub flush_master_status {
    my ($self) = @_;

    my ($haenv, $ms, $ns, $ss) = ($self->{haenv}, $self->{ms}, $self->{ns}, $self->{ss});

    $ms->{node_status} = $ns->{status};
    $ms->{service_status} = $ss;
    $ms->{timestamp} = $haenv->get_time();

    $haenv->write_manager_status($ms);
}

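# return the HA group configured for a service; falls back to an unrestricted default group
# containing all online nodes if the service is not bound to a group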
sub get_service_group {
    my ($groups, $online_node_usage, $service_conf) = @_;

    my $group = {};
    # add all online nodes to default group to allow try_next when no group set
    foreach my $node (keys %$online_node_usage) {
        $group->{nodes}->{$node} = 1;
    }

    # overwrite default if service is bound to a specific group
    if (my $group_id = $service_conf->{group}) {
        $group = $groups->{ids}->{$group_id} if $groups->{ids}->{$group_id};
    }

    return $group;
}

# group the available (online) nodes by priority, using the priority as the group index
sub get_node_priority_groups {
    my ($group, $online_node_usage) = @_;

    my $pri_groups = {};
    my $group_members = {};
    foreach my $entry (keys %{$group->{nodes}}) {
        my ($node, $pri) = ($entry, 0);
        if ($entry =~ m/^(\S+):(\d+)$/) {
            ($node, $pri) = ($1, $2);
        }
        next if !defined($online_node_usage->{$node}); # offline
        $pri_groups->{$pri}->{$node} = 1;
        $group_members->{$node} = $pri;
    }

    # add non-group members to unrestricted groups (priority -1)
    if (!$group->{restricted}) {
        my $pri = -1;
        foreach my $node (keys %$online_node_usage) {
            next if defined($group_members->{$node});
            $pri_groups->{$pri}->{$node} = 1;
            $group_members->{$node} = -1;
        }
    }

    return ($pri_groups, $group_members);
}

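# select the best node for a service:
#   $groups               - the HA group configuration
#   $online_node_usage    - hash of online nodes => number of active services
#   $service_conf         - the service's configuration data
#   $current_node         - the node the service is currently on
#   $try_next             - if set, prefer a node other than the current one (relocation)
#   $tried_nodes          - nodes where a start already failed, avoided on relocation
#   $maintenance_fallback - preferred node to return to after maintenance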
sub select_service_node {
    my ($groups, $online_node_usage, $service_conf, $current_node, $try_next, $tried_nodes, $maintenance_fallback) = @_;

    my $group = get_service_group($groups, $online_node_usage, $service_conf);

    my ($pri_groups, $group_members) = get_node_priority_groups($group, $online_node_usage);

    my @pri_list = sort {$b <=> $a} keys %$pri_groups;
    return undef if !scalar(@pri_list);

    # stay on current node if possible (avoids random migrations)
    if (!$try_next && $group->{nofailback} && defined($group_members->{$current_node})) {
        return $current_node;
    }

    # select node from top priority node list

    my $top_pri = $pri_list[0];

    # try to avoid nodes where the service failed already if we want to relocate
    if ($try_next) {
        foreach my $node (@$tried_nodes) {
            delete $pri_groups->{$top_pri}->{$node};
        }
    }

    my @nodes = sort {
        $online_node_usage->{$a} <=> $online_node_usage->{$b} || $a cmp $b
    } keys %{$pri_groups->{$top_pri}};

    my $found;
    my $found_maintenance_fallback;
    for (my $i = scalar(@nodes) - 1; $i >= 0; $i--) {
        my $node = $nodes[$i];
        if ($node eq $current_node) {
            $found = $i;
        }
        if (defined($maintenance_fallback) && $node eq $maintenance_fallback) {
            $found_maintenance_fallback = $i;
        }
    }

    if (defined($found_maintenance_fallback)) {
        return $nodes[$found_maintenance_fallback];
    }

    if ($try_next) {
        if (defined($found) && ($found < (scalar(@nodes) - 1))) {
            return $nodes[$found + 1];
        } else {
            return $nodes[0];
        }
    } elsif (defined($found)) {
        return $nodes[$found];
    } else {
        return $nodes[0];
    }
}

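# per-process counter mixed into each service UID so that repeated calls within the same second
# still produce unique values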
my $uid_counter = 0;

sub compute_new_uuid {
    my ($state) = @_;

    $uid_counter++;
    return md5_base64($state . $$ . time() . $uid_counter);
}

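# all states a service can have on the CRM side; used to validate state transitions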
my $valid_service_states = {
    stopped => 1,
    request_stop => 1,
    started => 1,
    fence => 1,
    recovery => 1,
    migrate => 1,
    relocate => 1,
    freeze => 1,
    error => 1,
};

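# count how many active services are placed on each online node; this is used as a simple usage
# metric when selecting target nodes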
sub recompute_online_node_usage {
    my ($self) = @_;

    my $online_node_usage = {};

    my $online_nodes = $self->{ns}->list_online_nodes();

    foreach my $node (@$online_nodes) {
        $online_node_usage->{$node} = 0;
    }

    foreach my $sid (keys %{$self->{ss}}) {
        my $sd = $self->{ss}->{$sid};
        my $state = $sd->{state};
        if (defined($online_node_usage->{$sd->{node}})) {
            if (
                $state eq 'started' || $state eq 'request_stop' || $state eq 'fence' ||
                $state eq 'freeze' || $state eq 'error' || $state eq 'recovery'
            ) {
                $online_node_usage->{$sd->{node}}++;
            } elsif (($state eq 'migrate') || ($state eq 'relocate')) {
                # count it for both source and target, as load is put on both
                $online_node_usage->{$sd->{node}}++;
                $online_node_usage->{$sd->{target}}++;
            } elsif ($state eq 'stopped') {
                # do nothing
            } else {
                die "should not be reached (sid = '$sid', state = '$state')";
            }
        } elsif (defined(my $target = $sd->{target})) {
            if ($state eq 'migrate' || $state eq 'relocate') {
                # to correctly track maintenance modes, also consider the target as used in case
                # the source node dies, as we cannot really know if the to-be-aborted incoming
                # migration has already cleaned up all used resources
                $online_node_usage->{$target}++;
            }
        }
    }

    $self->{online_node_usage} = $online_node_usage;
}

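# central helper to transition a service into a new state: keeps node, failed_nodes and
# maintenance_node, drops all other per-state data, stores the given %params on the service,
# refreshes the usage counts and assigns a fresh UID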
my $change_service_state = sub {
    my ($self, $sid, $new_state, %params) = @_;

    my ($haenv, $ss) = ($self->{haenv}, $self->{ss});

    my $sd = $ss->{$sid} || die "no such service '$sid'";

    my $old_state = $sd->{state};
    my $old_node = $sd->{node};
    my $old_failed_nodes = $sd->{failed_nodes};
    my $old_maintenance_node = $sd->{maintenance_node};

    die "no state change" if $old_state eq $new_state; # just to be sure

    die "invalid CRM service state '$new_state'\n" if !$valid_service_states->{$new_state};

    foreach my $k (keys %$sd) { delete $sd->{$k}; };

    $sd->{state} = $new_state;
    $sd->{node} = $old_node;
    $sd->{failed_nodes} = $old_failed_nodes if defined($old_failed_nodes);
    $sd->{maintenance_node} = $old_maintenance_node if defined($old_maintenance_node);

    my $text_state = '';
    foreach my $k (sort keys %params) {
        my $v = $params{$k};
        $text_state .= ", " if $text_state;
        $text_state .= "$k = $v";
        $sd->{$k} = $v;
    }

    $self->recompute_online_node_usage();

    $sd->{uid} = compute_new_uuid($new_state);

    $text_state = " ($text_state)" if $text_state;
    $haenv->log('info', "service '$sid': state changed from '${old_state}'" .
                " to '${new_state}'$text_state");
};

# clean up a possible bad state from a recovered service to allow its start
my $fence_recovery_cleanup = sub {
    my ($self, $sid, $fenced_node) = @_;

    my $haenv = $self->{haenv};

    my (undef, $type, $id) = $haenv->parse_sid($sid);
    my $plugin = PVE::HA::Resources->lookup($type);

    # should not happen
    die "unknown resource type '$type'" if !$plugin;

    # locks may block recovery, so clean up those which are safe to remove after fencing,
    # i.e., after the original node was reset and thus lost all its state
    my $removable_locks = [
        'backup',
        'mounted',
        'migrate',
        'clone',
        'rollback',
        'snapshot',
        'snapshot-delete',
        'suspending',
        'suspended',
    ];
    if (my $removed_lock = $plugin->remove_locks($haenv, $id, $removable_locks, $fenced_node)) {
        $haenv->log('warning', "removed leftover lock '$removed_lock' from recovered " .
                    "service '$sid' to allow its start.");
    }
};

# read LRM status (command results and node mode) for all nodes
sub read_lrm_status {
    my ($self) = @_;

    my $nodes = $self->{ns}->list_nodes();
    my $haenv = $self->{haenv};

    my $results = {};
    my $modes = {};
    foreach my $node (@$nodes) {
        my $lrm_status = $haenv->read_lrm_status($node);
        $modes->{$node} = $lrm_status->{mode} || 'active';
        foreach my $uid (keys %{$lrm_status->{results}}) {
            next if $results->{$uid}; # should not happen
            $results->{$uid} = $lrm_status->{results}->{$uid};
        }
    }

    return ($results, $modes);
}

# read new CRM commands and queue them on the affected services
sub update_crm_commands {
    my ($self) = @_;

    my ($haenv, $ms, $ns, $ss) = ($self->{haenv}, $self->{ms}, $self->{ns}, $self->{ss});

    my $cmdlist = $haenv->read_crm_commands();

    foreach my $cmd (split(/\n/, $cmdlist)) {
        chomp $cmd;

        if ($cmd =~ m/^(migrate|relocate)\s+(\S+)\s+(\S+)$/) {
            my ($task, $sid, $node) = ($1, $2, $3);
            if (my $sd = $ss->{$sid}) {
                if (!$ns->node_is_online($node)) {
                    $haenv->log('err', "crm command error - node not online: $cmd");
                } else {
                    if ($node eq $sd->{node}) {
                        $haenv->log('info', "ignore crm command - service already on target node: $cmd");
                    } else {
                        $haenv->log('info', "got crm command: $cmd");
                        $ss->{$sid}->{cmd} = [ $task, $node ];
                    }
                }
            } else {
                $haenv->log('err', "crm command error - no such service: $cmd");
            }

        } elsif ($cmd =~ m/^stop\s+(\S+)\s+(\S+)$/) {
            my ($sid, $timeout) = ($1, $2);
            if (my $sd = $ss->{$sid}) {
                $haenv->log('info', "got crm command: $cmd");
                $ss->{$sid}->{cmd} = [ 'stop', $timeout ];
            } else {
                $haenv->log('err', "crm command error - no such service: $cmd");
            }
        } else {
            $haenv->log('err', "unable to parse crm command: $cmd");
        }
    }
}

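# main manager work function: update node and service status, process CRM commands and run every
# service through its state machine, repeating until no service changes state anymore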
sub manage {
    my ($self) = @_;

    my ($haenv, $ms, $ns, $ss) = ($self->{haenv}, $self->{ms}, $self->{ns}, $self->{ss});

    my ($node_info) = $haenv->get_node_info();
    my ($lrm_results, $lrm_modes) = $self->read_lrm_status();

    $ns->update($node_info, $lrm_modes);

    if (!$ns->node_is_operational($haenv->nodename())) {
        $haenv->log('info', "master seems offline");
        return;
    }

    my $sc = $haenv->read_service_config();

    $self->{groups} = $haenv->read_group_config(); # update

    # compute new service status

    # add new services
    foreach my $sid (sort keys %$sc) {
        next if $ss->{$sid}; # already there
        my $cd = $sc->{$sid};
        next if $cd->{state} eq 'ignored';

        $haenv->log('info', "adding new service '$sid' on node '$cd->{node}'");
        # assume the service is running to avoid relocating a running service when it is added
        my $state = ($cd->{state} eq 'started') ? 'started' : 'request_stop';
        $ss->{$sid} = { state => $state, node => $cd->{node},
                        uid => compute_new_uuid('started') };
    }

    # remove stale or ignored services from manager state
    foreach my $sid (keys %$ss) {
        next if $sc->{$sid} && $sc->{$sid}->{state} ne 'ignored';

        my $reason = defined($sc->{$sid}) ? 'ignored state requested' : 'no config';
        $haenv->log('info', "removing stale service '$sid' ($reason)");

        # remove all service related state information
        delete $ss->{$sid};
    }

    $self->update_crm_commands();

    for (;;) {
        my $repeat = 0;

        $self->recompute_online_node_usage();

        foreach my $sid (sort keys %$ss) {
            my $sd = $ss->{$sid};
            my $cd = $sc->{$sid} || { state => 'disabled' };

            my $lrm_res = $sd->{uid} ? $lrm_results->{$sd->{uid}} : undef;

            my $last_state = $sd->{state};

            if ($last_state eq 'stopped') {

                $self->next_state_stopped($sid, $cd, $sd, $lrm_res);

            } elsif ($last_state eq 'started') {

                $self->next_state_started($sid, $cd, $sd, $lrm_res);

            } elsif ($last_state eq 'migrate' || $last_state eq 'relocate') {

                $self->next_state_migrate_relocate($sid, $cd, $sd, $lrm_res);

            } elsif ($last_state eq 'fence') {

                # do nothing here - wait until fenced

            } elsif ($last_state eq 'recovery') {

                $self->next_state_recovery($sid, $cd, $sd, $lrm_res);

            } elsif ($last_state eq 'request_stop') {

                $self->next_state_request_stop($sid, $cd, $sd, $lrm_res);

            } elsif ($last_state eq 'freeze') {

                my $lrm_mode = $sd->{node} ? $lrm_modes->{$sd->{node}} : undef;
                # unfreeze as soon as the node's LRM is active again
                my $state = ($cd->{state} eq 'started') ? 'started' : 'request_stop';
                &$change_service_state($self, $sid, $state)
                    if $lrm_mode && $lrm_mode eq 'active';

            } elsif ($last_state eq 'error') {

                $self->next_state_error($sid, $cd, $sd, $lrm_res);

            } else {

                die "unknown service state '$last_state'";
            }

            my $lrm_mode = $sd->{node} ? $lrm_modes->{$sd->{node}} : undef;
            if ($lrm_mode && $lrm_mode eq 'restart') {
                if (($sd->{state} eq 'started' || $sd->{state} eq 'stopped' ||
                     $sd->{state} eq 'request_stop')) {
                    &$change_service_state($self, $sid, 'freeze');
                }
            }

            $repeat = 1 if $sd->{state} ne $last_state;
        }

        # handle fencing
        my $fenced_nodes = {};
        foreach my $sid (sort keys %$ss) {
            my ($service_state, $service_node) = $ss->{$sid}->@{'state', 'node'};
            next if $service_state ne 'fence';

            if (!defined($fenced_nodes->{$service_node})) {
                $fenced_nodes->{$service_node} = $ns->fence_node($service_node) || 0;
            }

            next if !$fenced_nodes->{$service_node};

            # node fence was successful - recover service
            $change_service_state->($self, $sid, 'recovery');
            $repeat = 1; # for faster recovery execution
        }

        # Avoid that a node without any service in 'fence' state (e.g., because they were
        # removed manually by an admin) stays stuck in the 'fence' node state.
        for my $node (sort grep { !defined($fenced_nodes->{$_}) } keys $ns->{status}->%*) {
            next if $ns->get_node_state($node) ne 'fence';

            $haenv->log('notice', "node '$node' in fence state but no services to-fence! admin interference?!");
            $repeat = 1 if $ns->fence_node($node);
        }

        last if !$repeat;
    }

    $self->flush_master_status();
}

# functions to compute next service states
# $cd: service configuration data (read only)
# $sd: service status data (read only)
#
# Note: use change_service_state() to alter state
#

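# a stop was requested: wait for the LRM result and move to 'stopped' on success or to 'error'
# on failure; if the service's node stays offline for too long, fence it instead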
sub next_state_request_stop {
    my ($self, $sid, $cd, $sd, $lrm_res) = @_;

    my $haenv = $self->{haenv};
    my $ns = $self->{ns};

    # check result from LRM daemon
    if ($lrm_res) {
        my $exit_code = $lrm_res->{exit_code};
        if ($exit_code == SUCCESS) {
            &$change_service_state($self, $sid, 'stopped');
            return;
        } else {
            $haenv->log('err', "service '$sid' stop failed (exit code $exit_code)");
            &$change_service_state($self, $sid, 'error'); # fixme: what state?
            return;
        }
    }

    if ($ns->node_is_offline_delayed($sd->{node})) {
        &$change_service_state($self, $sid, 'fence');
        return;
    }
}

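# a migration/relocation is in progress: on success move the service record to the target node,
# on failure either go back to the requested state on the source node or enter the error state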
sub next_state_migrate_relocate {
    my ($self, $sid, $cd, $sd, $lrm_res) = @_;

    my $haenv = $self->{haenv};
    my $ns = $self->{ns};

    # check result from LRM daemon
    if ($lrm_res) {
        my $exit_code = $lrm_res->{exit_code};
        my $req_state = $cd->{state} eq 'started' ? 'started' : 'request_stop';
        if ($exit_code == SUCCESS) {
            &$change_service_state($self, $sid, $req_state, node => $sd->{target});
            return;
        } elsif ($exit_code == EWRONG_NODE) {
            $haenv->log('err', "service '$sid' - migration failed: service" .
                        " registered on wrong node!");
            &$change_service_state($self, $sid, 'error');
        } else {
            $haenv->log('err', "service '$sid' - migration failed (exit code $exit_code)");
            &$change_service_state($self, $sid, $req_state, node => $sd->{node});
            return;
        }
    }

    if ($ns->node_is_offline_delayed($sd->{node})) {
        &$change_service_state($self, $sid, 'fence');
        return;
    }
}

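# service is stopped: apply queued migrate/relocate/stop commands and, depending on the
# configured state, either keep it stopped or mark it 'started' again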
sub next_state_stopped {
    my ($self, $sid, $cd, $sd, $lrm_res) = @_;

    my $haenv = $self->{haenv};
    my $ns = $self->{ns};

    if ($sd->{node} ne $cd->{node}) {
        # this can happen if we fence a node with active migrations
        # hack: modify $sd (normally this should be considered read-only)
        $haenv->log('info', "fixup service '$sid' location ($sd->{node} => $cd->{node})");
        $sd->{node} = $cd->{node};
    }

    if ($sd->{cmd}) {
        my $cmd = shift @{$sd->{cmd}};

        if ($cmd eq 'migrate' || $cmd eq 'relocate') {
            my $target = shift @{$sd->{cmd}};
            if (!$ns->node_is_online($target)) {
                $haenv->log('err', "ignore service '$sid' $cmd request - node '$target' not online");
            } elsif ($sd->{node} eq $target) {
                $haenv->log('info', "ignore service '$sid' $cmd request - service already on node '$target'");
            } else {
                &$change_service_state($self, $sid, $cmd, node => $sd->{node},
                                       target => $target);
                return;
            }
        } elsif ($cmd eq 'stop') {
            $haenv->log('info', "ignore service '$sid' $cmd request - service already stopped");
        } else {
            $haenv->log('err', "unknown command '$cmd' for service '$sid'");
        }
        delete $sd->{cmd};
    }

    if ($cd->{state} eq 'disabled') {
        # NOTE: do nothing here, the stop state is an exception as we do not
        # process the LRM result here, thus the LRM always tries to stop the
        # service (protection for the case no CRM is active)
        return;
    }

    if ($ns->node_is_offline_delayed($sd->{node}) && $ns->get_node_state($sd->{node}) ne 'maintenance') {
        &$change_service_state($self, $sid, 'fence');
        return;
    }

    if ($cd->{state} eq 'stopped') {
        # almost the same as the 'disabled' state, but the service will also get recovered
        return;
    }

    if ($cd->{state} eq 'started') {
        # simply mark it started; if it's on the wrong node,
        # next_state_started will fix that for us
        &$change_service_state($self, $sid, 'started', node => $sd->{node});
        return;
    }

    $haenv->log('err', "service '$sid' - unknown state '$cd->{state}' in service configuration");
}

sub record_service_failed_on_node {
    my ($self, $sid, $node) = @_;

    if (!defined($self->{ss}->{$sid}->{failed_nodes})) {
        $self->{ss}->{$sid}->{failed_nodes} = [];
    }

    push @{$self->{ss}->{$sid}->{failed_nodes}}, $node;
}

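# service should be running: process queued CRM commands, apply the relocation policy on LRM
# start errors and, via select_service_node, decide whether to keep the service where it is or
# to migrate/relocate it to a better node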
sub next_state_started {
    my ($self, $sid, $cd, $sd, $lrm_res) = @_;

    my $haenv = $self->{haenv};
    my $master_status = $self->{ms};
    my $ns = $self->{ns};

    if (!$ns->node_is_online($sd->{node})) {
        if ($ns->node_is_offline_delayed($sd->{node})) {
            &$change_service_state($self, $sid, 'fence');
        }
        if ($ns->get_node_state($sd->{node}) ne 'maintenance') {
            return;
        } else {
            # save current node as fallback for when it comes out of maintenance
            $sd->{maintenance_node} = $sd->{node};
        }
    }

    if ($cd->{state} eq 'disabled' || $cd->{state} eq 'stopped') {
        &$change_service_state($self, $sid, 'request_stop');
        return;
    }

    if ($cd->{state} eq 'started') {

        if ($sd->{cmd}) {
            my $cmd = shift @{$sd->{cmd}};

            if ($cmd eq 'migrate' || $cmd eq 'relocate') {
                my $target = shift @{$sd->{cmd}};
                if (!$ns->node_is_online($target)) {
                    $haenv->log('err', "ignore service '$sid' $cmd request - node '$target' not online");
                } elsif ($sd->{node} eq $target) {
                    $haenv->log('info', "ignore service '$sid' $cmd request - service already on node '$target'");
                } else {
                    $haenv->log('info', "$cmd service '$sid' to node '$target'");
                    &$change_service_state($self, $sid, $cmd, node => $sd->{node}, target => $target);
                }
            } elsif ($cmd eq 'stop') {
                my $timeout = shift @{$sd->{cmd}};
                if ($timeout == 0) {
                    $haenv->log('info', "request immediate service hard-stop for service '$sid'");
                } else {
                    $haenv->log('info', "request graceful stop with timeout '$timeout' for service '$sid'");
                }
                &$change_service_state($self, $sid, 'request_stop', timeout => $timeout);
                $haenv->update_service_config($sid, {'state' => 'stopped'});
            } else {
                $haenv->log('err', "unknown command '$cmd' for service '$sid'");
            }

            delete $sd->{cmd};

        } else {

            my $try_next = 0;

            if ($lrm_res) {

                my $ec = $lrm_res->{exit_code};
                if ($ec == SUCCESS) {

                    if (defined($sd->{failed_nodes})) {
                        $haenv->log('info', "relocation policy successful for '$sid' on node '$sd->{node}'," .
                                    " failed nodes: " . join(', ', @{$sd->{failed_nodes}}) );
                    }

                    delete $sd->{failed_nodes};

                    # store flag to indicate successful start - only valid while state == 'started'
                    $sd->{running} = 1;

                } elsif ($ec == ERROR) {

                    delete $sd->{running};

                    # apply our relocate policy if we got ERROR from the LRM
                    $self->record_service_failed_on_node($sid, $sd->{node});

                    if (scalar(@{$sd->{failed_nodes}}) <= $cd->{max_relocate}) {

                        # tell select_service_node to relocate if possible
                        $try_next = 1;

                        $haenv->log('warning', "starting service $sid on node" .
                                    " '$sd->{node}' failed, relocating service.");

                    } else {

                        $haenv->log('err', "recovery policy for service $sid " .
                                    "failed, entering error state. Failed nodes: " .
                                    join(', ', @{$sd->{failed_nodes}}));
                        &$change_service_state($self, $sid, 'error');
                        return;

                    }
                } else {
                    $self->record_service_failed_on_node($sid, $sd->{node});

                    $haenv->log('err', "service '$sid' got unrecoverable error" .
                                " (exit code $ec)");
                    # we have no safe way out (yet) for other errors
                    &$change_service_state($self, $sid, 'error');
                    return;
                }
            }

            my $node = select_service_node(
                $self->{groups},
                $self->{online_node_usage},
                $cd,
                $sd->{node},
                $try_next,
                $sd->{failed_nodes},
                $sd->{maintenance_node},
            );

            if ($node && ($sd->{node} ne $node)) {
                $self->{online_node_usage}->{$node}++;

                if (defined(my $fallback = $sd->{maintenance_node})) {
                    if ($node eq $fallback) {
                        $haenv->log('info', "moving service '$sid' back to '$fallback', node came back from maintenance.");
                        delete $sd->{maintenance_node};
                    } elsif ($sd->{node} ne $fallback) {
                        $haenv->log('info', "dropping maintenance fallback node '$fallback' for '$sid'");
                        delete $sd->{maintenance_node};
                    }
                }

                if ($cd->{type} eq 'vm') {
                    $haenv->log('info', "migrate service '$sid' to node '$node' (running)");
                    &$change_service_state($self, $sid, 'migrate', node => $sd->{node}, target => $node);
                } else {
                    $haenv->log('info', "relocate service '$sid' to node '$node'");
                    &$change_service_state($self, $sid, 'relocate', node => $sd->{node}, target => $node);
                }
            } else {
                if ($try_next && !defined($node)) {
                    $haenv->log('warning', "Start Error Recovery: Tried all available" .
                                " nodes for service '$sid', retry start on current node. " .
                                "Tried nodes: " . join(', ', @{$sd->{failed_nodes}}));
                }
                # ensure the service gets started again if it went down unexpectedly,
                # but also ensure that no LRM result gets lost
                $sd->{uid} = compute_new_uuid($sd->{state}) if defined($lrm_res);
            }
        }

        return;
    }

    $haenv->log('err', "service '$sid' - unknown state '$cd->{state}' in service configuration");
}

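# service is in the error state: stay there until an admin disables the service, which moves it
# back to 'stopped' and clears the failed-node history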
sub next_state_error {
    my ($self, $sid, $cd, $sd, $lrm_res) = @_;

    my $ns = $self->{ns};
    my $ms = $self->{ms};

    if ($cd->{state} eq 'disabled') {
        # clean up on error recovery
        delete $sd->{failed_nodes};

        &$change_service_state($self, $sid, 'stopped');
        return;
    }
}

# after a node was fenced, this recovers the service to a new node
sub next_state_recovery {
    my ($self, $sid, $cd, $sd, $lrm_res) = @_;

    my ($haenv, $ss) = ($self->{haenv}, $self->{ss});
    my $ns = $self->{ns};
    my $ms = $self->{ms};

    if ($sd->{state} ne 'recovery') { # should not happen
        $haenv->log('err', "cannot recover service '$sid' from fencing, wrong state '$sd->{state}'");
        return;
    }

    my $fenced_node = $sd->{node}; # for logging purposes

    $self->recompute_online_node_usage(); # we want the most current node state

    my $recovery_node = select_service_node(
        $self->{groups},
        $self->{online_node_usage},
        $cd,
        $sd->{node},
    );

    if ($recovery_node) {
        my $msg = "recover service '$sid' from fenced node '$fenced_node' to node '$recovery_node'";
        if ($recovery_node eq $fenced_node) {
            # can happen with restricted groups if the node came up again OK
            $msg = "recover service '$sid' to previously failed and fenced node '$fenced_node' again";
        }
        $haenv->log('info', "$msg");

        $fence_recovery_cleanup->($self, $sid, $fenced_node);

        $haenv->steal_service($sid, $sd->{node}, $recovery_node);
        $self->{online_node_usage}->{$recovery_node}++;

        # NOTE: $sd *is normally read-only*, fencing is the exception
        $cd->{node} = $sd->{node} = $recovery_node;
        my $new_state = ($cd->{state} eq 'started') ? 'started' : 'request_stop';
        $change_service_state->($self, $sid, $new_state, node => $recovery_node);
    } else {
        # no possible node found, cannot recover - but retry later, as we always try to make it available
        $haenv->log('err', "recovering service '$sid' from fenced node '$fenced_node' failed, no recovery node found");

        if ($cd->{state} eq 'disabled') {
            # allow getting a service out of recovery manually if an admin disables it.
            delete $sd->{failed_nodes}; # clean up on recovery to stopped
            $change_service_state->($self, $sid, 'stopped'); # must NOT go through request_stop
            return;
        }
    }
}

1;