pve-ha-manager.git: src/PVE/HA/Manager.pm
package PVE::HA::Manager;

use strict;
use warnings;
use Digest::MD5 qw(md5_base64);

use PVE::Tools;
use PVE::HA::Tools ':exit_codes';
use PVE::HA::NodeStatus;

sub new {
    my ($this, $haenv) = @_;

    my $class = ref($this) || $this;

    my $self = bless { haenv => $haenv }, $class;

    my $old_ms = $haenv->read_manager_status();

    # we only copy the state part of the manager which cannot be auto generated

    $self->{ns} = PVE::HA::NodeStatus->new($haenv, $old_ms->{node_status} || {});

    # fixme: use separate class PVE::HA::ServiceStatus
    $self->{ss} = $old_ms->{service_status} || {};

    $self->{ms} = { master_node => $haenv->nodename() };

    return $self;
}

sub cleanup {
    my ($self) = @_;

    # todo: ?
}

sub flush_master_status {
    my ($self) = @_;

    my ($haenv, $ms, $ns, $ss) = ($self->{haenv}, $self->{ms}, $self->{ns}, $self->{ss});

    $ms->{node_status} = $ns->{status};
    $ms->{service_status} = $ss;
    $ms->{timestamp} = $haenv->get_time();

    $haenv->write_manager_status($ms);
}

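# build the effective group for a service: either the configured group from
# $groups, or a default group containing all online nodes so that try_next can
# still pick an alternative node when no group is set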
sub get_service_group {
    my ($groups, $online_node_usage, $service_conf) = @_;

    my $group = {};
    # add all online nodes to default group to allow try_next when no group set
    foreach my $node (keys %$online_node_usage) {
        $group->{nodes}->{$node} = 1;
    }

    # overwrite default if service is bound to a specific group
    if (my $group_id = $service_conf->{group}) {
        $group = $groups->{ids}->{$group_id} if $groups->{ids}->{$group_id};
    }

    return $group;
}

# groups available nodes with their priority as group index
sub get_node_priority_groups {
    my ($group, $online_node_usage) = @_;

    my $pri_groups = {};
    my $group_members = {};
    foreach my $entry (keys %{$group->{nodes}}) {
        my ($node, $pri) = ($entry, 0);
        if ($entry =~ m/^(\S+):(\d+)$/) {
            ($node, $pri) = ($1, $2);
        }
        next if !defined($online_node_usage->{$node}); # offline
        $pri_groups->{$pri}->{$node} = 1;
        $group_members->{$node} = $pri;
    }

    # add non-group members to unrestricted groups (priority -1)
    if (!$group->{restricted}) {
        my $pri = -1;
        foreach my $node (keys %$online_node_usage) {
            next if defined($group_members->{$node});
            $pri_groups->{$pri}->{$node} = 1;
            $group_members->{$node} = -1;
        }
    }

    return ($pri_groups, $group_members);
}

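# select the node a service should run on
# $current_node: node the service currently resides on
# $try_next: set after a start failure to prefer a node not yet tried
# $tried_nodes: list of nodes where the service already failed to start
# $maintenance_fallback: preferred node to move back to after maintenance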
sub select_service_node {
    my ($groups, $online_node_usage, $service_conf, $current_node, $try_next, $tried_nodes, $maintenance_fallback) = @_;

    my $group = get_service_group($groups, $online_node_usage, $service_conf);

    my ($pri_groups, $group_members) = get_node_priority_groups($group, $online_node_usage);

    my @pri_list = sort {$b <=> $a} keys %$pri_groups;
    return undef if !scalar(@pri_list);

    # stay on current node if possible (avoids random migrations)
    if (!$try_next && $group->{nofailback} && defined($group_members->{$current_node})) {
        return $current_node;
    }

    # select node from top priority node list

    my $top_pri = $pri_list[0];

    # try to avoid nodes where the service failed already if we want to relocate
    if ($try_next) {
        foreach my $node (@$tried_nodes) {
            delete $pri_groups->{$top_pri}->{$node};
        }
    }

    my @nodes = sort {
        $online_node_usage->{$a} <=> $online_node_usage->{$b} || $a cmp $b
    } keys %{$pri_groups->{$top_pri}};

    my $found;
    my $found_maintenance_fallback;
    for (my $i = scalar(@nodes) - 1; $i >= 0; $i--) {
        my $node = $nodes[$i];
        if ($node eq $current_node) {
            $found = $i;
        }
        if (defined($maintenance_fallback) && $node eq $maintenance_fallback) {
            $found_maintenance_fallback = $i;
        }
    }

    if (defined($found_maintenance_fallback)) {
        return $nodes[$found_maintenance_fallback];
    }

    if ($try_next) {
        if (defined($found) && ($found < (scalar(@nodes) - 1))) {
            return $nodes[$found + 1];
        } else {
            return $nodes[0];
        }
    } elsif (defined($found)) {
        return $nodes[$found];
    } else {
        return $nodes[0];
    }
}

my $uid_counter = 0;

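# generate a unique token for each state change; the LRM reports its results
# keyed by this uid, so the CRM can match a result to the request that caused it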
sub compute_new_uuid {
    my ($state) = @_;

    $uid_counter++;
    return md5_base64($state . $$ . time() . $uid_counter);
}

my $valid_service_states = {
    stopped => 1,
    request_stop => 1,
    started => 1,
    fence => 1,
    recovery => 1,
    migrate => 1,
    relocate => 1,
    freeze => 1,
    error => 1,
};

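# count the number of services each online node currently carries; used as a
# simple load metric when selecting a target node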
sub recompute_online_node_usage {
    my ($self) = @_;

    my $online_node_usage = {};

    my $online_nodes = $self->{ns}->list_online_nodes();

    foreach my $node (@$online_nodes) {
        $online_node_usage->{$node} = 0;
    }

    foreach my $sid (keys %{$self->{ss}}) {
        my $sd = $self->{ss}->{$sid};
        my $state = $sd->{state};
        if (defined($online_node_usage->{$sd->{node}})) {
            if (
                $state eq 'started' || $state eq 'request_stop' || $state eq 'fence' ||
                $state eq 'freeze' || $state eq 'error' || $state eq 'recovery'
            ) {
                $online_node_usage->{$sd->{node}}++;
            } elsif (($state eq 'migrate') || ($state eq 'relocate')) {
                # count it for both source and target, as load is put on both
                $online_node_usage->{$sd->{node}}++;
                $online_node_usage->{$sd->{target}}++;
            } elsif ($state eq 'stopped') {
                # do nothing
            } else {
                die "should not be reached (sid = '$sid', state = '$state')";
            }
        }
    }

    $self->{online_node_usage} = $online_node_usage;
}

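# transition a service to a new state: wipes all per-state data from $sd but
# keeps node, failed_nodes and maintenance_node, applies the extra %params,
# assigns a fresh uid and logs the transition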
my $change_service_state = sub {
    my ($self, $sid, $new_state, %params) = @_;

    my ($haenv, $ss) = ($self->{haenv}, $self->{ss});

    my $sd = $ss->{$sid} || die "no such service '$sid'";

    my $old_state = $sd->{state};
    my $old_node = $sd->{node};
    my $old_failed_nodes = $sd->{failed_nodes};
    my $old_maintenance_node = $sd->{maintenance_node};

    die "no state change" if $old_state eq $new_state; # just to be sure

    die "invalid CRM service state '$new_state'\n" if !$valid_service_states->{$new_state};

    foreach my $k (keys %$sd) { delete $sd->{$k}; };

    $sd->{state} = $new_state;
    $sd->{node} = $old_node;
    $sd->{failed_nodes} = $old_failed_nodes if defined($old_failed_nodes);
    $sd->{maintenance_node} = $old_maintenance_node if defined($old_maintenance_node);

    my $text_state = '';
    foreach my $k (sort keys %params) {
        my $v = $params{$k};
        $text_state .= ", " if $text_state;
        $text_state .= "$k = $v";
        $sd->{$k} = $v;
    }

    $self->recompute_online_node_usage();

    $sd->{uid} = compute_new_uuid($new_state);

    $text_state = " ($text_state)" if $text_state;
    $haenv->log('info', "service '$sid': state changed from '${old_state}'" .
                " to '${new_state}'$text_state");
};

# clean up a possible bad state from a recovered service to allow its start
my $fence_recovery_cleanup = sub {
    my ($self, $sid, $fenced_node) = @_;

    my $haenv = $self->{haenv};

    my (undef, $type, $id) = $haenv->parse_sid($sid);
    my $plugin = PVE::HA::Resources->lookup($type);

    # should not happen
    die "unknown resource type '$type'" if !$plugin;

    # locks may block recovery; clean up those which are safe to remove after fencing,
    # i.e., after the original node was reset and thus all of its state is gone
    my $removable_locks = [
        'backup',
        'mounted',
        'migrate',
        'clone',
        'rollback',
        'snapshot',
        'snapshot-delete',
        'suspending',
        'suspended',
    ];
    if (my $removed_lock = $plugin->remove_locks($haenv, $id, $removable_locks, $fenced_node)) {
        $haenv->log('warning', "removed leftover lock '$removed_lock' from recovered " .
                    "service '$sid' to allow its start.");
    }
};

# read LRM status for all nodes
sub read_lrm_status {
    my ($self) = @_;

    my $nodes = $self->{ns}->list_nodes();
    my $haenv = $self->{haenv};

    my $results = {};
    my $modes = {};
    foreach my $node (@$nodes) {
        my $lrm_status = $haenv->read_lrm_status($node);
        $modes->{$node} = $lrm_status->{mode} || 'active';
        foreach my $uid (keys %{$lrm_status->{results}}) {
            next if $results->{$uid}; # should not happen
            $results->{$uid} = $lrm_status->{results}->{$uid};
        }
    }

    return ($results, $modes);
}

# read new crm commands and save them into crm master status
sub update_crm_commands {
    my ($self) = @_;

    my ($haenv, $ms, $ns, $ss) = ($self->{haenv}, $self->{ms}, $self->{ns}, $self->{ss});

    my $cmdlist = $haenv->read_crm_commands();

    foreach my $cmd (split(/\n/, $cmdlist)) {
        chomp $cmd;

        if ($cmd =~ m/^(migrate|relocate)\s+(\S+)\s+(\S+)$/) {
            my ($task, $sid, $node) = ($1, $2, $3);
            if (my $sd = $ss->{$sid}) {
                if (!$ns->node_is_online($node)) {
                    $haenv->log('err', "crm command error - node not online: $cmd");
                } else {
                    if ($node eq $sd->{node}) {
                        $haenv->log('info', "ignore crm command - service already on target node: $cmd");
                    } else {
                        $haenv->log('info', "got crm command: $cmd");
                        $ss->{$sid}->{cmd} = [ $task, $node ];
                    }
                }
            } else {
                $haenv->log('err', "crm command error - no such service: $cmd");
            }

        } elsif ($cmd =~ m/^stop\s+(\S+)\s+(\S+)$/) {
            my ($sid, $timeout) = ($1, $2);
            if (my $sd = $ss->{$sid}) {
                $haenv->log('info', "got crm command: $cmd");
                $ss->{$sid}->{cmd} = [ 'stop', $timeout ];
            } else {
                $haenv->log('err', "crm command error - no such service: $cmd");
            }
        } else {
            $haenv->log('err', "unable to parse crm command: $cmd");
        }
    }
}

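# main manager work function, called on each CRM loop iteration: updates node
# status, syncs the service list with the configuration, processes CRM
# commands, runs the per-service state machine until no more transitions
# happen, handles fencing, and finally writes back the manager status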
sub manage {
    my ($self) = @_;

    my ($haenv, $ms, $ns, $ss) = ($self->{haenv}, $self->{ms}, $self->{ns}, $self->{ss});

    my ($node_info) = $haenv->get_node_info();
    my ($lrm_results, $lrm_modes) = $self->read_lrm_status();

    $ns->update($node_info, $lrm_modes);

    if (!$ns->node_is_operational($haenv->nodename())) {
        $haenv->log('info', "master seems offline");
        return;
    }

    my $sc = $haenv->read_service_config();

    $self->{groups} = $haenv->read_group_config(); # update

    # compute new service status

    # add new services
    foreach my $sid (sort keys %$sc) {
        next if $ss->{$sid}; # already there
        my $cd = $sc->{$sid};
        next if $cd->{state} eq 'ignored';

        $haenv->log('info', "adding new service '$sid' on node '$cd->{node}'");
        # assume the service is running to avoid relocating a running service when it is added
        my $state = ($cd->{state} eq 'started') ? 'started' : 'request_stop';
        $ss->{$sid} = { state => $state, node => $cd->{node},
                        uid => compute_new_uuid('started') };
    }

    # remove stale or ignored services from manager state
    foreach my $sid (keys %$ss) {
        next if $sc->{$sid} && $sc->{$sid}->{state} ne 'ignored';

        my $reason = defined($sc->{$sid}) ? 'ignored state requested' : 'no config';
        $haenv->log('info', "removing stale service '$sid' ($reason)");

        # remove all service related state information
        delete $ss->{$sid};
    }

    $self->update_crm_commands();

    for (;;) {
        my $repeat = 0;

        $self->recompute_online_node_usage();

        foreach my $sid (sort keys %$ss) {
            my $sd = $ss->{$sid};
            my $cd = $sc->{$sid} || { state => 'disabled' };

            my $lrm_res = $sd->{uid} ? $lrm_results->{$sd->{uid}} : undef;

            my $last_state = $sd->{state};

            if ($last_state eq 'stopped') {

                $self->next_state_stopped($sid, $cd, $sd, $lrm_res);

            } elsif ($last_state eq 'started') {

                $self->next_state_started($sid, $cd, $sd, $lrm_res);

            } elsif ($last_state eq 'migrate' || $last_state eq 'relocate') {

                $self->next_state_migrate_relocate($sid, $cd, $sd, $lrm_res);

            } elsif ($last_state eq 'fence') {

                # do nothing here - wait until fenced

            } elsif ($last_state eq 'recovery') {

                $self->next_state_recovery($sid, $cd, $sd, $lrm_res);

            } elsif ($last_state eq 'request_stop') {

                $self->next_state_request_stop($sid, $cd, $sd, $lrm_res);

            } elsif ($last_state eq 'freeze') {

                my $lrm_mode = $sd->{node} ? $lrm_modes->{$sd->{node}} : undef;
                # unfreeze
                my $state = ($cd->{state} eq 'started') ? 'started' : 'request_stop';
                &$change_service_state($self, $sid, $state)
                    if $lrm_mode && $lrm_mode eq 'active';

            } elsif ($last_state eq 'error') {

                $self->next_state_error($sid, $cd, $sd, $lrm_res);

            } else {

                die "unknown service state '$last_state'";
            }

            my $lrm_mode = $sd->{node} ? $lrm_modes->{$sd->{node}} : undef;
            if ($lrm_mode && $lrm_mode eq 'restart') {
                if (($sd->{state} eq 'started' || $sd->{state} eq 'stopped' ||
                     $sd->{state} eq 'request_stop')) {
                    &$change_service_state($self, $sid, 'freeze');
                }
            }

            $repeat = 1 if $sd->{state} ne $last_state;
        }

        # handle fencing
        my $fenced_nodes = {};
        foreach my $sid (sort keys %$ss) {
            my ($service_state, $service_node) = $ss->{$sid}->@{'state', 'node'};
            next if $service_state ne 'fence';

            if (!defined($fenced_nodes->{$service_node})) {
                $fenced_nodes->{$service_node} = $ns->fence_node($service_node) || 0;
            }

            next if !$fenced_nodes->{$service_node};

            # node fence was successful - recover service
            $change_service_state->($self, $sid, 'recovery');
            $repeat = 1; # for faster recovery execution
        }

        # Avoid a node getting stuck in the 'fence' node state when it no longer has
        # any services in 'fence' state (e.g., they were removed manually by an admin).
        for my $node (sort grep { !defined($fenced_nodes->{$_}) } keys $ns->{status}->%*) {
            next if $ns->get_node_state($node) ne 'fence';

            $haenv->log('notice', "node '$node' in fence state but no services to-fence! admin interference?!");
            $repeat = 1 if $ns->fence_node($node);
        }

        last if !$repeat;
    }

    $self->flush_master_status();
}

# functions to compute next service states
# $cd: service configuration data (read only)
# $sd: service status data (read only)
#
# Note: use change_service_state() to alter state
#

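# 'request_stop': a stop was requested; wait for the LRM result and either move
# to 'stopped' on success or to 'error' on failure; fence if the node goes away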
sub next_state_request_stop {
    my ($self, $sid, $cd, $sd, $lrm_res) = @_;

    my $haenv = $self->{haenv};
    my $ns = $self->{ns};

    # check result from LRM daemon
    if ($lrm_res) {
        my $exit_code = $lrm_res->{exit_code};
        if ($exit_code == SUCCESS) {
            &$change_service_state($self, $sid, 'stopped');
            return;
        } else {
            $haenv->log('err', "service '$sid' stop failed (exit code $exit_code)");
            &$change_service_state($self, $sid, 'error'); # fixme: what state?
            return;
        }
    }

    if ($ns->node_is_offline_delayed($sd->{node})) {
        &$change_service_state($self, $sid, 'fence');
        return;
    }
}

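# 'migrate'/'relocate': wait for the LRM result; on success the service is now
# on the target node, on failure it stays (or is re-registered) on the source node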
sub next_state_migrate_relocate {
    my ($self, $sid, $cd, $sd, $lrm_res) = @_;

    my $haenv = $self->{haenv};
    my $ns = $self->{ns};

    # check result from LRM daemon
    if ($lrm_res) {
        my $exit_code = $lrm_res->{exit_code};
        my $req_state = $cd->{state} eq 'started' ? 'started' : 'request_stop';
        if ($exit_code == SUCCESS) {
            &$change_service_state($self, $sid, $req_state, node => $sd->{target});
            return;
        } elsif ($exit_code == EWRONG_NODE) {
            $haenv->log('err', "service '$sid' - migration failed: service" .
                        " registered on wrong node!");
            &$change_service_state($self, $sid, 'error');
        } else {
            $haenv->log('err', "service '$sid' - migration failed (exit code $exit_code)");
            &$change_service_state($self, $sid, $req_state, node => $sd->{node});
            return;
        }
    }

    if ($ns->node_is_offline_delayed($sd->{node})) {
        &$change_service_state($self, $sid, 'fence');
        return;
    }
}

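# 'stopped': handle pending CRM commands, fence the node if it went away, and
# move to 'started' if the service configuration requests it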
sub next_state_stopped {
    my ($self, $sid, $cd, $sd, $lrm_res) = @_;

    my $haenv = $self->{haenv};
    my $ns = $self->{ns};

    if ($sd->{node} ne $cd->{node}) {
        # this can happen if we fence a node with active migrations
        # hack: modify $sd (normally this should be considered read-only)
        $haenv->log('info', "fixup service '$sid' location ($sd->{node} => $cd->{node})");
        $sd->{node} = $cd->{node};
    }

    if ($sd->{cmd}) {
        my $cmd = shift @{$sd->{cmd}};

        if ($cmd eq 'migrate' || $cmd eq 'relocate') {
            my $target = shift @{$sd->{cmd}};
            if (!$ns->node_is_online($target)) {
                $haenv->log('err', "ignore service '$sid' $cmd request - node '$target' not online");
            } elsif ($sd->{node} eq $target) {
                $haenv->log('info', "ignore service '$sid' $cmd request - service already on node '$target'");
            } else {
                &$change_service_state($self, $sid, $cmd, node => $sd->{node},
                                       target => $target);
                return;
            }
        } elsif ($cmd eq 'stop') {
            $haenv->log('info', "ignore service '$sid' $cmd request - service already stopped");
        } else {
            $haenv->log('err', "unknown command '$cmd' for service '$sid'");
        }
        delete $sd->{cmd};
    }

    if ($cd->{state} eq 'disabled') {
        # NOTE: do nothing here; the stop state is an exception as we do not
        # process the LRM result here, so the LRM always tries to stop the
        # service (protection for the case that no CRM is active)
        return;
    }

    if ($ns->node_is_offline_delayed($sd->{node}) && $ns->get_node_state($sd->{node}) ne 'maintenance') {
        &$change_service_state($self, $sid, 'fence');
        return;
    }

    if ($cd->{state} eq 'stopped') {
        # almost the same as 'disabled' state but the service will also get recovered
        return;
    }

    if ($cd->{state} eq 'started') {
        # simply mark it started; if it's on the wrong node,
        # next_state_started will fix that for us
        &$change_service_state($self, $sid, 'started', node => $sd->{node});
        return;
    }

    $haenv->log('err', "service '$sid' - unknown state '$cd->{state}' in service configuration");
}

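# remember a failed start attempt on $node; the list is passed to
# select_service_node() as $tried_nodes so the relocation policy can skip it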
sub record_service_failed_on_node {
    my ($self, $sid, $node) = @_;

    if (!defined($self->{ss}->{$sid}->{failed_nodes})) {
        $self->{ss}->{$sid}->{failed_nodes} = [];
    }

    push @{$self->{ss}->{$sid}->{failed_nodes}}, $node;
}

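# 'started': keep the service running; handles the maintenance fallback, CRM
# commands (migrate/relocate/stop), LRM start results including the relocation
# policy on errors, and finally picks the best node via select_service_node()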
sub next_state_started {
    my ($self, $sid, $cd, $sd, $lrm_res) = @_;

    my $haenv = $self->{haenv};
    my $master_status = $self->{ms};
    my $ns = $self->{ns};

    if (!$ns->node_is_online($sd->{node})) {
        if ($ns->node_is_offline_delayed($sd->{node})) {
            &$change_service_state($self, $sid, 'fence');
        }
        if ($ns->get_node_state($sd->{node}) ne 'maintenance') {
            return;
        } else {
            # save current node as fallback for when it comes out of maintenance
            $sd->{maintenance_node} = $sd->{node};
        }
    }

    if ($cd->{state} eq 'disabled' || $cd->{state} eq 'stopped') {
        &$change_service_state($self, $sid, 'request_stop');
        return;
    }

    if ($cd->{state} eq 'started') {

        if ($sd->{cmd}) {
            my $cmd = shift @{$sd->{cmd}};

            if ($cmd eq 'migrate' || $cmd eq 'relocate') {
                my $target = shift @{$sd->{cmd}};
                if (!$ns->node_is_online($target)) {
                    $haenv->log('err', "ignore service '$sid' $cmd request - node '$target' not online");
                } elsif ($sd->{node} eq $target) {
                    $haenv->log('info', "ignore service '$sid' $cmd request - service already on node '$target'");
                } else {
                    $haenv->log('info', "$cmd service '$sid' to node '$target'");
                    &$change_service_state($self, $sid, $cmd, node => $sd->{node}, target => $target);
                }
            } elsif ($cmd eq 'stop') {
                my $timeout = shift @{$sd->{cmd}};
                if ($timeout == 0) {
                    $haenv->log('info', "request immediate service hard-stop for service '$sid'");
                } else {
                    $haenv->log('info', "request graceful stop with timeout '$timeout' for service '$sid'");
                }
                &$change_service_state($self, $sid, 'request_stop', timeout => $timeout);
                $haenv->update_service_config($sid, {'state' => 'stopped'});
            } else {
                $haenv->log('err', "unknown command '$cmd' for service '$sid'");
            }

            delete $sd->{cmd};

        } else {

            my $try_next = 0;

            if ($lrm_res) {

                my $ec = $lrm_res->{exit_code};
                if ($ec == SUCCESS) {

                    if (defined($sd->{failed_nodes})) {
                        $haenv->log('info', "relocation policy successful for '$sid' on node '$sd->{node}'," .
                                    " failed nodes: " . join(', ', @{$sd->{failed_nodes}}) );
                    }

                    delete $sd->{failed_nodes};

                    # store flag to indicate successful start - only valid while state == 'started'
                    $sd->{running} = 1;

                } elsif ($ec == ERROR) {

                    delete $sd->{running};

                    # apply our relocate policy if we got ERROR from the LRM
                    $self->record_service_failed_on_node($sid, $sd->{node});

                    if (scalar(@{$sd->{failed_nodes}}) <= $cd->{max_relocate}) {

                        # tell select_service_node to relocate if possible
                        $try_next = 1;

                        $haenv->log('warning', "starting service $sid on node" .
                                    " '$sd->{node}' failed, relocating service.");

                    } else {

                        $haenv->log('err', "recovery policy for service $sid " .
                                    "failed, entering error state. Failed nodes: " .
                                    join(', ', @{$sd->{failed_nodes}}));
                        &$change_service_state($self, $sid, 'error');
                        return;

                    }
                } else {
                    $self->record_service_failed_on_node($sid, $sd->{node});

                    $haenv->log('err', "service '$sid' got unrecoverable error" .
                                " (exit code $ec)");
                    # we have no safe way out (yet) for other errors
                    &$change_service_state($self, $sid, 'error');
                    return;
                }
            }

            my $node = select_service_node(
                $self->{groups},
                $self->{online_node_usage},
                $cd,
                $sd->{node},
                $try_next,
                $sd->{failed_nodes},
                $sd->{maintenance_node},
            );

            if ($node && ($sd->{node} ne $node)) {
                $self->{online_node_usage}->{$node}++;

                if (defined(my $fallback = $sd->{maintenance_node})) {
                    if ($node eq $fallback) {
                        $haenv->log('info', "moving service '$sid' back to '$fallback', node came back from maintenance.");
                        delete $sd->{maintenance_node};
                    } elsif ($sd->{node} ne $fallback) {
                        $haenv->log('info', "dropping maintenance fallback node '$fallback' for '$sid'");
                        delete $sd->{maintenance_node};
                    }
                }

                if ($cd->{type} eq 'vm') {
                    $haenv->log('info', "migrate service '$sid' to node '$node' (running)");
                    &$change_service_state($self, $sid, 'migrate', node => $sd->{node}, target => $node);
                } else {
                    $haenv->log('info', "relocate service '$sid' to node '$node'");
                    &$change_service_state($self, $sid, 'relocate', node => $sd->{node}, target => $node);
                }
            } else {
                if ($try_next && !defined($node)) {
                    $haenv->log('warning', "Start Error Recovery: Tried all available" .
                                " nodes for service '$sid', retry start on current node. " .
                                "Tried nodes: " . join(', ', @{$sd->{failed_nodes}}));
                }
                # ensure the service gets started again if it went down unexpectedly,
                # but also ensure no LRM result gets lost
                $sd->{uid} = compute_new_uuid($sd->{state}) if defined($lrm_res);
            }
        }

        return;
    }

    $haenv->log('err', "service '$sid' - unknown state '$cd->{state}' in service configuration");
}

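# 'error': the service stays put until an admin intervenes; setting the service
# to 'disabled' clears the failed-node list and moves it back to 'stopped'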
sub next_state_error {
    my ($self, $sid, $cd, $sd, $lrm_res) = @_;

    my $ns = $self->{ns};
    my $ms = $self->{ms};

    if ($cd->{state} eq 'disabled') {
        # clean up on error recovery
        delete $sd->{failed_nodes};

        &$change_service_state($self, $sid, 'stopped');
        return;
    }
}

# after a node was fenced this recovers the service to a new node
sub next_state_recovery {
    my ($self, $sid, $cd, $sd, $lrm_res) = @_;

    my ($haenv, $ss) = ($self->{haenv}, $self->{ss});
    my $ns = $self->{ns};
    my $ms = $self->{ms};

    if ($sd->{state} ne 'recovery') { # should not happen
        $haenv->log('err', "cannot recover service '$sid' from fencing, wrong state '$sd->{state}'");
        return;
    }

    my $fenced_node = $sd->{node}; # for logging purposes

    $self->recompute_online_node_usage(); # we want the most current node state

    my $recovery_node = select_service_node(
        $self->{groups},
        $self->{online_node_usage},
        $cd,
        $sd->{node},
    );

    if ($recovery_node) {
        my $msg = "recover service '$sid' from fenced node '$fenced_node' to node '$recovery_node'";
        if ($recovery_node eq $fenced_node) {
            # can happen with restricted groups when the node came back up again OK
            $msg = "recover service '$sid' to previous failed and fenced node '$fenced_node' again";
        }
        $haenv->log('info', "$msg");

        $fence_recovery_cleanup->($self, $sid, $fenced_node);

        $haenv->steal_service($sid, $sd->{node}, $recovery_node);
        $self->{online_node_usage}->{$recovery_node}++;

        # NOTE: $sd *is normally read-only*, fencing is the exception
        $cd->{node} = $sd->{node} = $recovery_node;
        my $new_state = ($cd->{state} eq 'started') ? 'started' : 'request_stop';
        $change_service_state->($self, $sid, $new_state, node => $recovery_node);
    } else {
        # no possible node found, cannot recover - but retry later, as we always try to make it available
        $haenv->log('err', "recovering service '$sid' from fenced node '$fenced_node' failed, no recovery node found");

        if ($cd->{state} eq 'disabled') {
            # allow getting a service out of recovery manually if an admin disables it.
            delete $sd->{failed_nodes}; # clean up on recovery to stopped
            $change_service_state->($self, $sid, 'stopped'); # must NOT go through request_stop
            return;
        }
    }
}

1;