pve-ha-manager.git: src/PVE/HA/Manager.pm
1 package PVE::HA::Manager;
2
3 use strict;
4 use warnings;
5 use Digest::MD5 qw(md5_base64);
6
7 use PVE::Tools;
8 use PVE::HA::Tools ':exit_codes';
9 use PVE::HA::NodeStatus;
10
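# Create a manager instance for the current master node: restore the node and
# service status parts that cannot be regenerated from the persisted manager
# status, and record the local node as master.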
11 sub new {
12 my ($this, $haenv) = @_;
13
14 my $class = ref($this) || $this;
15
16 my $self = bless { haenv => $haenv }, $class;
17
18 my $old_ms = $haenv->read_manager_status();
19
20 # we only copy the parts of the manager state which cannot be auto-generated
21
22 $self->{ns} = PVE::HA::NodeStatus->new($haenv, $old_ms->{node_status} || {});
23
24 # fixme: use separate class PVE::HA::ServiceStatus
25 $self->{ss} = $old_ms->{service_status} || {};
26
27 $self->{ms} = { master_node => $haenv->nodename() };
28
29 return $self;
30 }
31
32 sub cleanup {
33 my ($self) = @_;
34
35 # todo: ?
36 }
37
38 sub flush_master_status {
39 my ($self) = @_;
40
41 my ($haenv, $ms, $ns, $ss) = ($self->{haenv}, $self->{ms}, $self->{ns}, $self->{ss});
42
43 $ms->{node_status} = $ns->{status};
44 $ms->{service_status} = $ss;
45 $ms->{timestamp} = $haenv->get_time();
46
47 $haenv->write_manager_status($ms);
48 }
49
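# Return the HA group the service is configured for; if no (or an unknown)
# group is set, fall back to a default group containing all online nodes.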
50 sub get_service_group {
51 my ($groups, $online_node_usage, $service_conf) = @_;
52
53 my $group = {};
54 # add all online nodes to the default group to allow try_next when no group is set
55 foreach my $node (keys %$online_node_usage) {
56 $group->{nodes}->{$node} = 1;
57 }
58
59 # overwrite default if service is bound to a specific group
60 if (my $group_id = $service_conf->{group}) {
61 $group = $groups->{ids}->{$group_id} if $groups->{ids}->{$group_id};
62 }
63
64 return $group;
65 }
66
67 # groups available nodes with their priority as group index
68 sub get_node_priority_groups {
69 my ($group, $online_node_usage) = @_;
70
71 my $pri_groups = {};
72 my $group_members = {};
73 foreach my $entry (keys %{$group->{nodes}}) {
74 my ($node, $pri) = ($entry, 0);
75 if ($entry =~ m/^(\S+):(\d+)$/) {
76 ($node, $pri) = ($1, $2);
77 }
78 next if !defined($online_node_usage->{$node}); # offline
79 $pri_groups->{$pri}->{$node} = 1;
80 $group_members->{$node} = $pri;
81 }
82
83 # add non-group members to unrestricted groups (priority -1)
84 if (!$group->{restricted}) {
85 my $pri = -1;
86 foreach my $node (keys %$online_node_usage) {
87 next if defined($group_members->{$node});
88 $pri_groups->{$pri}->{$node} = 1;
89 $group_members->{$node} = -1;
90 }
91 }
92
93 return ($pri_groups, $group_members);
94 }
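# Illustrative example: for a restricted group configured as
#   { nodes => { 'node1:2' => 1, 'node2' => 1 }, restricted => 1 }
# with both nodes online, get_node_priority_groups() returns
#   ({ 2 => { node1 => 1 }, 0 => { node2 => 1 } }, { node1 => 2, node2 => 0 })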
95
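# Select a target node for a service. Unless the group's nofailback flag keeps
# the service on its current node, only nodes from the highest priority class
# of the service's group are considered: the maintenance fallback node wins if
# it is among them, otherwise the current node is kept when possible,
# otherwise the least used node is picked. With $try_next set, nodes listed in
# $tried_nodes are skipped and the next candidate after the current node (or
# the least used one) is chosen.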
96 sub select_service_node {
97 my ($groups, $online_node_usage, $service_conf, $current_node, $try_next, $tried_nodes, $maintenance_fallback) = @_;
98
99 my $group = get_service_group($groups, $online_node_usage, $service_conf);
100
101 my ($pri_groups, $group_members) = get_node_priority_groups($group, $online_node_usage);
102
103 my @pri_list = sort {$b <=> $a} keys %$pri_groups;
104 return undef if !scalar(@pri_list);
105
106 # stay on current node if possible (avoids random migrations)
107 if (!$try_next && $group->{nofailback} && defined($group_members->{$current_node})) {
108 return $current_node;
109 }
110
111 # select node from top priority node list
112
113 my $top_pri = $pri_list[0];
114
115 # try to avoid nodes where the service failed already if we want to relocate
116 if ($try_next) {
117 foreach my $node (@$tried_nodes) {
118 delete $pri_groups->{$top_pri}->{$node};
119 }
120 }
121
122 my @nodes = sort {
123 $online_node_usage->{$a} <=> $online_node_usage->{$b} || $a cmp $b
124 } keys %{$pri_groups->{$top_pri}};
125
126 my $found;
127 my $found_maintenance_fallback;
128 for (my $i = scalar(@nodes) - 1; $i >= 0; $i--) {
129 my $node = $nodes[$i];
130 if ($node eq $current_node) {
131 $found = $i;
132 }
133 if (defined($maintenance_fallback) && $node eq $maintenance_fallback) {
134 $found_maintenance_fallback = $i;
135 }
136 }
137
138 if (defined($found_maintenance_fallback)) {
139 return $nodes[$found_maintenance_fallback];
140 }
141
142 if ($try_next) {
143 if (defined($found) && ($found < (scalar(@nodes) - 1))) {
144 return $nodes[$found + 1];
145 } else {
146 return $nodes[0];
147 }
148 } elsif (defined($found)) {
149 return $nodes[$found];
150 } else {
151 return $nodes[0];
152 }
153 }
154
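# Each commanded state gets a fresh uid so that LRM results can be matched to
# the command that triggered them and stale results are ignored.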
155 my $uid_counter = 0;
156
157 sub compute_new_uuid {
158 my ($state) = @_;
159
160 $uid_counter++;
161 return md5_base64($state . $$ . time() . $uid_counter);
162 }
163
164 my $valid_service_states = {
165 stopped => 1,
166 request_stop => 1,
167 started => 1,
168 fence => 1,
169 recovery => 1,
170 migrate => 1,
171 relocate => 1,
172 freeze => 1,
173 error => 1,
174 };
175
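# Recount how many services each online node currently carries: most active
# states count once on the service's node, migrate/relocate count on both
# source and target (or only on the target if the source is already offline),
# and stopped services are not counted.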
176 sub recompute_online_node_usage {
177 my ($self) = @_;
178
179 my $online_node_usage = {};
180
181 my $online_nodes = $self->{ns}->list_online_nodes();
182
183 foreach my $node (@$online_nodes) {
184 $online_node_usage->{$node} = 0;
185 }
186
187 foreach my $sid (keys %{$self->{ss}}) {
188 my $sd = $self->{ss}->{$sid};
189 my $state = $sd->{state};
190 my $target = $sd->{target}; # optional
191 if (defined($online_node_usage->{$sd->{node}})) {
192 if (
193 $state eq 'started' || $state eq 'request_stop' || $state eq 'fence' ||
194 $state eq 'freeze' || $state eq 'error' || $state eq 'recovery'
195 ) {
196 $online_node_usage->{$sd->{node}}++;
197 } elsif (($state eq 'migrate') || ($state eq 'relocate')) {
198 # count it for both source and target, as load is put on both
199 $online_node_usage->{$sd->{node}}++;
200 $online_node_usage->{$target}++;
201 } elsif ($state eq 'stopped') {
202 # do nothing
203 } else {
204 die "should not be reached (sid = '$sid', state = '$state')";
205 }
206 } elsif (defined($target) && defined($online_node_usage->{$target})) {
207 if ($state eq 'migrate' || $state eq 'relocate') {
208 # to correctly track maintenance modes and also count the target as used, in
209 # case a node dies, as we cannot really know whether the to-be-aborted incoming
210 # migration has already cleaned up all used resources
211 $online_node_usage->{$target}++;
212 }
213 }
214 }
215
216 $self->{online_node_usage} = $online_node_usage;
217 }
218
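# Transition a service to a new state: validates the target state, resets the
# service status hash (keeping node, failed_nodes and maintenance_node),
# applies any extra parameters, recomputes node usage and assigns a new uid.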
219 my $change_service_state = sub {
220 my ($self, $sid, $new_state, %params) = @_;
221
222 my ($haenv, $ss) = ($self->{haenv}, $self->{ss});
223
224 my $sd = $ss->{$sid} || die "no such service '$sid'";
225
226 my $old_state = $sd->{state};
227 my $old_node = $sd->{node};
228 my $old_failed_nodes = $sd->{failed_nodes};
229 my $old_maintenance_node = $sd->{maintenance_node};
230
231 die "no state change" if $old_state eq $new_state; # just to be sure
232
233 die "invalid CRM service state '$new_state'\n" if !$valid_service_states->{$new_state};
234
235 foreach my $k (keys %$sd) { delete $sd->{$k}; };
236
237 $sd->{state} = $new_state;
238 $sd->{node} = $old_node;
239 $sd->{failed_nodes} = $old_failed_nodes if defined($old_failed_nodes);
240 $sd->{maintenance_node} = $old_maintenance_node if defined($old_maintenance_node);
241
242 my $text_state = '';
243 foreach my $k (sort keys %params) {
244 my $v = $params{$k};
245 $text_state .= ", " if $text_state;
246 $text_state .= "$k = $v";
247 $sd->{$k} = $v;
248 }
249
250 $self->recompute_online_node_usage();
251
252 $sd->{uid} = compute_new_uuid($new_state);
253
254 $text_state = " ($text_state)" if $text_state;
255 $haenv->log('info', "service '$sid': state changed from '${old_state}'" .
256 " to '${new_state}'$text_state");
257 };
258
259 # clean up a possible bad state from a recovered service to allow its start
260 my $fence_recovery_cleanup = sub {
261 my ($self, $sid, $fenced_node) = @_;
262
263 my $haenv = $self->{haenv};
264
265 my (undef, $type, $id) = $haenv->parse_sid($sid);
266 my $plugin = PVE::HA::Resources->lookup($type);
267
268 # should not happen
269 die "unknown resource type '$type'" if !$plugin;
270
271 # locks may block recovery, clean up those which are safe to remove after fencing,
272 # i.e., after the original node was reset and thus all of its state is gone
273 my $removable_locks = [
274 'backup',
275 'mounted',
276 'migrate',
277 'clone',
278 'rollback',
279 'snapshot',
280 'snapshot-delete',
281 'suspending',
282 'suspended',
283 ];
284 if (my $removed_lock = $plugin->remove_locks($haenv, $id, $removable_locks, $fenced_node)) {
285 $haenv->log('warning', "removed leftover lock '$removed_lock' from recovered " .
286 "service '$sid' to allow its start.");
287 }
288 };
289
290 # read LRM status for all nodes
291 sub read_lrm_status {
292 my ($self) = @_;
293
294 my $nodes = $self->{ns}->list_nodes();
295 my $haenv = $self->{haenv};
296
297 my $results = {};
298 my $modes = {};
299 foreach my $node (@$nodes) {
300 my $lrm_status = $haenv->read_lrm_status($node);
301 $modes->{$node} = $lrm_status->{mode} || 'active';
302 foreach my $uid (keys %{$lrm_status->{results}}) {
303 next if $results->{$uid}; # should not happen
304 $results->{$uid} = $lrm_status->{results}->{$uid};
305 }
306 }
307
308 return ($results, $modes);
309 }
310
311 # read new crm commands and save them into crm master status
312 sub update_crm_commands {
313 my ($self) = @_;
314
315 my ($haenv, $ms, $ns, $ss) = ($self->{haenv}, $self->{ms}, $self->{ns}, $self->{ss});
316
317 my $cmdlist = $haenv->read_crm_commands();
318
319 foreach my $cmd (split(/\n/, $cmdlist)) {
320 chomp $cmd;
321
322 if ($cmd =~ m/^(migrate|relocate)\s+(\S+)\s+(\S+)$/) {
323 my ($task, $sid, $node) = ($1, $2, $3);
324 if (my $sd = $ss->{$sid}) {
325 if (!$ns->node_is_online($node)) {
326 $haenv->log('err', "crm command error - node not online: $cmd");
327 } else {
328 if ($node eq $sd->{node}) {
329 $haenv->log('info', "ignore crm command - service already on target node: $cmd");
330 } else {
331 $haenv->log('info', "got crm command: $cmd");
332 $ss->{$sid}->{cmd} = [ $task, $node ];
333 }
334 }
335 } else {
336 $haenv->log('err', "crm command error - no such service: $cmd");
337 }
338
339 } elsif ($cmd =~ m/^stop\s+(\S+)\s+(\S+)$/) {
340 my ($sid, $timeout) = ($1, $2);
341 if (my $sd = $ss->{$sid}) {
342 $haenv->log('info', "got crm command: $cmd");
343 $ss->{$sid}->{cmd} = [ 'stop', $timeout ];
344 } else {
345 $haenv->log('err', "crm command error - no such service: $cmd");
346 }
347 } else {
348 $haenv->log('err', "unable to parse crm command: $cmd");
349 }
350 }
351
352 }
353
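# Main manager work function: update node states, sync the service list with
# the configuration, queue new CRM commands, then run the per-service state
# machine (including fencing and recovery) until no further state changes
# occur, and finally flush the manager status.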
354 sub manage {
355 my ($self) = @_;
356
357 my ($haenv, $ms, $ns, $ss) = ($self->{haenv}, $self->{ms}, $self->{ns}, $self->{ss});
358
359 my ($node_info) = $haenv->get_node_info();
360 my ($lrm_results, $lrm_modes) = $self->read_lrm_status();
361
362 $ns->update($node_info, $lrm_modes);
363
364 if (!$ns->node_is_operational($haenv->nodename())) {
365 $haenv->log('info', "master seems offline");
366 return;
367 }
368
369 my $sc = $haenv->read_service_config();
370
371 $self->{groups} = $haenv->read_group_config(); # update
372
373 # compute new service status
374
375 # add new service
376 foreach my $sid (sort keys %$sc) {
377 next if $ss->{$sid}; # already there
378 my $cd = $sc->{$sid};
379 next if $cd->{state} eq 'ignored';
380
381 $haenv->log('info', "adding new service '$sid' on node '$cd->{node}'");
382 # assume the service is already running, to avoid relocating a running service on add
383 my $state = ($cd->{state} eq 'started') ? 'started' : 'request_stop';
384 $ss->{$sid} = { state => $state, node => $cd->{node},
385 uid => compute_new_uuid('started') };
386 }
387
388 # remove stale or ignored services from manager state
389 foreach my $sid (keys %$ss) {
390 next if $sc->{$sid} && $sc->{$sid}->{state} ne 'ignored';
391
392 my $reason = defined($sc->{$sid}) ? 'ignored state requested' : 'no config';
393 $haenv->log('info', "removing stale service '$sid' ($reason)");
394
395 # remove all service related state information
396 delete $ss->{$sid};
397 }
398
399 $self->update_crm_commands();
400
401 for (;;) {
402 my $repeat = 0;
403
404 $self->recompute_online_node_usage();
405
406 foreach my $sid (sort keys %$ss) {
407 my $sd = $ss->{$sid};
408 my $cd = $sc->{$sid} || { state => 'disabled' };
409
410 my $lrm_res = $sd->{uid} ? $lrm_results->{$sd->{uid}} : undef;
411
412 my $last_state = $sd->{state};
413
414 if ($last_state eq 'stopped') {
415
416 $self->next_state_stopped($sid, $cd, $sd, $lrm_res);
417
418 } elsif ($last_state eq 'started') {
419
420 $self->next_state_started($sid, $cd, $sd, $lrm_res);
421
422 } elsif ($last_state eq 'migrate' || $last_state eq 'relocate') {
423
424 $self->next_state_migrate_relocate($sid, $cd, $sd, $lrm_res);
425
426 } elsif ($last_state eq 'fence') {
427
428 # do nothing here - wait until fenced
429
430 } elsif ($last_state eq 'recovery') {
431
432 $self->next_state_recovery($sid, $cd, $sd, $lrm_res);
433
434 } elsif ($last_state eq 'request_stop') {
435
436 $self->next_state_request_stop($sid, $cd, $sd, $lrm_res);
437
438 } elsif ($last_state eq 'freeze') {
439
440 my $lrm_mode = $sd->{node} ? $lrm_modes->{$sd->{node}} : undef;
441 # unfreeze
442 my $state = ($cd->{state} eq 'started') ? 'started' : 'request_stop';
443 &$change_service_state($self, $sid, $state)
444 if $lrm_mode && $lrm_mode eq 'active';
445
446 } elsif ($last_state eq 'error') {
447
448 $self->next_state_error($sid, $cd, $sd, $lrm_res);
449
450 } else {
451
452 die "unknown service state '$last_state'";
453 }
454
455 my $lrm_mode = $sd->{node} ? $lrm_modes->{$sd->{node}} : undef;
456 if ($lrm_mode && $lrm_mode eq 'restart') {
457 if (($sd->{state} eq 'started' || $sd->{state} eq 'stopped' ||
458 $sd->{state} eq 'request_stop')) {
459 &$change_service_state($self, $sid, 'freeze');
460 }
461 }
462
463 $repeat = 1 if $sd->{state} ne $last_state;
464 }
465
466 # handle fencing
467 my $fenced_nodes = {};
468 foreach my $sid (sort keys %$ss) {
469 my ($service_state, $service_node) = $ss->{$sid}->@{'state', 'node'};
470 next if $service_state ne 'fence';
471
472 if (!defined($fenced_nodes->{$service_node})) {
473 $fenced_nodes->{$service_node} = $ns->fence_node($service_node) || 0;
474 }
475
476 next if !$fenced_nodes->{$service_node};
477
478 # node fence was successful - recover service
479 $change_service_state->($self, $sid, 'recovery');
480 $repeat = 1; # for faster recovery execution
481 }
482
483 # Avoid that a node with no services left in 'fence' state (e.g., because they
484 # were removed manually by an admin) stays stuck in the 'fence' node state.
485 for my $node (sort grep { !defined($fenced_nodes->{$_}) } keys $ns->{status}->%*) {
486 next if $ns->get_node_state($node) ne 'fence';
487
488 $haenv->log('notice', "node '$node' in fence state but no services to-fence! admin interference?!");
489 $repeat = 1 if $ns->fence_node($node);
490 }
491
492 last if !$repeat;
493 }
494
495 $self->flush_master_status();
496 }
497
498 # functions to compute next service states
499 # $cd: service configuration data (read only)
500 # $sd: service status data (read only)
501 #
502 # Note: use change_service_state() to alter state
503 #
504
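# Handle a service in 'request_stop' state: wait for the LRM stop result and
# go to 'stopped' on success or 'error' on failure; fence the node if it goes
# offline in the meantime.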
505 sub next_state_request_stop {
506 my ($self, $sid, $cd, $sd, $lrm_res) = @_;
507
508 my $haenv = $self->{haenv};
509 my $ns = $self->{ns};
510
511 # check result from LRM daemon
512 if ($lrm_res) {
513 my $exit_code = $lrm_res->{exit_code};
514 if ($exit_code == SUCCESS) {
515 &$change_service_state($self, $sid, 'stopped');
516 return;
517 } else {
518 $haenv->log('err', "service '$sid' stop failed (exit code $exit_code)");
519 &$change_service_state($self, $sid, 'error'); # fixme: what state?
520 return;
521 }
522 }
523
524 if ($ns->node_is_offline_delayed($sd->{node})) {
525 &$change_service_state($self, $sid, 'fence');
526 return;
527 }
528 }
529
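# Handle a service in 'migrate' or 'relocate' state: on success the service is
# switched to the requested state on the target node, on failure it is either
# put into 'error' (wrong node) or back to the requested state on the source.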
530 sub next_state_migrate_relocate {
531 my ($self, $sid, $cd, $sd, $lrm_res) = @_;
532
533 my $haenv = $self->{haenv};
534 my $ns = $self->{ns};
535
536 # check result from LRM daemon
537 if ($lrm_res) {
538 my $exit_code = $lrm_res->{exit_code};
539 my $req_state = $cd->{state} eq 'started' ? 'started' : 'request_stop';
540 if ($exit_code == SUCCESS) {
541 &$change_service_state($self, $sid, $req_state, node => $sd->{target});
542 return;
543 } elsif ($exit_code == EWRONG_NODE) {
544 $haenv->log('err', "service '$sid' - migration failed: service" .
545 " registered on wrong node!");
546 &$change_service_state($self, $sid, 'error');
547 } else {
548 $haenv->log('err', "service '$sid' - migration failed (exit code $exit_code)");
549 &$change_service_state($self, $sid, $req_state, node => $sd->{node});
550 return;
551 }
552 }
553
554 if ($ns->node_is_offline_delayed($sd->{node})) {
555 &$change_service_state($self, $sid, 'fence');
556 return;
557 }
558 }
559
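# Handle a service in 'stopped' state: process queued commands, fence its node
# if that went offline (unless it is in maintenance), and mark the service as
# 'started' again if the configuration requests it.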
560 sub next_state_stopped {
561 my ($self, $sid, $cd, $sd, $lrm_res) = @_;
562
563 my $haenv = $self->{haenv};
564 my $ns = $self->{ns};
565
566 if ($sd->{node} ne $cd->{node}) {
567 # this can happen if we fence a node with active migrations
568 # hack: modify $sd (normally this should be considered read-only)
569 $haenv->log('info', "fixup service '$sid' location ($sd->{node} => $cd->{node})");
570 $sd->{node} = $cd->{node};
571 }
572
573 if ($sd->{cmd}) {
574 my $cmd = shift @{$sd->{cmd}};
575
576 if ($cmd eq 'migrate' || $cmd eq 'relocate') {
577 my $target = shift @{$sd->{cmd}};
578 if (!$ns->node_is_online($target)) {
579 $haenv->log('err', "ignore service '$sid' $cmd request - node '$target' not online");
580 } elsif ($sd->{node} eq $target) {
581 $haenv->log('info', "ignore service '$sid' $cmd request - service already on node '$target'");
582 } else {
583 &$change_service_state($self, $sid, $cmd, node => $sd->{node},
584 target => $target);
585 return;
586 }
587 } elsif ($cmd eq 'stop') {
588 $haenv->log('info', "ignore service '$sid' $cmd request - service already stopped");
589 } else {
590 $haenv->log('err', "unknown command '$cmd' for service '$sid'");
591 }
592 delete $sd->{cmd};
593 }
594
595 if ($cd->{state} eq 'disabled') {
596 # NOTE: do nothing here, the stop state is an exception as we do not
597 # process the LRM result here, thus the LRM always tries to stop the
598 # service (protection for the case no CRM is active)
599 return;
600 }
601
602 if ($ns->node_is_offline_delayed($sd->{node}) && $ns->get_node_state($sd->{node}) ne 'maintenance') {
603 &$change_service_state($self, $sid, 'fence');
604 return;
605 }
606
607 if ($cd->{state} eq 'stopped') {
608 # almost the same as 'disabled' state but the service will also get recovered
609 return;
610 }
611
612 if ($cd->{state} eq 'started') {
613 # simply mark it started; if it's on the wrong node,
614 # next_state_started will fix that for us
615 &$change_service_state($self, $sid, 'started', node => $sd->{node});
616 return;
617 }
618
619 $haenv->log('err', "service '$sid' - unknown state '$cd->{state}' in service configuration");
620 }
621
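# Remember that starting the service failed on the given node; the list is
# checked against max_relocate and passed to select_service_node() as
# $tried_nodes.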
622 sub record_service_failed_on_node {
623 my ($self, $sid, $node) = @_;
624
625 if (!defined($self->{ss}->{$sid}->{failed_nodes})) {
626 $self->{ss}->{$sid}->{failed_nodes} = [];
627 }
628
629 push @{$self->{ss}->{$sid}->{failed_nodes}}, $node;
630 }
631
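# Handle a service in 'started' state: fence its node if it stays offline, or
# remember it as maintenance fallback if it is in maintenance; process queued
# migrate/relocate/stop commands, apply the relocation policy on LRM start
# errors and move the service if select_service_node() picks a different node.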
632 sub next_state_started {
633 my ($self, $sid, $cd, $sd, $lrm_res) = @_;
634
635 my $haenv = $self->{haenv};
636 my $master_status = $self->{ms};
637 my $ns = $self->{ns};
638
639 if (!$ns->node_is_online($sd->{node})) {
640 if ($ns->node_is_offline_delayed($sd->{node})) {
641 &$change_service_state($self, $sid, 'fence');
642 }
643 if ($ns->get_node_state($sd->{node}) ne 'maintenance') {
644 return;
645 } else {
646 # save current node as fallback for when it comes out of
647 # maintenance
648 $sd->{maintenance_node} = $sd->{node};
649 }
650 }
651
652 if ($cd->{state} eq 'disabled' || $cd->{state} eq 'stopped') {
653 &$change_service_state($self, $sid, 'request_stop');
654 return;
655 }
656
657 if ($cd->{state} eq 'started') {
658
659 if ($sd->{cmd}) {
660 my $cmd = shift @{$sd->{cmd}};
661
662 if ($cmd eq 'migrate' || $cmd eq 'relocate') {
663 my $target = shift @{$sd->{cmd}};
664 if (!$ns->node_is_online($target)) {
665 $haenv->log('err', "ignore service '$sid' $cmd request - node '$target' not online");
666 } elsif ($sd->{node} eq $target) {
667 $haenv->log('info', "ignore service '$sid' $cmd request - service already on node '$target'");
668 } else {
669 $haenv->log('info', "$cmd service '$sid' to node '$target'");
670 &$change_service_state($self, $sid, $cmd, node => $sd->{node}, target => $target);
671 }
672 } elsif ($cmd eq 'stop') {
673 my $timeout = shift @{$sd->{cmd}};
674 if ($timeout == 0) {
675 $haenv->log('info', "request immediate service hard-stop for service '$sid'");
676 } else {
677 $haenv->log('info', "request graceful stop with timeout '$timeout' for service '$sid'");
678 }
679 &$change_service_state($self, $sid, 'request_stop', timeout => $timeout);
680 $haenv->update_service_config($sid, {'state' => 'stopped'});
681 } else {
682 $haenv->log('err', "unknown command '$cmd' for service '$sid'");
683 }
684
685 delete $sd->{cmd};
686
687 } else {
688
689 my $try_next = 0;
690
691 if ($lrm_res) {
692
693 my $ec = $lrm_res->{exit_code};
694 if ($ec == SUCCESS) {
695
696 if (defined($sd->{failed_nodes})) {
697 $haenv->log('info', "relocation policy successful for '$sid' on node '$sd->{node}'," .
698 " failed nodes: " . join(', ', @{$sd->{failed_nodes}}) );
699 }
700
701 delete $sd->{failed_nodes};
702
703 # store flag to indicate successful start - only valid while state == 'started'
704 $sd->{running} = 1;
705
706 } elsif ($ec == ERROR) {
707
708 delete $sd->{running};
709
710 # apply our relocate policy if we got ERROR from the LRM
711 $self->record_service_failed_on_node($sid, $sd->{node});
712
713 if (scalar(@{$sd->{failed_nodes}}) <= $cd->{max_relocate}) {
714
715 # tell select_service_node to relocate if possible
716 $try_next = 1;
717
718 $haenv->log('warning', "starting service $sid on node".
719 " '$sd->{node}' failed, relocating service.");
720
721 } else {
722
723 $haenv->log('err', "recovery policy for service $sid " .
724 "failed, entering error state. Failed nodes: ".
725 join(', ', @{$sd->{failed_nodes}}));
726 &$change_service_state($self, $sid, 'error');
727 return;
728
729 }
730 } else {
731 $self->record_service_failed_on_node($sid, $sd->{node});
732
733 $haenv->log('err', "service '$sid' got unrecoverable error" .
734 " (exit code $ec)");
735 # we have no safe way out (yet) for other errors
736 &$change_service_state($self, $sid, 'error');
737 return;
738 }
739 }
740
741 my $node = select_service_node(
742 $self->{groups},
743 $self->{online_node_usage},
744 $cd,
745 $sd->{node},
746 $try_next,
747 $sd->{failed_nodes},
748 $sd->{maintenance_node},
749 );
750
751 if ($node && ($sd->{node} ne $node)) {
752 $self->{online_node_usage}->{$node}++;
753
754 if (defined(my $fallback = $sd->{maintenance_node})) {
755 if ($node eq $fallback) {
756 $haenv->log('info', "moving service '$sid' back to '$fallback', node came back from maintenance.");
757 delete $sd->{maintenance_node};
758 } elsif ($sd->{node} ne $fallback) {
759 $haenv->log('info', "dropping maintenance fallback node '$fallback' for '$sid'");
760 delete $sd->{maintenance_node};
761 }
762 }
763
764 if ($cd->{type} eq 'vm') {
765 $haenv->log('info', "migrate service '$sid' to node '$node' (running)");
766 &$change_service_state($self, $sid, 'migrate', node => $sd->{node}, target => $node);
767 } else {
768 $haenv->log('info', "relocate service '$sid' to node '$node'");
769 &$change_service_state($self, $sid, 'relocate', node => $sd->{node}, target => $node);
770 }
771 } else {
772 if ($try_next && !defined($node)) {
773 $haenv->log('warning', "Start Error Recovery: Tried all available " .
774 " nodes for service '$sid', retry start on current node. " .
775 "Tried nodes: " . join(', ', @{$sd->{failed_nodes}}));
776 }
777 # ensure the service gets started again if it went down unexpectedly,
778 # but also ensure that no LRM result gets lost
779 $sd->{uid} = compute_new_uuid($sd->{state}) if defined($lrm_res);
780 }
781 }
782
783 return;
784 }
785
786 $haenv->log('err', "service '$sid' - unknown state '$cd->{state}' in service configuration");
787 }
788
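# A service stays in 'error' state until the admin disables it, which clears
# the failed nodes list and moves it to 'stopped'.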
789 sub next_state_error {
790 my ($self, $sid, $cd, $sd, $lrm_res) = @_;
791
792 my $ns = $self->{ns};
793 my $ms = $self->{ms};
794
795 if ($cd->{state} eq 'disabled') {
796 # clean up on error recovery
797 delete $sd->{failed_nodes};
798
799 &$change_service_state($self, $sid, 'stopped');
800 return;
801 }
802
803 }
804
805 # after a node was fenced this recovers the service to a new node
806 sub next_state_recovery {
807 my ($self, $sid, $cd, $sd, $lrm_res) = @_;
808
809 my ($haenv, $ss) = ($self->{haenv}, $self->{ss});
810 my $ns = $self->{ns};
811 my $ms = $self->{ms};
812
813 if ($sd->{state} ne 'recovery') { # should not happen
814 $haenv->log('err', "cannot recover service '$sid' from fencing, wrong state '$sd->{state}'");
815 return;
816 }
817
818 my $fenced_node = $sd->{node}; # for logging purpose
819
820 $self->recompute_online_node_usage(); # we want the most current node state
821
822 my $recovery_node = select_service_node(
823 $self->{groups},
824 $self->{online_node_usage},
825 $cd,
826 $sd->{node},
827 );
828
829 if ($recovery_node) {
830 my $msg = "recover service '$sid' from fenced node '$fenced_node' to node '$recovery_node'";
831 if ($recovery_node eq $fenced_node) {
832 # can happen with restricted groups if the node came up again OK
833 $msg = "recover service '$sid' to previous failed and fenced node '$fenced_node' again";
834 }
835 $haenv->log('info', "$msg");
836
837 $fence_recovery_cleanup->($self, $sid, $fenced_node);
838
839 $haenv->steal_service($sid, $sd->{node}, $recovery_node);
840 $self->{online_node_usage}->{$recovery_node}++;
841
842 # NOTE: $sd *is normally read-only*, fencing is the exception
843 $cd->{node} = $sd->{node} = $recovery_node;
844 my $new_state = ($cd->{state} eq 'started') ? 'started' : 'request_stop';
845 $change_service_state->($self, $sid, $new_state, node => $recovery_node);
846 } else {
847 # no possible node found, cannot recover - but retry later, as we always try to make it available
848 $haenv->log('err', "recovering service '$sid' from fenced node '$fenced_node' failed, no recovery node found");
849
850 if ($cd->{state} eq 'disabled') {
851 # allow getting a service out of recovery manually if an admin disables it.
852 delete $sd->{failed_nodes}; # clean up on recovery to stopped
853 $change_service_state->($self, $sid, 'stopped'); # must NOT go through request_stop
854 return;
855 }
856 }
857 }
858
859 1;