package PVE::HA::Manager;

use strict;
use warnings;
use Digest::MD5 qw(md5_base64);

use PVE::Tools;
use PVE::HA::Tools ':exit_codes';
use PVE::HA::NodeStatus;
use PVE::HA::Usage::Basic;
use PVE::HA::Usage::Static;

## Variable Name & Abbreviations Convention
#
# The HA stack uses some variables so frequently that they are abbreviated, which may be
# confusing for new readers. Here's a short list of the most commonly used ones.
#
# NOTE: variables should be assumed to be read-only if not otherwise stated; only use the
# specific methods to re-compute/read/alter them.
#
# - $haenv -> HA environment, the main interface to the simulator/test/real world
# - $sid -> Service ID, unique identifier for a service, `type:vmid` is common
#
# - $ms -> Master/Manager Status, contains runtime info from the current active manager
# - $ns -> Node Status, hash holding online/offline status about all nodes
#
# - $ss -> Service Status, hash holding the current state (last LRM cmd result, failed starts
#   or migrates, maintenance fallback node, ...) for *all* services
# - $sd -> Service Data, the service status of a *single* service, iow. $ss->{$sid}
#
# - $sc -> Service Configuration, hash for all services including target state, group, ...
# - $cd -> Configuration Data, the service config of a *single* service, iow. $sc->{$sid}
#
# Try to avoid adding new two-letter (or similarly over-abbreviated) names, but also don't
# send patches for changing the above, as that set is mostly sensible and should be easy to
# remember once you spend a bit of time in the HA code base.

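# A minimal sketch of how these hang together (hypothetical example data, for illustration
# only - the exact hash members depend on the service's state):
#
#   my $sid = 'vm:100';
#   my $cd  = $sc->{$sid};  # e.g. { state => 'started', node => 'node1', group => 'mygroup' }
#   my $sd  = $ss->{$sid};  # e.g. { state => 'started', node => 'node1', uid => '...' }
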
sub new {
    my ($this, $haenv) = @_;

    my $class = ref($this) || $this;

    my $self = bless { haenv => $haenv, crs => {} }, $class;

    my $old_ms = $haenv->read_manager_status();

    # we only copy the state parts of the manager status which cannot be auto-generated

    $self->{ns} = PVE::HA::NodeStatus->new($haenv, $old_ms->{node_status} || {});

    # fixme: use separate class PVE::HA::ServiceStatus
    $self->{ss} = $old_ms->{service_status} || {};

    $self->{ms} = { master_node => $haenv->nodename() };

    $self->update_crs_scheduler_mode(); # initial set, we update it once every loop

    return $self;
}

sub update_crs_scheduler_mode {
    my ($self) = @_;

    my $haenv = $self->{haenv};
    my $dc_cfg = $haenv->get_datacenter_settings();

    my $old_mode = $self->{crs}->{scheduler};
    my $new_mode = $dc_cfg->{crs}->{ha} || 'basic';

    if (!defined($old_mode)) {
        $haenv->log('info', "using scheduler mode '$new_mode'") if $new_mode ne 'basic';
    } elsif ($new_mode eq $old_mode) {
        return; # nothing to do
    } else {
        $haenv->log('info', "switching scheduler mode from '$old_mode' to '$new_mode'");
    }

    $self->{crs}->{scheduler} = $new_mode;

    return;
}

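# The scheduler mode read above comes from the datacenter-wide CRS options; a rough sketch of
# the structure this code expects from get_datacenter_settings() (the datacenter.cfg syntax
# shown is an assumption, only the hash access pattern is taken from the code above):
#
#   # datacenter.cfg:  crs: ha=static
#   # parsed as:       $dc_cfg->{crs} = { ha => 'static' }   # 'basic' is the default if unset
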
sub cleanup {
    my ($self) = @_;

    # todo: ?
}

sub flush_master_status {
    my ($self) = @_;

    my ($haenv, $ms, $ns, $ss) = ($self->{haenv}, $self->{ms}, $self->{ns}, $self->{ss});

    $ms->{node_status} = $ns->{status};
    $ms->{service_status} = $ss;
    $ms->{timestamp} = $haenv->get_time();

    $haenv->write_manager_status($ms);
}

sub get_service_group {
    my ($groups, $online_node_usage, $service_conf) = @_;

    my $group = {};
    # add all online nodes to default group to allow try_next when no group is set
    $group->{nodes}->{$_} = 1 for $online_node_usage->list_nodes();

    # overwrite default if service is bound to a specific group
    if (my $group_id = $service_conf->{group}) {
        $group = $groups->{ids}->{$group_id} if $groups->{ids}->{$group_id};
    }

    return $group;
}

# groups the available nodes by priority, using the priority as hash index
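#
# For example (hypothetical input): a group with nodes { 'node1:2' => 1, 'node2' => 1 },
# both nodes online and no other online nodes, would yield:
#
#   $pri_groups    = { 2 => { node1 => 1 }, 0 => { node2 => 1 } };
#   $group_members = { node1 => 2, node2 => 0 };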
sub get_node_priority_groups {
    my ($group, $online_node_usage) = @_;

    my $pri_groups = {};
    my $group_members = {};
    foreach my $entry (keys %{$group->{nodes}}) {
        my ($node, $pri) = ($entry, 0);
        if ($entry =~ m/^(\S+):(\d+)$/) {
            ($node, $pri) = ($1, $2);
        }
        next if !$online_node_usage->contains_node($node); # offline
        $pri_groups->{$pri}->{$node} = 1;
        $group_members->{$node} = $pri;
    }

    # add non-group members to unrestricted groups (priority -1)
    if (!$group->{restricted}) {
        my $pri = -1;
        for my $node ($online_node_usage->list_nodes()) {
            next if defined($group_members->{$node});
            $pri_groups->{$pri}->{$node} = 1;
            $group_members->{$node} = -1;
        }
    }

    return ($pri_groups, $group_members);
}

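# Selects the best node for a service: prefer nodes from the highest-priority group, keep the
# service where it is when possible, filter out nodes the service already failed on when
# relocating ($try_next), and otherwise pick the node with the lowest usage score. A rough
# usage sketch (the argument values are illustrative only):
#
#   my $node = select_service_node($groups, $online_node_usage, 'vm:100', $service_conf,
#       'node1', 0, [], undef);
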
sub select_service_node {
    my ($groups, $online_node_usage, $sid, $service_conf, $current_node, $try_next, $tried_nodes, $maintenance_fallback) = @_;

    my $group = get_service_group($groups, $online_node_usage, $service_conf);

    my ($pri_groups, $group_members) = get_node_priority_groups($group, $online_node_usage);

    my @pri_list = sort {$b <=> $a} keys %$pri_groups;
    return undef if !scalar(@pri_list);

    # stay on current node if possible (avoids random migrations)
    if (!$try_next && $group->{nofailback} && defined($group_members->{$current_node})) {
        return $current_node;
    }

    # select node from top priority node list

    my $top_pri = $pri_list[0];

    # try to avoid nodes where the service failed already if we want to relocate
    if ($try_next) {
        foreach my $node (@$tried_nodes) {
            delete $pri_groups->{$top_pri}->{$node};
        }
    }

    return $maintenance_fallback
        if defined($maintenance_fallback) && $pri_groups->{$top_pri}->{$maintenance_fallback};

    return $current_node if !$try_next && $pri_groups->{$top_pri}->{$current_node};

    my $scores = $online_node_usage->score_nodes_to_start_service($sid, $current_node);
    my @nodes = sort {
        $scores->{$a} <=> $scores->{$b} || $a cmp $b
    } keys %{$pri_groups->{$top_pri}};

    my $found;
    for (my $i = scalar(@nodes) - 1; $i >= 0; $i--) {
        my $node = $nodes[$i];
        if ($node eq $current_node) {
            $found = $i;
        }
    }

    if ($try_next) {
        if (defined($found) && ($found < (scalar(@nodes) - 1))) {
            return $nodes[$found + 1];
        } else {
            return $nodes[0];
        }
    } else {
        return $nodes[0];
    }
}

my $uid_counter = 0;

sub compute_new_uuid {
    my ($state) = @_;

    $uid_counter++;
    return md5_base64($state . $$ . time() . $uid_counter);
}
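# NOTE: the uid is regenerated on every state change (see $change_service_state below), so a
# pending LRM result for the old uid no longer matches and the command is treated as new. A
# sketch of a call (the output varies per process and time, only the shape is fixed):
#
#   my $uid = compute_new_uuid('started');  # e.g. 'X7c1...' - 22 chars of base64-encoded MD5
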
my $valid_service_states = {
    stopped => 1,
    request_stop => 1,
    started => 1,
    fence => 1,
    recovery => 1,
    migrate => 1,
    relocate => 1,
    freeze => 1,
    error => 1,
};
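
# A typical lifecycle walks through a subset of these states, for example (illustrative only):
#
#   stopped -> started -> migrate -> started -> request_stop -> stopped
#
# while 'fence' -> 'recovery' is entered when the service's node failed and was fenced.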

# FIXME with 'static' mode and thousands of services, the overhead can be noticeable and the
# fact that this function is called for each state change and upon recovery doesn't help.
sub recompute_online_node_usage {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    my $online_nodes = $self->{ns}->list_online_nodes();

    my $online_node_usage;

    if (my $mode = $self->{crs}->{scheduler}) {
        if ($mode eq 'static') {
            $online_node_usage = eval {
                my $scheduler = PVE::HA::Usage::Static->new($haenv);
                $scheduler->add_node($_) for $online_nodes->@*;
                return $scheduler;
            };
            $haenv->log('warning', "fallback to 'basic' scheduler mode, init for 'static' failed - $@")
                if $@;
        } elsif ($mode eq 'basic') {
            # handled below in the general fall-back case
        } else {
            $haenv->log('warning', "got unknown scheduler mode '$mode', using 'basic'");
        }
    }

    # fallback to the basic algorithm in any case
    if (!$online_node_usage) {
        $online_node_usage = PVE::HA::Usage::Basic->new($haenv);
        $online_node_usage->add_node($_) for $online_nodes->@*;
    }

    foreach my $sid (keys %{$self->{ss}}) {
        my $sd = $self->{ss}->{$sid};
        my $state = $sd->{state};
        my $target = $sd->{target}; # optional
        if ($online_node_usage->contains_node($sd->{node})) {
            if (
                $state eq 'started' || $state eq 'request_stop' || $state eq 'fence' ||
                $state eq 'freeze' || $state eq 'error' || $state eq 'recovery'
            ) {
                $online_node_usage->add_service_usage_to_node($sd->{node}, $sid, $sd->{node});
            } elsif (($state eq 'migrate') || ($state eq 'relocate')) {
                my $source = $sd->{node};
                # count it for both source and target, as load is put on both
                $online_node_usage->add_service_usage_to_node($source, $sid, $source, $target);
                $online_node_usage->add_service_usage_to_node($target, $sid, $source, $target);
            } elsif ($state eq 'stopped') {
                # do nothing
            } else {
                die "should not be reached (sid = '$sid', state = '$state')";
            }
        } elsif (defined($target) && $online_node_usage->contains_node($target)) {
            if ($state eq 'migrate' || $state eq 'relocate') {
                # to correctly track maintenance modes and also consider the target as used for
                # the case a node dies, as we cannot really know if the to-be-aborted incoming
                # migration has already cleaned up all used resources
                $online_node_usage->add_service_usage_to_node($target, $sid, $sd->{node}, $target);
            }
        }
    }

    $self->{online_node_usage} = $online_node_usage;
}

my $change_service_state = sub {
    my ($self, $sid, $new_state, %params) = @_;

    my ($haenv, $ss) = ($self->{haenv}, $self->{ss});

    my $sd = $ss->{$sid} || die "no such service '$sid'";

    my $old_state = $sd->{state};
    my $old_node = $sd->{node};
    my $old_failed_nodes = $sd->{failed_nodes};
    my $old_maintenance_node = $sd->{maintenance_node};

    die "no state change" if $old_state eq $new_state; # just to be sure

    die "invalid CRM service state '$new_state'\n" if !$valid_service_states->{$new_state};

    foreach my $k (keys %$sd) { delete $sd->{$k}; };

    $sd->{state} = $new_state;
    $sd->{node} = $old_node;
    $sd->{failed_nodes} = $old_failed_nodes if defined($old_failed_nodes);
    $sd->{maintenance_node} = $old_maintenance_node if defined($old_maintenance_node);

    my $text_state = '';
    foreach my $k (sort keys %params) {
        my $v = $params{$k};
        $text_state .= ", " if $text_state;
        $text_state .= "$k = $v";
        $sd->{$k} = $v;
    }

    $self->recompute_online_node_usage();

    $sd->{uid} = compute_new_uuid($new_state);

    $text_state = " ($text_state)" if $text_state;
    $haenv->log('info', "service '$sid': state changed from '${old_state}'" .
        " to '${new_state}'$text_state");
};

# clean up a possible bad state from a recovered service to allow its start
my $fence_recovery_cleanup = sub {
    my ($self, $sid, $fenced_node) = @_;

    my $haenv = $self->{haenv};

    my (undef, $type, $id) = $haenv->parse_sid($sid);
    my $plugin = PVE::HA::Resources->lookup($type);

    # should not happen
    die "unknown resource type '$type'" if !$plugin;

    # locks may block recovery, clean up those which are safe to remove after fencing,
    # i.e., after the original node was reset and thus all its state is gone
    my $removable_locks = [
        'backup',
        'mounted',
        'migrate',
        'clone',
        'rollback',
        'snapshot',
        'snapshot-delete',
        'suspending',
        'suspended',
    ];
    if (my $removed_lock = $plugin->remove_locks($haenv, $id, $removable_locks, $fenced_node)) {
        $haenv->log('warning', "removed leftover lock '$removed_lock' from recovered " .
            "service '$sid' to allow its start.");
    }
};

# read LRM status for all nodes
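#
# Returned structure, roughly (the shape is inferred from the usage below and in manage(),
# not a formal schema):
#
#   $results = { <uid> => { exit_code => 0, ... } };        # one entry per finished LRM command
#   $modes   = { node1 => 'active', node2 => 'restart', ... };  # per-node LRM mode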
sub read_lrm_status {
    my ($self) = @_;

    my $nodes = $self->{ns}->list_nodes();
    my $haenv = $self->{haenv};

    my $results = {};
    my $modes = {};
    foreach my $node (@$nodes) {
        my $lrm_status = $haenv->read_lrm_status($node);
        $modes->{$node} = $lrm_status->{mode} || 'active';
        foreach my $uid (keys %{$lrm_status->{results}}) {
            next if $results->{$uid}; # should not happen
            $results->{$uid} = $lrm_status->{results}->{$uid};
        }
    }

    return ($results, $modes);
}

# read new crm commands and save them into crm master status
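#
# The command list read below is newline-separated plain text, one command per line; the
# formats handled by the regexes are (the sids, nodes and timeout are illustrative):
#
#   migrate vm:100 node2
#   relocate ct:101 node3
#   stop vm:100 180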
sub update_crm_commands {
    my ($self) = @_;

    my ($haenv, $ms, $ns, $ss) = ($self->{haenv}, $self->{ms}, $self->{ns}, $self->{ss});

    my $cmdlist = $haenv->read_crm_commands();

    foreach my $cmd (split(/\n/, $cmdlist)) {
        chomp $cmd;

        if ($cmd =~ m/^(migrate|relocate)\s+(\S+)\s+(\S+)$/) {
            my ($task, $sid, $node) = ($1, $2, $3);
            if (my $sd = $ss->{$sid}) {
                if (!$ns->node_is_online($node)) {
                    $haenv->log('err', "crm command error - node not online: $cmd");
                } else {
                    if ($node eq $sd->{node}) {
                        $haenv->log('info', "ignore crm command - service already on target node: $cmd");
                    } else {
                        $haenv->log('info', "got crm command: $cmd");
                        $ss->{$sid}->{cmd} = [ $task, $node ];
                    }
                }
            } else {
                $haenv->log('err', "crm command error - no such service: $cmd");
            }

        } elsif ($cmd =~ m/^stop\s+(\S+)\s+(\S+)$/) {
            my ($sid, $timeout) = ($1, $2);
            if (my $sd = $ss->{$sid}) {
                $haenv->log('info', "got crm command: $cmd");
                $ss->{$sid}->{cmd} = [ 'stop', $timeout ];
            } else {
                $haenv->log('err', "crm command error - no such service: $cmd");
            }
        } else {
            $haenv->log('err', "unable to parse crm command: $cmd");
        }
    }
}

sub manage {
    my ($self) = @_;

    my ($haenv, $ms, $ns, $ss) = ($self->{haenv}, $self->{ms}, $self->{ns}, $self->{ss});

    my ($node_info) = $haenv->get_node_info();
    my ($lrm_results, $lrm_modes) = $self->read_lrm_status();

    $ns->update($node_info, $lrm_modes);

    if (!$ns->node_is_operational($haenv->nodename())) {
        $haenv->log('info', "master seems offline");
        return;
    }

    $self->update_crs_scheduler_mode();

    my $sc = $haenv->read_service_config();

    $self->{groups} = $haenv->read_group_config(); # update

    # compute new service status

    # add new services
    foreach my $sid (sort keys %$sc) {
        next if $ss->{$sid}; # already there
        my $cd = $sc->{$sid};
        next if $cd->{state} eq 'ignored';

        $haenv->log('info', "adding new service '$sid' on node '$cd->{node}'");
        # assume we are running to avoid relocating a running service on add
        my $state = ($cd->{state} eq 'started') ? 'started' : 'request_stop';
        $ss->{$sid} = {
            state => $state, node => $cd->{node}, uid => compute_new_uuid('started'),
        };
    }

    # remove stale or ignored services from manager state
    foreach my $sid (keys %$ss) {
        next if $sc->{$sid} && $sc->{$sid}->{state} ne 'ignored';

        my $reason = defined($sc->{$sid}) ? 'ignored state requested' : 'no config';
        $haenv->log('info', "removing stale service '$sid' ($reason)");

        # remove all service related state information
        delete $ss->{$sid};
    }

    $self->update_crm_commands();

    for (;;) {
        my $repeat = 0;

        $self->recompute_online_node_usage();

        foreach my $sid (sort keys %$ss) {
            my $sd = $ss->{$sid};
            my $cd = $sc->{$sid} || { state => 'disabled' };

            my $lrm_res = $sd->{uid} ? $lrm_results->{$sd->{uid}} : undef;

            my $last_state = $sd->{state};

            if ($last_state eq 'stopped') {

                $self->next_state_stopped($sid, $cd, $sd, $lrm_res);

            } elsif ($last_state eq 'started') {

                $self->next_state_started($sid, $cd, $sd, $lrm_res);

            } elsif ($last_state eq 'migrate' || $last_state eq 'relocate') {

                $self->next_state_migrate_relocate($sid, $cd, $sd, $lrm_res);

            } elsif ($last_state eq 'fence') {

                # do nothing here - wait until fenced

            } elsif ($last_state eq 'recovery') {

                $self->next_state_recovery($sid, $cd, $sd, $lrm_res);

            } elsif ($last_state eq 'request_stop') {

                $self->next_state_request_stop($sid, $cd, $sd, $lrm_res);

            } elsif ($last_state eq 'freeze') {

                my $lrm_mode = $sd->{node} ? $lrm_modes->{$sd->{node}} : undef;
                if ($lrm_mode && $lrm_mode eq 'active') { # unfreeze if active again
                    my $state = ($cd->{state} eq 'started') ? 'started' : 'request_stop';
                    $change_service_state->($self, $sid, $state);
                }

            } elsif ($last_state eq 'error') {

                $self->next_state_error($sid, $cd, $sd, $lrm_res);

            } else {

                die "unknown service state '$last_state'";
            }

            my $lrm_mode = $sd->{node} ? $lrm_modes->{$sd->{node}} : undef;
            if ($lrm_mode && $lrm_mode eq 'restart') {
                my $state = $sd->{state};
                if ($state eq 'started' || $state eq 'stopped' || $state eq 'request_stop') {
                    $change_service_state->($self, $sid, 'freeze');
                }
            }

            $repeat = 1 if $sd->{state} ne $last_state;
        }

        # handle fencing
        my $fenced_nodes = {};
        foreach my $sid (sort keys %$ss) {
            my ($service_state, $service_node) = $ss->{$sid}->@{'state', 'node'};
            next if $service_state ne 'fence';

            if (!defined($fenced_nodes->{$service_node})) {
                $fenced_nodes->{$service_node} = $ns->fence_node($service_node) || 0;
            }

            next if !$fenced_nodes->{$service_node};

            # node fence was successful - recover service
            $change_service_state->($self, $sid, 'recovery');
            $repeat = 1; # for faster recovery execution
        }

        # Avoid that a node without any services in 'fence' state (e.g., all got removed
        # manually by an admin) stays stuck in the 'fence' node state.
        for my $node (sort grep { !defined($fenced_nodes->{$_}) } keys $ns->{status}->%*) {
            next if $ns->get_node_state($node) ne 'fence';

            $haenv->log('notice', "node '$node' in fence state but no services to-fence! admin interference?!");
            $repeat = 1 if $ns->fence_node($node);
        }

        last if !$repeat;
    }

    $self->flush_master_status();
}

# functions to compute next service states
# $cd: service configuration data (read-only)
# $sd: service status data (read-only)
#
# Note: use change_service_state() to alter state
#

sub next_state_request_stop {
    my ($self, $sid, $cd, $sd, $lrm_res) = @_;

    my $haenv = $self->{haenv};
    my $ns = $self->{ns};

    # check result from LRM daemon
    if ($lrm_res) {
        my $exit_code = $lrm_res->{exit_code};
        if ($exit_code == SUCCESS) {
            &$change_service_state($self, $sid, 'stopped');
            return;
        } else {
            $haenv->log('err', "service '$sid' stop failed (exit code $exit_code)");
            &$change_service_state($self, $sid, 'error'); # fixme: what state?
            return;
        }
    }

    if ($ns->node_is_offline_delayed($sd->{node})) {
        &$change_service_state($self, $sid, 'fence');
        return;
    }
}

sub next_state_migrate_relocate {
    my ($self, $sid, $cd, $sd, $lrm_res) = @_;

    my $haenv = $self->{haenv};
    my $ns = $self->{ns};

    # check result from LRM daemon
    if ($lrm_res) {
        my $exit_code = $lrm_res->{exit_code};
        my $req_state = $cd->{state} eq 'started' ? 'started' : 'request_stop';
        if ($exit_code == SUCCESS) {
            &$change_service_state($self, $sid, $req_state, node => $sd->{target});
            return;
        } elsif ($exit_code == EWRONG_NODE) {
            $haenv->log('err', "service '$sid' - migration failed: service" .
                " registered on wrong node!");
            &$change_service_state($self, $sid, 'error');
        } else {
            $haenv->log('err', "service '$sid' - migration failed (exit code $exit_code)");
            &$change_service_state($self, $sid, $req_state, node => $sd->{node});
            return;
        }
    }

    if ($ns->node_is_offline_delayed($sd->{node})) {
        &$change_service_state($self, $sid, 'fence');
        return;
    }
}

sub next_state_stopped {
    my ($self, $sid, $cd, $sd, $lrm_res) = @_;

    my $haenv = $self->{haenv};
    my $ns = $self->{ns};

    if ($sd->{node} ne $cd->{node}) {
        # this can happen if we fence a node with active migrations
        # hack: modify $sd (normally this should be considered read-only)
        $haenv->log('info', "fixup service '$sid' location ($sd->{node} => $cd->{node})");
        $sd->{node} = $cd->{node};
    }

    if ($sd->{cmd}) {
        my $cmd = shift @{$sd->{cmd}};

        if ($cmd eq 'migrate' || $cmd eq 'relocate') {
            my $target = shift @{$sd->{cmd}};
            if (!$ns->node_is_online($target)) {
                $haenv->log('err', "ignore service '$sid' $cmd request - node '$target' not online");
            } elsif ($sd->{node} eq $target) {
                $haenv->log('info', "ignore service '$sid' $cmd request - service already on node '$target'");
            } else {
                $change_service_state->($self, $sid, $cmd, node => $sd->{node}, target => $target);
                return;
            }
        } elsif ($cmd eq 'stop') {
            $haenv->log('info', "ignore service '$sid' $cmd request - service already stopped");
        } else {
            $haenv->log('err', "unknown command '$cmd' for service '$sid'");
        }
        delete $sd->{cmd};
    }

    if ($cd->{state} eq 'disabled') {
        # NOTE: do nothing here, the stop state is an exception as we do not
        # process the LRM result here, thus the LRM always tries to stop the
        # service (protection for the case no CRM is active)
        return;
    }

    if ($ns->node_is_offline_delayed($sd->{node}) && $ns->get_node_state($sd->{node}) ne 'maintenance') {
        &$change_service_state($self, $sid, 'fence');
        return;
    }

    if ($cd->{state} eq 'stopped') {
        # almost the same as 'disabled' state but the service will also get recovered
        return;
    }

    if ($cd->{state} eq 'started') {
        # simply mark it started, if it's on the wrong node
        # next_state_started will fix that for us
        &$change_service_state($self, $sid, 'started', node => $sd->{node});
        return;
    }

    $haenv->log('err', "service '$sid' - unknown state '$cd->{state}' in service configuration");
}

sub record_service_failed_on_node {
    my ($self, $sid, $node) = @_;

    if (!defined($self->{ss}->{$sid}->{failed_nodes})) {
        $self->{ss}->{$sid}->{failed_nodes} = [];
    }

    push @{$self->{ss}->{$sid}->{failed_nodes}}, $node;
}

sub next_state_started {
    my ($self, $sid, $cd, $sd, $lrm_res) = @_;

    my $haenv = $self->{haenv};
    my $master_status = $self->{ms};
    my $ns = $self->{ns};

    if (!$ns->node_is_online($sd->{node})) {
        if ($ns->node_is_offline_delayed($sd->{node})) {
            &$change_service_state($self, $sid, 'fence');
        }
        if ($ns->get_node_state($sd->{node}) ne 'maintenance') {
            return;
        } else {
            # save current node as fallback for when it comes out of maintenance
            $sd->{maintenance_node} = $sd->{node};
        }
    }

    if ($cd->{state} eq 'disabled' || $cd->{state} eq 'stopped') {
        &$change_service_state($self, $sid, 'request_stop');
        return;
    }

    if ($cd->{state} eq 'started') {

        if ($sd->{cmd}) {
            my $cmd = shift @{$sd->{cmd}};

            if ($cmd eq 'migrate' || $cmd eq 'relocate') {
                my $target = shift @{$sd->{cmd}};
                if (!$ns->node_is_online($target)) {
                    $haenv->log('err', "ignore service '$sid' $cmd request - node '$target' not online");
                } elsif ($sd->{node} eq $target) {
                    $haenv->log('info', "ignore service '$sid' $cmd request - service already on node '$target'");
                } else {
                    $haenv->log('info', "$cmd service '$sid' to node '$target'");
                    &$change_service_state($self, $sid, $cmd, node => $sd->{node}, target => $target);
                }
            } elsif ($cmd eq 'stop') {
                my $timeout = shift @{$sd->{cmd}};
                if ($timeout == 0) {
                    $haenv->log('info', "request immediate service hard-stop for service '$sid'");
                } else {
                    $haenv->log('info', "request graceful stop with timeout '$timeout' for service '$sid'");
                }
                &$change_service_state($self, $sid, 'request_stop', timeout => $timeout);
                $haenv->update_service_config($sid, {'state' => 'stopped'});
            } else {
                $haenv->log('err', "unknown command '$cmd' for service '$sid'");
            }

            delete $sd->{cmd};

        } else {

            my $try_next = 0;

            if ($lrm_res) {

                my $ec = $lrm_res->{exit_code};
                if ($ec == SUCCESS) {

                    if (defined($sd->{failed_nodes})) {
                        $haenv->log('info', "relocation policy successful for '$sid' on node '$sd->{node}'," .
                            " failed nodes: " . join(', ', @{$sd->{failed_nodes}}));
                    }

                    delete $sd->{failed_nodes};

                    # store flag to indicate successful start - only valid while state == 'started'
                    $sd->{running} = 1;

                } elsif ($ec == ERROR) {

                    delete $sd->{running};

                    # apply our relocate policy if we got ERROR from the LRM
                    $self->record_service_failed_on_node($sid, $sd->{node});

                    if (scalar(@{$sd->{failed_nodes}}) <= $cd->{max_relocate}) {

                        # tell select_service_node to relocate if possible
                        $try_next = 1;

                        $haenv->log('warning', "starting service $sid on node" .
                            " '$sd->{node}' failed, relocating service.");

                    } else {

                        $haenv->log('err', "recovery policy for service $sid " .
                            "failed, entering error state. Failed nodes: " .
                            join(', ', @{$sd->{failed_nodes}}));
                        &$change_service_state($self, $sid, 'error');
                        return;

                    }
                } else {
                    $self->record_service_failed_on_node($sid, $sd->{node});

                    $haenv->log('err', "service '$sid' got unrecoverable error" .
                        " (exit code $ec)");
                    # we have no safe way out (yet) for other errors
                    &$change_service_state($self, $sid, 'error');
                    return;
                }
            }

            my $node = select_service_node(
                $self->{groups},
                $self->{online_node_usage},
                $sid,
                $cd,
                $sd->{node},
                $try_next,
                $sd->{failed_nodes},
                $sd->{maintenance_node},
            );

            if ($node && ($sd->{node} ne $node)) {
                $self->{online_node_usage}->add_service_usage_to_node($node, $sid, $sd->{node});

                if (defined(my $fallback = $sd->{maintenance_node})) {
                    if ($node eq $fallback) {
                        $haenv->log('info', "moving service '$sid' back to '$fallback', node came back from maintenance.");
                        delete $sd->{maintenance_node};
                    } elsif ($sd->{node} ne $fallback) {
                        $haenv->log('info', "dropping maintenance fallback node '$fallback' for '$sid'");
                        delete $sd->{maintenance_node};
                    }
                }

                if ($cd->{type} eq 'vm') {
                    $haenv->log('info', "migrate service '$sid' to node '$node' (running)");
                    &$change_service_state($self, $sid, 'migrate', node => $sd->{node}, target => $node);
                } else {
                    $haenv->log('info', "relocate service '$sid' to node '$node'");
                    &$change_service_state($self, $sid, 'relocate', node => $sd->{node}, target => $node);
                }
            } else {
                if ($try_next && !defined($node)) {
                    $haenv->log('warning', "Start Error Recovery: Tried all available" .
                        " nodes for service '$sid', retry start on current node. " .
                        "Tried nodes: " . join(', ', @{$sd->{failed_nodes}}));
                }
                # ensure the service gets started again if it went down unexpectedly,
                # but also ensure no LRM result gets lost
                $sd->{uid} = compute_new_uuid($sd->{state}) if defined($lrm_res);
            }
        }

        return;
    }

    $haenv->log('err', "service '$sid' - unknown state '$cd->{state}' in service configuration");
}

sub next_state_error {
    my ($self, $sid, $cd, $sd, $lrm_res) = @_;

    my $ns = $self->{ns};
    my $ms = $self->{ms};

    if ($cd->{state} eq 'disabled') {
        # clean up on error recovery
        delete $sd->{failed_nodes};

        &$change_service_state($self, $sid, 'stopped');
        return;
    }
}

# after a node was fenced this recovers the service to a new node
sub next_state_recovery {
    my ($self, $sid, $cd, $sd, $lrm_res) = @_;

    my ($haenv, $ss) = ($self->{haenv}, $self->{ss});
    my $ns = $self->{ns};
    my $ms = $self->{ms};

    if ($sd->{state} ne 'recovery') { # should not happen
        $haenv->log('err', "cannot recover service '$sid' from fencing, wrong state '$sd->{state}'");
        return;
    }

    my $fenced_node = $sd->{node}; # for logging purposes

    $self->recompute_online_node_usage(); # we want the most current node state

    my $recovery_node = select_service_node(
        $self->{groups},
        $self->{online_node_usage},
        $sid,
        $cd,
        $sd->{node},
    );

    if ($recovery_node) {
        my $msg = "recover service '$sid' from fenced node '$fenced_node' to node '$recovery_node'";
        if ($recovery_node eq $fenced_node) {
            # can happen with restricted groups if the node came up again OK
            $msg = "recover service '$sid' to previous failed and fenced node '$fenced_node' again";
        }
        $haenv->log('info', "$msg");

        $fence_recovery_cleanup->($self, $sid, $fenced_node);

        $haenv->steal_service($sid, $sd->{node}, $recovery_node);
        $self->{online_node_usage}->add_service_usage_to_node($recovery_node, $sid, $recovery_node);

        # NOTE: $sd *is normally read-only*, fencing is the exception
        $cd->{node} = $sd->{node} = $recovery_node;
        my $new_state = ($cd->{state} eq 'started') ? 'started' : 'request_stop';
        $change_service_state->($self, $sid, $new_state, node => $recovery_node);
    } else {
        # no possible node found, cannot recover - but retry later, as we always try to make it available
        $haenv->log('err', "recovering service '$sid' from fenced node '$fenced_node' failed, no recovery node found");

        if ($cd->{state} eq 'disabled') {
            # allow getting a service out of recovery manually if an admin disables it.
            delete $sd->{failed_nodes}; # clean up on recovery to stopped
            $change_service_state->($self, $sid, 'stopped'); # must NOT go through request_stop
            return;
        }
    }
}

1;