package PVE::HA::Manager;

use strict;
use warnings;
use Digest::MD5 qw(md5_base64);

use PVE::Tools;
use PVE::HA::Tools ':exit_codes';
use PVE::HA::NodeStatus;
use PVE::HA::Usage::Basic;
use PVE::HA::Usage::Static;

## Variable Name & Abbreviations Convention
#
# The HA stack uses a few variables so frequently that it abbreviates them, which may be
# confusing for new readers. Here's a short list of the most commonly used ones.
#
# NOTE: variables should be assumed to be read-only if not stated otherwise; only use the
# specific methods to re-compute/read/alter them.
#
# - $haenv -> HA environment, the main interface to the simulator/test/real world
# - $sid -> Service ID, unique identifier for a service, `type:vmid` is common
#
# - $ms -> Master/Manager Status, contains runtime info from the current active manager
# - $ns -> Node Status, hash holding online/offline status about all nodes
#
# - $ss -> Service Status, hash holding the current state (last LRM cmd result, failed starts
#   or migrates, maintenance fallback node, ...) for *all* services
# - $sd -> Service Data, the service status of a *single* service, iow. $ss->{$sid}
#
# - $sc -> Service Configuration, hash for all services including target state, group, ...
# - $cd -> Configuration Data, the service config of a *single* service, iow. $sc->{$sid}
#
# Try to avoid adding new two-letter (or similarly over-abbreviated) names, but also don't
# send patches for changing the above, as that set is mostly sensible and should be easy to
# remember once one has spent a bit of time in the HA code base.
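#
# As quick orientation (illustrative snippet, not code from this module), the two
# per-service lookups combine like this:
#
#   my $cd = $sc->{$sid};  # config of one service, e.g. { state => 'started', node => ..., group => ... }
#   my $sd = $ss->{$sid};  # status of one service, e.g. { state => 'started', node => ..., uid => ... }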

sub new {
    my ($this, $haenv) = @_;

    my $class = ref($this) || $this;

    my $self = bless { haenv => $haenv }, $class;

    my $old_ms = $haenv->read_manager_status();

    # we only copy the state part of the manager which cannot be auto-generated

    $self->{ns} = PVE::HA::NodeStatus->new($haenv, $old_ms->{node_status} || {});

    # fixme: use separate class PVE::HA::ServiceStatus
    $self->{ss} = $old_ms->{service_status} || {};

    $self->{ms} = { master_node => $haenv->nodename() };

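    # The scheduler mode comes from the cluster-wide CRS (Cluster Resource Scheduling)
    # setting, e.g. a `crs: ha=static` entry in datacenter.cfg selects 'static' here; if the
    # option is absent or unusable, 'basic' is always the fallback, see
    # recompute_online_node_usage().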
    my $dc_cfg = $haenv->get_datacenter_settings();
    $self->{'scheduler-mode'} = $dc_cfg->{crs}->{ha} ? $dc_cfg->{crs}->{ha} : 'basic';
    $haenv->log('info', "using scheduler mode '$self->{'scheduler-mode'}'")
        if $self->{'scheduler-mode'} ne 'basic';

    return $self;
}

sub cleanup {
    my ($self) = @_;

    # todo: ?
}

sub flush_master_status {
    my ($self) = @_;

    my ($haenv, $ms, $ns, $ss) = ($self->{haenv}, $self->{ms}, $self->{ns}, $self->{ss});

    $ms->{node_status} = $ns->{status};
    $ms->{service_status} = $ss;
    $ms->{timestamp} = $haenv->get_time();

    $haenv->write_manager_status($ms);
}

sub get_service_group {
    my ($groups, $online_node_usage, $service_conf) = @_;

    my $group = {};
    # add all online nodes to default group to allow try_next when no group set
    $group->{nodes}->{$_} = 1 for $online_node_usage->list_nodes();

    # overwrite default if service is bound to a specific group
    if (my $group_id = $service_conf->{group}) {
        $group = $groups->{ids}->{$group_id} if $groups->{ids}->{$group_id};
    }

    return $group;
}

# groups available nodes with their priority as group index
sub get_node_priority_groups {
    my ($group, $online_node_usage) = @_;

    my $pri_groups = {};
    my $group_members = {};
    foreach my $entry (keys %{$group->{nodes}}) {
        my ($node, $pri) = ($entry, 0);
        if ($entry =~ m/^(\S+):(\d+)$/) {
            ($node, $pri) = ($1, $2);
        }
        next if !$online_node_usage->contains_node($node); # offline
        $pri_groups->{$pri}->{$node} = 1;
        $group_members->{$node} = $pri;
    }

    # add non-group members to unrestricted groups (priority -1)
    if (!$group->{restricted}) {
        my $pri = -1;
        for my $node ($online_node_usage->list_nodes()) {
            next if defined($group_members->{$node});
            $pri_groups->{$pri}->{$node} = 1;
            $group_members->{$node} = -1;
        }
    }

    return ($pri_groups, $group_members);
}
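#
# For illustration (hypothetical values): a group with the node list `node1:2,node2:2,node3`
# yields $pri_groups = { 2 => { node1 => 1, node2 => 1 }, 0 => { node3 => 1 } } while all
# three nodes are online; for an unrestricted group, every other online node gets added with
# priority -1.
#
# select_service_node() below then picks in this order: with nofailback set, the current
# node is kept as long as it is a group member; otherwise, within the highest-priority
# class, a defined maintenance fallback node wins, then the current node, then the node with
# the lowest usage score (ties broken by node name). Relocation attempts ($try_next) skip
# the current node and the already-tried ones.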

sub select_service_node {
    my ($groups, $online_node_usage, $sid, $service_conf, $current_node, $try_next,
        $tried_nodes, $maintenance_fallback) = @_;

    my $group = get_service_group($groups, $online_node_usage, $service_conf);

    my ($pri_groups, $group_members) = get_node_priority_groups($group, $online_node_usage);

    my @pri_list = sort { $b <=> $a } keys %$pri_groups;
    return undef if !scalar(@pri_list);

    # stay on current node if possible (avoids random migrations)
    if (!$try_next && $group->{nofailback} && defined($group_members->{$current_node})) {
        return $current_node;
    }

    # select node from top priority node list

    my $top_pri = $pri_list[0];

    # try to avoid nodes where the service failed already if we want to relocate
    if ($try_next) {
        foreach my $node (@$tried_nodes) {
            delete $pri_groups->{$top_pri}->{$node};
        }
    }

    return $maintenance_fallback
        if defined($maintenance_fallback) && $pri_groups->{$top_pri}->{$maintenance_fallback};

    return $current_node if !$try_next && $pri_groups->{$top_pri}->{$current_node};

    my $scores = $online_node_usage->score_nodes_to_start_service($sid, $current_node);
    my @nodes = sort {
        $scores->{$a} <=> $scores->{$b} || $a cmp $b
    } keys %{$pri_groups->{$top_pri}};

    my $found;
    for (my $i = scalar(@nodes) - 1; $i >= 0; $i--) {
        my $node = $nodes[$i];
        if ($node eq $current_node) {
            $found = $i;
        }
    }

    if ($try_next) {
        if (defined($found) && ($found < (scalar(@nodes) - 1))) {
            return $nodes[$found + 1];
        } else {
            return $nodes[0];
        }
    } else {
        return $nodes[0];
    }
}

my $uid_counter = 0;

sub compute_new_uuid {
    my ($state) = @_;

    $uid_counter++;
    return md5_base64($state . $$ . time() . $uid_counter);
}

my $valid_service_states = {
    stopped => 1,
    request_stop => 1,
    started => 1,
    fence => 1,
    recovery => 1,
    migrate => 1,
    relocate => 1,
    freeze => 1,
    error => 1,
};
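#
# Rough transition sketch, as implemented by the next_state_* handlers below (summary for
# orientation, not exhaustive):
#
#   stopped          -> started | migrate | relocate   (requested state or CRM command)
#   request_stop     -> stopped | error                (LRM result)
#   started          -> request_stop | migrate | relocate | error
#   migrate/relocate -> started | request_stop | error
#   fence            -> recovery                       (after the node was fenced successfully)
#   recovery         -> started | request_stop | stopped
#   freeze           -> started | request_stop         (once the LRM is 'active' again)
#   started/stopped/request_stop -> freeze             (LRM in 'restart' mode)
#   stopped/request_stop/started/migrate/relocate -> fence  (node offline-delayed)
#   error            -> stopped                        (once the service got disabled)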

# FIXME with 'static' mode and thousands of services, the overhead can be noticeable and the
# fact that this function is called for each state change and upon recovery doesn't help.
sub recompute_online_node_usage {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    my $online_nodes = $self->{ns}->list_online_nodes();

    my $online_node_usage;

    if (my $mode = $self->{'scheduler-mode'}) {
        if ($mode eq 'static') {
            $online_node_usage = eval {
                my $scheduler = PVE::HA::Usage::Static->new($haenv);
                $scheduler->add_node($_) for $online_nodes->@*;
                return $scheduler;
            };
            $haenv->log('warning', "using 'basic' scheduler mode, init for 'static' failed - $@")
                if $@;
        } elsif ($mode eq 'basic') {
            # handled below in the general fall-back case
        } else {
            $haenv->log('warning', "got unknown scheduler mode '$mode', using 'basic'");
        }
    }

    # fallback to the basic algorithm in any case
    if (!$online_node_usage) {
        $online_node_usage = PVE::HA::Usage::Basic->new($haenv);
        $online_node_usage->add_node($_) for $online_nodes->@*;
    }

    foreach my $sid (keys %{$self->{ss}}) {
        my $sd = $self->{ss}->{$sid};
        my $state = $sd->{state};
        my $target = $sd->{target}; # optional
        if ($online_node_usage->contains_node($sd->{node})) {
            if (
                $state eq 'started' || $state eq 'request_stop' || $state eq 'fence'
                || $state eq 'freeze' || $state eq 'error' || $state eq 'recovery'
            ) {
                $online_node_usage->add_service_usage_to_node($sd->{node}, $sid, $sd->{node});
            } elsif (($state eq 'migrate') || ($state eq 'relocate')) {
                my $source = $sd->{node};
                # count it for both, source and target as load is put on both
                $online_node_usage->add_service_usage_to_node($source, $sid, $source, $target);
                $online_node_usage->add_service_usage_to_node($target, $sid, $source, $target);
            } elsif ($state eq 'stopped') {
                # do nothing
            } else {
                die "should not be reached (sid = '$sid', state = '$state')";
            }
        } elsif (defined($target) && $online_node_usage->contains_node($target)) {
            if ($state eq 'migrate' || $state eq 'relocate') {
                # to correctly track maintenance modes and also consider the target as used
                # for the case a node dies, as we cannot really know if the to-be-aborted
                # incoming migration has already cleaned up all used resources
                $online_node_usage->add_service_usage_to_node($target, $sid, $sd->{node}, $target);
            }
        }
    }

    $self->{online_node_usage} = $online_node_usage;
}

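# Set a service's new state: carries over node, failed_nodes and maintenance_node, stores any
# extra %params in $sd, assigns a fresh uid and logs the transition. A typical call from the
# handlers below looks like (with an illustrative sid):
#
#   &$change_service_state($self, $sid, 'migrate', node => $sd->{node}, target => $target);
#
# which logs: service 'vm:100': state changed from 'started' to 'migrate' (node = ..., target = ...)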
my $change_service_state = sub {
    my ($self, $sid, $new_state, %params) = @_;

    my ($haenv, $ss) = ($self->{haenv}, $self->{ss});

    my $sd = $ss->{$sid} || die "no such service '$sid'";

    my $old_state = $sd->{state};
    my $old_node = $sd->{node};
    my $old_failed_nodes = $sd->{failed_nodes};
    my $old_maintenance_node = $sd->{maintenance_node};

    die "no state change" if $old_state eq $new_state; # just to be sure

    die "invalid CRM service state '$new_state'\n" if !$valid_service_states->{$new_state};

    foreach my $k (keys %$sd) { delete $sd->{$k}; };

    $sd->{state} = $new_state;
    $sd->{node} = $old_node;
    $sd->{failed_nodes} = $old_failed_nodes if defined($old_failed_nodes);
    $sd->{maintenance_node} = $old_maintenance_node if defined($old_maintenance_node);

    my $text_state = '';
    foreach my $k (sort keys %params) {
        my $v = $params{$k};
        $text_state .= ", " if $text_state;
        $text_state .= "$k = $v";
        $sd->{$k} = $v;
    }

    $self->recompute_online_node_usage();

    $sd->{uid} = compute_new_uuid($new_state);

    $text_state = " ($text_state)" if $text_state;
    $haenv->log('info', "service '$sid': state changed from '${old_state}'"
        . " to '${new_state}'$text_state");
};

# clean up a possible bad state from a recovered service to allow its start
my $fence_recovery_cleanup = sub {
    my ($self, $sid, $fenced_node) = @_;

    my $haenv = $self->{haenv};

    my (undef, $type, $id) = $haenv->parse_sid($sid);
    my $plugin = PVE::HA::Resources->lookup($type);

    # should not happen
    die "unknown resource type '$type'" if !$plugin;

    # locks may block recovery, so clean up those which are safe to remove after fencing,
    # i.e., after the original node was reset and thus all its state is gone anyway
    my $removable_locks = [
        'backup',
        'mounted',
        'migrate',
        'clone',
        'rollback',
        'snapshot',
        'snapshot-delete',
        'suspending',
        'suspended',
    ];
    if (my $removed_lock = $plugin->remove_locks($haenv, $id, $removable_locks, $fenced_node)) {
        $haenv->log('warning', "removed leftover lock '$removed_lock' from recovered "
            . "service '$sid' to allow its start.");
    }
};

# read LRM status for all nodes
sub read_lrm_status {
    my ($self) = @_;

    my $nodes = $self->{ns}->list_nodes();
    my $haenv = $self->{haenv};

    my $results = {};
    my $modes = {};
    foreach my $node (@$nodes) {
        my $lrm_status = $haenv->read_lrm_status($node);
        $modes->{$node} = $lrm_status->{mode} || 'active';
        foreach my $uid (keys %{$lrm_status->{results}}) {
            next if $results->{$uid}; # should not happen
            $results->{$uid} = $lrm_status->{results}->{$uid};
        }
    }

    return ($results, $modes);
}

# read new crm commands and save them into crm master status
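#
# The queued command strings parsed below look like (illustrative sids, matching the
# regexes):
#
#   migrate vm:100 node2
#   relocate ct:101 node3
#   stop vm:100 60        # sid plus timeout in seconds; 0 requests an immediate hard-stop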
sub update_crm_commands {
    my ($self) = @_;

    my ($haenv, $ms, $ns, $ss) = ($self->{haenv}, $self->{ms}, $self->{ns}, $self->{ss});

    my $cmdlist = $haenv->read_crm_commands();

    foreach my $cmd (split(/\n/, $cmdlist)) {
        chomp $cmd;

        if ($cmd =~ m/^(migrate|relocate)\s+(\S+)\s+(\S+)$/) {
            my ($task, $sid, $node) = ($1, $2, $3);
            if (my $sd = $ss->{$sid}) {
                if (!$ns->node_is_online($node)) {
                    $haenv->log('err', "crm command error - node not online: $cmd");
                } else {
                    if ($node eq $sd->{node}) {
                        $haenv->log('info', "ignore crm command - service already on target node: $cmd");
                    } else {
                        $haenv->log('info', "got crm command: $cmd");
                        $ss->{$sid}->{cmd} = [$task, $node];
                    }
                }
            } else {
                $haenv->log('err', "crm command error - no such service: $cmd");
            }
        } elsif ($cmd =~ m/^stop\s+(\S+)\s+(\S+)$/) {
            my ($sid, $timeout) = ($1, $2);
            if (my $sd = $ss->{$sid}) {
                $haenv->log('info', "got crm command: $cmd");
                $ss->{$sid}->{cmd} = ['stop', $timeout];
            } else {
                $haenv->log('err', "crm command error - no such service: $cmd");
            }
        } else {
            $haenv->log('err', "unable to parse crm command: $cmd");
        }
    }
}

sub manage {
    my ($self) = @_;

    my ($haenv, $ms, $ns, $ss) = ($self->{haenv}, $self->{ms}, $self->{ns}, $self->{ss});

    my ($node_info) = $haenv->get_node_info();
    my ($lrm_results, $lrm_modes) = $self->read_lrm_status();

    $ns->update($node_info, $lrm_modes);

    if (!$ns->node_is_operational($haenv->nodename())) {
        $haenv->log('info', "master seems offline");
        return;
    }

    my $sc = $haenv->read_service_config();

    $self->{groups} = $haenv->read_group_config(); # update

    # compute new service status

    # add new services
    foreach my $sid (sort keys %$sc) {
        next if $ss->{$sid}; # already there
        my $cd = $sc->{$sid};
        next if $cd->{state} eq 'ignored';

        $haenv->log('info', "adding new service '$sid' on node '$cd->{node}'");
        # assume the service is running to avoid relocating a running service on addition
        my $state = ($cd->{state} eq 'started') ? 'started' : 'request_stop';
        $ss->{$sid} = {
            state => $state, node => $cd->{node}, uid => compute_new_uuid('started'),
        };
    }

    # remove stale or ignored services from manager state
    foreach my $sid (keys %$ss) {
        next if $sc->{$sid} && $sc->{$sid}->{state} ne 'ignored';

        my $reason = defined($sc->{$sid}) ? 'ignored state requested' : 'no config';
        $haenv->log('info', "removing stale service '$sid' ($reason)");

        # remove all service-related state information
        delete $ss->{$sid};
    }

    $self->update_crm_commands();

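    # Run the per-service state machine until no service changes state anymore; transitions
    # like fence -> recovery queue follow-up work, so one manage() round settles as much as
    # possible before the master status gets flushed.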
    for (;;) {
        my $repeat = 0;

        $self->recompute_online_node_usage();

        foreach my $sid (sort keys %$ss) {
            my $sd = $ss->{$sid};
            my $cd = $sc->{$sid} || { state => 'disabled' };

            my $lrm_res = $sd->{uid} ? $lrm_results->{$sd->{uid}} : undef;

            my $last_state = $sd->{state};

            if ($last_state eq 'stopped') {
                $self->next_state_stopped($sid, $cd, $sd, $lrm_res);
            } elsif ($last_state eq 'started') {
                $self->next_state_started($sid, $cd, $sd, $lrm_res);
            } elsif ($last_state eq 'migrate' || $last_state eq 'relocate') {
                $self->next_state_migrate_relocate($sid, $cd, $sd, $lrm_res);
            } elsif ($last_state eq 'fence') {
                # do nothing here - wait until fenced
            } elsif ($last_state eq 'recovery') {
                $self->next_state_recovery($sid, $cd, $sd, $lrm_res);
            } elsif ($last_state eq 'request_stop') {
                $self->next_state_request_stop($sid, $cd, $sd, $lrm_res);
            } elsif ($last_state eq 'freeze') {
                my $lrm_mode = $sd->{node} ? $lrm_modes->{$sd->{node}} : undef;
                # unfreeze
                my $state = ($cd->{state} eq 'started') ? 'started' : 'request_stop';
                &$change_service_state($self, $sid, $state)
                    if $lrm_mode && $lrm_mode eq 'active';
            } elsif ($last_state eq 'error') {
                $self->next_state_error($sid, $cd, $sd, $lrm_res);
            } else {
                die "unknown service state '$last_state'";
            }

            my $lrm_mode = $sd->{node} ? $lrm_modes->{$sd->{node}} : undef;
            if ($lrm_mode && $lrm_mode eq 'restart') {
                if ($sd->{state} eq 'started' || $sd->{state} eq 'stopped'
                    || $sd->{state} eq 'request_stop') {
                    &$change_service_state($self, $sid, 'freeze');
                }
            }

            $repeat = 1 if $sd->{state} ne $last_state;
        }

        # handle fencing
        my $fenced_nodes = {};
        foreach my $sid (sort keys %$ss) {
            my ($service_state, $service_node) = $ss->{$sid}->@{'state', 'node'};
            next if $service_state ne 'fence';

            if (!defined($fenced_nodes->{$service_node})) {
                $fenced_nodes->{$service_node} = $ns->fence_node($service_node) || 0;
            }

            next if !$fenced_nodes->{$service_node};

            # node fence was successful - recover service
            $change_service_state->($self, $sid, 'recovery');
            $repeat = 1; # for faster recovery execution
        }

        # Avoid that a node gets stuck in the 'fence' node state when it has no services
        # left in the 'fence' state (e.g., because they got removed manually by an admin).
        for my $node (sort grep { !defined($fenced_nodes->{$_}) } keys $ns->{status}->%*) {
            next if $ns->get_node_state($node) ne 'fence';

            $haenv->log('notice', "node '$node' in fence state but no services to-fence! admin interference?!");
            $repeat = 1 if $ns->fence_node($node);
        }

        last if !$repeat;
    }

    $self->flush_master_status();
}

# functions to compute next service states
# $cd: service configuration data (read only)
# $sd: service status data (read only)
#
# Note: use $change_service_state to alter the state
#

sub next_state_request_stop {
    my ($self, $sid, $cd, $sd, $lrm_res) = @_;

    my $haenv = $self->{haenv};
    my $ns = $self->{ns};

    # check result from LRM daemon
    if ($lrm_res) {
        my $exit_code = $lrm_res->{exit_code};
        if ($exit_code == SUCCESS) {
            &$change_service_state($self, $sid, 'stopped');
            return;
        } else {
            $haenv->log('err', "service '$sid' stop failed (exit code $exit_code)");
            &$change_service_state($self, $sid, 'error'); # fixme: what state?
            return;
        }
    }

    if ($ns->node_is_offline_delayed($sd->{node})) {
        &$change_service_state($self, $sid, 'fence');
        return;
    }
}

sub next_state_migrate_relocate {
    my ($self, $sid, $cd, $sd, $lrm_res) = @_;

    my $haenv = $self->{haenv};
    my $ns = $self->{ns};

    # check result from LRM daemon
    if ($lrm_res) {
        my $exit_code = $lrm_res->{exit_code};
        my $req_state = $cd->{state} eq 'started' ? 'started' : 'request_stop';
        if ($exit_code == SUCCESS) {
            &$change_service_state($self, $sid, $req_state, node => $sd->{target});
            return;
        } elsif ($exit_code == EWRONG_NODE) {
            $haenv->log('err', "service '$sid' - migration failed: service"
                . " registered on wrong node!");
            &$change_service_state($self, $sid, 'error');
        } else {
            $haenv->log('err', "service '$sid' - migration failed (exit code $exit_code)");
            &$change_service_state($self, $sid, $req_state, node => $sd->{node});
            return;
        }
    }

    if ($ns->node_is_offline_delayed($sd->{node})) {
        &$change_service_state($self, $sid, 'fence');
        return;
    }
}

sub next_state_stopped {
    my ($self, $sid, $cd, $sd, $lrm_res) = @_;

    my $haenv = $self->{haenv};
    my $ns = $self->{ns};

    if ($sd->{node} ne $cd->{node}) {
        # this can happen if we fence a node with active migrations
        # hack: modify $sd (normally this should be considered read-only)
        $haenv->log('info', "fixup service '$sid' location ($sd->{node} => $cd->{node})");
        $sd->{node} = $cd->{node};
    }

    if ($sd->{cmd}) {
        my $cmd = shift @{$sd->{cmd}};

        if ($cmd eq 'migrate' || $cmd eq 'relocate') {
            my $target = shift @{$sd->{cmd}};
            if (!$ns->node_is_online($target)) {
                $haenv->log('err', "ignore service '$sid' $cmd request - node '$target' not online");
            } elsif ($sd->{node} eq $target) {
                $haenv->log('info', "ignore service '$sid' $cmd request - service already on node '$target'");
            } else {
                &$change_service_state($self, $sid, $cmd, node => $sd->{node}, target => $target);
                return;
            }
        } elsif ($cmd eq 'stop') {
            $haenv->log('info', "ignore service '$sid' $cmd request - service already stopped");
        } else {
            $haenv->log('err', "unknown command '$cmd' for service '$sid'");
        }
        delete $sd->{cmd};
    }

    if ($cd->{state} eq 'disabled') {
        # NOTE: do nothing here, the stop state is an exception as we do not
        # process the LRM result here, thus the LRM always tries to stop the
        # service (protection for the case no CRM is active)
        return;
    }

    if ($ns->node_is_offline_delayed($sd->{node}) && $ns->get_node_state($sd->{node}) ne 'maintenance') {
        &$change_service_state($self, $sid, 'fence');
        return;
    }

    if ($cd->{state} eq 'stopped') {
        # almost the same as the 'disabled' state, but the service will also get recovered
        return;
    }

    if ($cd->{state} eq 'started') {
        # simply mark it started, if it's on the wrong node
        # next_state_started will fix that for us
        &$change_service_state($self, $sid, 'started', node => $sd->{node});
        return;
    }

    $haenv->log('err', "service '$sid' - unknown state '$cd->{state}' in service configuration");
}

sub record_service_failed_on_node {
    my ($self, $sid, $node) = @_;

    if (!defined($self->{ss}->{$sid}->{failed_nodes})) {
        $self->{ss}->{$sid}->{failed_nodes} = [];
    }

    push @{$self->{ss}->{$sid}->{failed_nodes}}, $node;
}

sub next_state_started {
    my ($self, $sid, $cd, $sd, $lrm_res) = @_;

    my $haenv = $self->{haenv};
    my $master_status = $self->{ms};
    my $ns = $self->{ns};

    if (!$ns->node_is_online($sd->{node})) {
        if ($ns->node_is_offline_delayed($sd->{node})) {
            &$change_service_state($self, $sid, 'fence');
        }
        if ($ns->get_node_state($sd->{node}) ne 'maintenance') {
            return;
        } else {
            # save current node as fallback for when it comes out of maintenance
            $sd->{maintenance_node} = $sd->{node};
        }
    }

    if ($cd->{state} eq 'disabled' || $cd->{state} eq 'stopped') {
        &$change_service_state($self, $sid, 'request_stop');
        return;
    }

    if ($cd->{state} eq 'started') {
        if ($sd->{cmd}) {
            my $cmd = shift @{$sd->{cmd}};

            if ($cmd eq 'migrate' || $cmd eq 'relocate') {
                my $target = shift @{$sd->{cmd}};
                if (!$ns->node_is_online($target)) {
                    $haenv->log('err', "ignore service '$sid' $cmd request - node '$target' not online");
                } elsif ($sd->{node} eq $target) {
                    $haenv->log('info', "ignore service '$sid' $cmd request - service already on node '$target'");
                } else {
                    $haenv->log('info', "$cmd service '$sid' to node '$target'");
                    &$change_service_state($self, $sid, $cmd, node => $sd->{node}, target => $target);
                }
            } elsif ($cmd eq 'stop') {
                my $timeout = shift @{$sd->{cmd}};
                if ($timeout == 0) {
                    $haenv->log('info', "request immediate service hard-stop for service '$sid'");
                } else {
                    $haenv->log('info', "request graceful stop with timeout '$timeout' for service '$sid'");
                }
                &$change_service_state($self, $sid, 'request_stop', timeout => $timeout);
                $haenv->update_service_config($sid, { 'state' => 'stopped' });
            } else {
                $haenv->log('err', "unknown command '$cmd' for service '$sid'");
            }

            delete $sd->{cmd};
        } else {
            my $try_next = 0;

            if ($lrm_res) {
                my $ec = $lrm_res->{exit_code};
                if ($ec == SUCCESS) {
                    if (defined($sd->{failed_nodes})) {
                        $haenv->log('info', "relocation policy successful for '$sid' on node '$sd->{node}',"
                            . " failed nodes: " . join(', ', @{$sd->{failed_nodes}}));
                    }

                    delete $sd->{failed_nodes};

                    # store flag to indicate successful start - only valid while state == 'started'
                    $sd->{running} = 1;
                } elsif ($ec == ERROR) {
                    delete $sd->{running};

                    # apply our relocate policy if we got ERROR from the LRM
                    $self->record_service_failed_on_node($sid, $sd->{node});

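                    # Relocation policy: each node where the start failed is recorded; as
                    # long as the count stays within max_relocate (per-service resource
                    # option, default 1), try the next-best node, otherwise enter 'error'.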
                    if (scalar(@{$sd->{failed_nodes}}) <= $cd->{max_relocate}) {
                        # tell select_service_node to relocate if possible
                        $try_next = 1;

                        $haenv->log('warning', "starting service $sid on node"
                            . " '$sd->{node}' failed, relocating service.");
                    } else {
                        $haenv->log('err', "recovery policy for service $sid failed,"
                            . " entering error state. Failed nodes: "
                            . join(', ', @{$sd->{failed_nodes}}));
                        &$change_service_state($self, $sid, 'error');
                        return;
                    }
                } else {
                    $self->record_service_failed_on_node($sid, $sd->{node});

                    $haenv->log('err', "service '$sid' got unrecoverable error (exit code $ec)");
                    # we have no safe way out (yet) for other errors
                    &$change_service_state($self, $sid, 'error');
                    return;
                }
            }

            my $node = select_service_node(
                $self->{groups},
                $self->{online_node_usage},
                $sid,
                $cd,
                $sd->{node},
                $try_next,
                $sd->{failed_nodes},
                $sd->{maintenance_node},
            );

            if ($node && ($sd->{node} ne $node)) {
                $self->{online_node_usage}->add_service_usage_to_node($node, $sid, $sd->{node});

                if (defined(my $fallback = $sd->{maintenance_node})) {
                    if ($node eq $fallback) {
                        $haenv->log('info',
                            "moving service '$sid' back to '$fallback', node came back from maintenance.");
                        delete $sd->{maintenance_node};
                    } elsif ($sd->{node} ne $fallback) {
                        $haenv->log('info', "dropping maintenance fallback node '$fallback' for '$sid'");
                        delete $sd->{maintenance_node};
                    }
                }

                if ($cd->{type} eq 'vm') {
                    $haenv->log('info', "migrate service '$sid' to node '$node' (running)");
                    &$change_service_state($self, $sid, 'migrate', node => $sd->{node}, target => $node);
                } else {
                    $haenv->log('info', "relocate service '$sid' to node '$node'");
                    &$change_service_state($self, $sid, 'relocate', node => $sd->{node}, target => $node);
                }
            } else {
                if ($try_next && !defined($node)) {
                    $haenv->log('warning', "Start Error Recovery: Tried all available"
                        . " nodes for service '$sid', retry start on current node."
                        . " Tried nodes: " . join(', ', @{$sd->{failed_nodes}}));
                }
                # ensure the service gets started again if it went down unexpectedly,
                # but also ensure that no LRM result gets lost
                $sd->{uid} = compute_new_uuid($sd->{state}) if defined($lrm_res);
            }
        }

        return;
    }

    $haenv->log('err', "service '$sid' - unknown state '$cd->{state}' in service configuration");
}

sub next_state_error {
    my ($self, $sid, $cd, $sd, $lrm_res) = @_;

    my $ns = $self->{ns};
    my $ms = $self->{ms};

    if ($cd->{state} eq 'disabled') {
        # clean up on error recovery
        delete $sd->{failed_nodes};

        &$change_service_state($self, $sid, 'stopped');
        return;
    }
}

# after a node was fenced this recovers the service to a new node
sub next_state_recovery {
    my ($self, $sid, $cd, $sd, $lrm_res) = @_;

    my ($haenv, $ss) = ($self->{haenv}, $self->{ss});
    my $ns = $self->{ns};
    my $ms = $self->{ms};

    if ($sd->{state} ne 'recovery') { # should not happen
        $haenv->log('err', "cannot recover service '$sid' from fencing, wrong state '$sd->{state}'");
        return;
    }

    my $fenced_node = $sd->{node}; # for logging purposes

    $self->recompute_online_node_usage(); # we want the most current node state

    my $recovery_node = select_service_node(
        $self->{groups},
        $self->{online_node_usage},
        $sid,
        $cd,
        $sd->{node},
    );

    if ($recovery_node) {
        my $msg = "recover service '$sid' from fenced node '$fenced_node' to node '$recovery_node'";
        if ($recovery_node eq $fenced_node) {
            # can happen with restricted groups if the node came up again OK
            $msg = "recover service '$sid' to previous failed and fenced node '$fenced_node' again";
        }
        $haenv->log('info', "$msg");

        $fence_recovery_cleanup->($self, $sid, $fenced_node);

        $haenv->steal_service($sid, $sd->{node}, $recovery_node);
        $self->{online_node_usage}->add_service_usage_to_node($recovery_node, $sid, $recovery_node);

        # NOTE: $sd *is normally read-only*, fencing is the exception
        $cd->{node} = $sd->{node} = $recovery_node;
        my $new_state = ($cd->{state} eq 'started') ? 'started' : 'request_stop';
        $change_service_state->($self, $sid, $new_state, node => $recovery_node);
    } else {
        # no possible node found, cannot recover - but retry later, as we always try to make it available
        $haenv->log('err', "recovering service '$sid' from fenced node '$fenced_node' failed, no recovery node found");

        if ($cd->{state} eq 'disabled') {
            # allow getting a service out of recovery manually if an admin disables it.
            delete $sd->{failed_nodes}; # clean up on recovery to stopped
            $change_service_state->($self, $sid, 'stopped'); # must NOT go through request_stop
            return;
        }
    }
}

1;