package PVE::HA::Manager;

use strict;
use warnings;
use Digest::MD5 qw(md5_base64);

use PVE::Tools;
use PVE::HA::Tools ':exit_codes';
use PVE::HA::NodeStatus;
use PVE::HA::Usage::Basic;
use PVE::HA::Usage::Static;

## Variable Name & Abbreviations Convention
#
# The HA stack uses some variables so frequently that they are abbreviated, which may be
# confusing for new readers. Here's a short list of the most commonly used ones.
#
# NOTE: unless stated otherwise, these variables should be treated as read-only; only use the
# specific methods to re-compute/read/alter them.
#
# - $haenv -> HA environment, the main interface to the simulator/test/real world
# - $sid -> Service ID, unique identifier for a service, `type:vmid` is common
#
# - $ms -> Master/Manager Status, contains runtime info from the current active manager
# - $ns -> Node Status, hash holding online/offline status about all nodes
#
# - $ss -> Service Status, hash holding the current state (last LRM cmd result, failed starts
#   or migrates, maintenance fallback node, ...) for *all* services
# - $sd -> Service Data, the service status of a *single* service, i.e., $ss->{$sid}
#
# - $sc -> Service Configuration, hash for all services including target state, group, ...
# - $cd -> Configuration Data, the service config of a *single* service, i.e., $sc->{$sid}
#
# Try to avoid adding new two-letter (or similarly over-abbreviated) names, but also don't send
# patches for changing the above, as that set is mostly sensible and should be easy to remember
# once you have spent a bit of time in the HA code base.
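#
# As a rough illustration (the field names shown are examples taken from this file, not a
# complete or authoritative schema), a single service entry may look like this:
#
#   $sc->{'vm:100'} = { state => 'started', node => 'node1', group => 'mygroup', max_relocate => 1 };
#   $ss->{'vm:100'} = { state => 'started', node => 'node1', uid => '...', failed_nodes => ['node2'] };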

sub new {
    my ($this, $haenv) = @_;

    my $class = ref($this) || $this;

    my $self = bless { haenv => $haenv }, $class;

    my $old_ms = $haenv->read_manager_status();

    # we only copy the state part of the manager which cannot be auto generated

    $self->{ns} = PVE::HA::NodeStatus->new($haenv, $old_ms->{node_status} || {});

    # fixme: use separate class PVE::HA::ServiceStatus
    $self->{ss} = $old_ms->{service_status} || {};

    $self->{ms} = { master_node => $haenv->nodename() };

    my $dc_cfg = $haenv->get_datacenter_settings();
    $self->{'scheduler-mode'} = $dc_cfg->{crs}->{ha} ? $dc_cfg->{crs}->{ha} : 'basic';
    $haenv->log('info', "using scheduler mode '$self->{'scheduler-mode'}'")
        if $self->{'scheduler-mode'} ne 'basic';

    return $self;
}

sub cleanup {
    my ($self) = @_;

    # todo: ?
}

sub flush_master_status {
    my ($self) = @_;

    my ($haenv, $ms, $ns, $ss) = ($self->{haenv}, $self->{ms}, $self->{ns}, $self->{ss});

    $ms->{node_status} = $ns->{status};
    $ms->{service_status} = $ss;
    $ms->{timestamp} = $haenv->get_time();

    $haenv->write_manager_status($ms);
}

sub get_service_group {
    my ($groups, $online_node_usage, $service_conf) = @_;

    my $group = {};
    # add all online nodes to default group to allow try_next when no group set
    $group->{nodes}->{$_} = 1 for $online_node_usage->list_nodes();

    # overwrite default if service is bound to a specific group
    if (my $group_id = $service_conf->{group}) {
        $group = $groups->{ids}->{$group_id} if $groups->{ids}->{$group_id};
    }

    return $group;
}

# groups available nodes with their priority as group index
sub get_node_priority_groups {
    my ($group, $online_node_usage) = @_;

    my $pri_groups = {};
    my $group_members = {};
    foreach my $entry (keys %{$group->{nodes}}) {
        my ($node, $pri) = ($entry, 0);
        if ($entry =~ m/^(\S+):(\d+)$/) {
            ($node, $pri) = ($1, $2);
        }
        next if !$online_node_usage->contains_node($node); # offline
        $pri_groups->{$pri}->{$node} = 1;
        $group_members->{$node} = $pri;
    }

    # add non-group members to unrestricted groups (priority -1)
    if (!$group->{restricted}) {
        my $pri = -1;
        for my $node ($online_node_usage->list_nodes()) {
            next if defined($group_members->{$node});
            $pri_groups->{$pri}->{$node} = 1;
            $group_members->{$node} = -1;
        }
    }

    return ($pri_groups, $group_members);
}
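
# For illustration (node names are made up): a group configured with
# nodes => { 'node1:2' => 1, 'node2:1' => 1, 'node3' => 1 } and all three nodes online yields
#
#   $pri_groups    = { 2 => { node1 => 1 }, 1 => { node2 => 1 }, 0 => { node3 => 1 } }
#   $group_members = { node1 => 2, node2 => 1, node3 => 0 }
#
# For unrestricted groups, any remaining online node is added with priority -1.
# select_service_node() below only considers the highest-priority group with online members.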

sub select_service_node {
    my ($groups, $online_node_usage, $sid, $service_conf, $current_node, $try_next,
        $tried_nodes, $maintenance_fallback) = @_;

    my $group = get_service_group($groups, $online_node_usage, $service_conf);

    my ($pri_groups, $group_members) = get_node_priority_groups($group, $online_node_usage);

    my @pri_list = sort { $b <=> $a } keys %$pri_groups;
    return undef if !scalar(@pri_list);

    # stay on current node if possible (avoids random migrations)
    if (!$try_next && $group->{nofailback} && defined($group_members->{$current_node})) {
        return $current_node;
    }

    # select node from top priority node list

    my $top_pri = $pri_list[0];

    # try to avoid nodes where the service failed already if we want to relocate
    if ($try_next) {
        foreach my $node (@$tried_nodes) {
            delete $pri_groups->{$top_pri}->{$node};
        }
    }

    return $maintenance_fallback
        if defined($maintenance_fallback) && $pri_groups->{$top_pri}->{$maintenance_fallback};

    return $current_node if !$try_next && $pri_groups->{$top_pri}->{$current_node};

    my $scores = $online_node_usage->score_nodes_to_start_service($sid, $current_node);
    my @nodes = sort {
        $scores->{$a} <=> $scores->{$b} || $a cmp $b
    } keys %{$pri_groups->{$top_pri}};

    my $found;
    for (my $i = scalar(@nodes) - 1; $i >= 0; $i--) {
        my $node = $nodes[$i];
        if ($node eq $current_node) {
            $found = $i;
        }
    }

    if ($try_next) {
        if (defined($found) && ($found < (scalar(@nodes) - 1))) {
            return $nodes[$found + 1];
        } else {
            return $nodes[0];
        }
    } else {
        return $nodes[0];
    }
}
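
# For illustration (hypothetical node names and usage scores): assume the top priority group
# holds node1, node2 and node3, the scores are { node1 => 10, node2 => 30, node3 => 20 } and the
# service currently runs on node2. Without $try_next the service stays on node2, as the current
# node wins whenever it is part of the top priority group. With $try_next set (e.g. after a
# failed start), nodes listed in $tried_nodes are dropped from the candidates first and the rest
# are sorted by ascending score; the entry right after the current node is picked, or the
# best-scored candidate (node1 here) if the current node is not in the list or already last.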

my $uid_counter = 0;

sub compute_new_uuid {
    my ($state) = @_;

    $uid_counter++;
    return md5_base64($state . $$ . time() . $uid_counter);
}

my $valid_service_states = {
    stopped => 1,
    request_stop => 1,
    started => 1,
    fence => 1,
    recovery => 1,
    migrate => 1,
    relocate => 1,
    freeze => 1,
    error => 1,
};

# FIXME with 'static' mode and thousands of services, the overhead can be noticeable, and the
# fact that this function is called for each state change and upon recovery doesn't help.
sub recompute_online_node_usage {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    my $online_nodes = $self->{ns}->list_online_nodes();

    my $online_node_usage;

    if (my $mode = $self->{'scheduler-mode'}) {
        if ($mode eq 'static') {
            $online_node_usage = eval {
                my $scheduler = PVE::HA::Usage::Static->new($haenv);
                $scheduler->add_node($_) for $online_nodes->@*;
                return $scheduler;
            };
            $haenv->log('warning', "using 'basic' scheduler mode, init for 'static' failed - $@")
                if $@;
        } elsif ($mode ne 'basic') {
            $haenv->log('warning', "got unknown scheduler mode '$mode', using 'basic'");
        }
    }

    if (!$online_node_usage) {
        $online_node_usage = PVE::HA::Usage::Basic->new($haenv);
        $online_node_usage->add_node($_) for $online_nodes->@*;
    }

    foreach my $sid (keys %{$self->{ss}}) {
        my $sd = $self->{ss}->{$sid};
        my $state = $sd->{state};
        my $target = $sd->{target}; # optional
        if ($online_node_usage->contains_node($sd->{node})) {
            if (
                $state eq 'started' || $state eq 'request_stop' || $state eq 'fence' ||
                $state eq 'freeze' || $state eq 'error' || $state eq 'recovery'
            ) {
                $online_node_usage->add_service_usage_to_node($sd->{node}, $sid, $sd->{node});
            } elsif (($state eq 'migrate') || ($state eq 'relocate')) {
                my $source = $sd->{node};
                # count it for both, source and target, as load is put on both
                $online_node_usage->add_service_usage_to_node($source, $sid, $source, $target);
                $online_node_usage->add_service_usage_to_node($target, $sid, $source, $target);
            } elsif ($state eq 'stopped') {
                # do nothing
            } else {
                die "should not be reached (sid = '$sid', state = '$state')";
            }
        } elsif (defined($target) && $online_node_usage->contains_node($target)) {
            if ($state eq 'migrate' || $state eq 'relocate') {
                # to correctly track maintenance modes, and to also consider the target as used in
                # case the node dies, as we cannot really know if the to-be-aborted incoming
                # migration has already cleaned up all used resources
                $online_node_usage->add_service_usage_to_node($target, $sid, $sd->{node}, $target);
            }
        }
    }

    $self->{online_node_usage} = $online_node_usage;
}

my $change_service_state = sub {
    my ($self, $sid, $new_state, %params) = @_;

    my ($haenv, $ss) = ($self->{haenv}, $self->{ss});

    my $sd = $ss->{$sid} || die "no such service '$sid'";

    my $old_state = $sd->{state};
    my $old_node = $sd->{node};
    my $old_failed_nodes = $sd->{failed_nodes};
    my $old_maintenance_node = $sd->{maintenance_node};

    die "no state change" if $old_state eq $new_state; # just to be sure

    die "invalid CRM service state '$new_state'\n" if !$valid_service_states->{$new_state};

    foreach my $k (keys %$sd) { delete $sd->{$k}; };

    $sd->{state} = $new_state;
    $sd->{node} = $old_node;
    $sd->{failed_nodes} = $old_failed_nodes if defined($old_failed_nodes);
    $sd->{maintenance_node} = $old_maintenance_node if defined($old_maintenance_node);

    my $text_state = '';
    foreach my $k (sort keys %params) {
        my $v = $params{$k};
        $text_state .= ", " if $text_state;
        $text_state .= "$k = $v";
        $sd->{$k} = $v;
    }

    $self->recompute_online_node_usage();

    $sd->{uid} = compute_new_uuid($new_state);

    $text_state = " ($text_state)" if $text_state;
    $haenv->log('info', "service '$sid': state changed from '${old_state}'" .
        " to '${new_state}'$text_state");
};

# clean up a possible bad state from a recovered service to allow its start
my $fence_recovery_cleanup = sub {
    my ($self, $sid, $fenced_node) = @_;

    my $haenv = $self->{haenv};

    my (undef, $type, $id) = $haenv->parse_sid($sid);
    my $plugin = PVE::HA::Resources->lookup($type);

    # should not happen
    die "unknown resource type '$type'" if !$plugin;

    # locks may block recovery, so clean up those which are safe to remove after fencing,
    # i.e., after the original node was reset and thus lost all its state
    my $removable_locks = [
        'backup',
        'mounted',
        'migrate',
        'clone',
        'rollback',
        'snapshot',
        'snapshot-delete',
        'suspending',
        'suspended',
    ];
    if (my $removed_lock = $plugin->remove_locks($haenv, $id, $removable_locks, $fenced_node)) {
        $haenv->log('warning', "removed leftover lock '$removed_lock' from recovered " .
            "service '$sid' to allow its start.");
    }
};

# read LRM status for all nodes
sub read_lrm_status {
    my ($self) = @_;

    my $nodes = $self->{ns}->list_nodes();
    my $haenv = $self->{haenv};

    my $results = {};
    my $modes = {};
    foreach my $node (@$nodes) {
        my $lrm_status = $haenv->read_lrm_status($node);
        $modes->{$node} = $lrm_status->{mode} || 'active';
        foreach my $uid (keys %{$lrm_status->{results}}) {
            next if $results->{$uid}; # should not happen
            $results->{$uid} = $lrm_status->{results}->{$uid};
        }
    }

    return ($results, $modes);
}

# read new crm commands and save them into crm master status
sub update_crm_commands {
    my ($self) = @_;

    my ($haenv, $ms, $ns, $ss) = ($self->{haenv}, $self->{ms}, $self->{ns}, $self->{ss});

    my $cmdlist = $haenv->read_crm_commands();

    foreach my $cmd (split(/\n/, $cmdlist)) {
        chomp $cmd;

        if ($cmd =~ m/^(migrate|relocate)\s+(\S+)\s+(\S+)$/) {
            my ($task, $sid, $node) = ($1, $2, $3);
            if (my $sd = $ss->{$sid}) {
                if (!$ns->node_is_online($node)) {
                    $haenv->log('err', "crm command error - node not online: $cmd");
                } else {
                    if ($node eq $sd->{node}) {
                        $haenv->log('info', "ignore crm command - service already on target node: $cmd");
                    } else {
                        $haenv->log('info', "got crm command: $cmd");
                        $ss->{$sid}->{cmd} = [ $task, $node ];
                    }
                }
            } else {
                $haenv->log('err', "crm command error - no such service: $cmd");
            }

        } elsif ($cmd =~ m/^stop\s+(\S+)\s+(\S+)$/) {
            my ($sid, $timeout) = ($1, $2);
            if (my $sd = $ss->{$sid}) {
                $haenv->log('info', "got crm command: $cmd");
                $ss->{$sid}->{cmd} = [ 'stop', $timeout ];
            } else {
                $haenv->log('err', "crm command error - no such service: $cmd");
            }
        } else {
            $haenv->log('err', "unable to parse crm command: $cmd");
        }
    }
}
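
# For illustration (service IDs and node names are made up): the command list read above is a
# newline-separated list of plain-text requests, one request per line, e.g.:
#
#   migrate vm:100 node2
#   relocate ct:101 node3
#   stop vm:100 180
#
# i.e. "<migrate|relocate> <sid> <target-node>" or "stop <sid> <timeout>", matching the two
# regular expressions above.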

sub manage {
    my ($self) = @_;

    my ($haenv, $ms, $ns, $ss) = ($self->{haenv}, $self->{ms}, $self->{ns}, $self->{ss});

    my ($node_info) = $haenv->get_node_info();
    my ($lrm_results, $lrm_modes) = $self->read_lrm_status();

    $ns->update($node_info, $lrm_modes);

    if (!$ns->node_is_operational($haenv->nodename())) {
        $haenv->log('info', "master seems offline");
        return;
    }

    my $sc = $haenv->read_service_config();

    $self->{groups} = $haenv->read_group_config(); # update

    # compute new service status

    # add new services
    foreach my $sid (sort keys %$sc) {
        next if $ss->{$sid}; # already there
        my $cd = $sc->{$sid};
        next if $cd->{state} eq 'ignored';

        $haenv->log('info', "adding new service '$sid' on node '$cd->{node}'");
        # assume we are running to avoid relocating a running service when it gets added
        my $state = ($cd->{state} eq 'started') ? 'started' : 'request_stop';
        $ss->{$sid} = {
            state => $state,
            node => $cd->{node},
            uid => compute_new_uuid('started'),
        };
    }

    # remove stale or ignored services from manager state
    foreach my $sid (keys %$ss) {
        next if $sc->{$sid} && $sc->{$sid}->{state} ne 'ignored';

        my $reason = defined($sc->{$sid}) ? 'ignored state requested' : 'no config';
        $haenv->log('info', "removing stale service '$sid' ($reason)");

        # remove all service related state information
        delete $ss->{$sid};
    }

    $self->update_crm_commands();

    for (;;) {
        my $repeat = 0;

        $self->recompute_online_node_usage();

        foreach my $sid (sort keys %$ss) {
            my $sd = $ss->{$sid};
            my $cd = $sc->{$sid} || { state => 'disabled' };

            my $lrm_res = $sd->{uid} ? $lrm_results->{$sd->{uid}} : undef;

            my $last_state = $sd->{state};

            if ($last_state eq 'stopped') {

                $self->next_state_stopped($sid, $cd, $sd, $lrm_res);

            } elsif ($last_state eq 'started') {

                $self->next_state_started($sid, $cd, $sd, $lrm_res);

            } elsif ($last_state eq 'migrate' || $last_state eq 'relocate') {

                $self->next_state_migrate_relocate($sid, $cd, $sd, $lrm_res);

            } elsif ($last_state eq 'fence') {

                # do nothing here - wait until fenced

            } elsif ($last_state eq 'recovery') {

                $self->next_state_recovery($sid, $cd, $sd, $lrm_res);

            } elsif ($last_state eq 'request_stop') {

                $self->next_state_request_stop($sid, $cd, $sd, $lrm_res);

            } elsif ($last_state eq 'freeze') {

                my $lrm_mode = $sd->{node} ? $lrm_modes->{$sd->{node}} : undef;
                # unfreeze
                my $state = ($cd->{state} eq 'started') ? 'started' : 'request_stop';
                &$change_service_state($self, $sid, $state)
                    if $lrm_mode && $lrm_mode eq 'active';

            } elsif ($last_state eq 'error') {

                $self->next_state_error($sid, $cd, $sd, $lrm_res);

            } else {

                die "unknown service state '$last_state'";
            }

            my $lrm_mode = $sd->{node} ? $lrm_modes->{$sd->{node}} : undef;
            if ($lrm_mode && $lrm_mode eq 'restart') {
                if (($sd->{state} eq 'started' || $sd->{state} eq 'stopped' ||
                    $sd->{state} eq 'request_stop')) {
                    &$change_service_state($self, $sid, 'freeze');
                }
            }

            $repeat = 1 if $sd->{state} ne $last_state;
        }

        # handle fencing
        my $fenced_nodes = {};
        foreach my $sid (sort keys %$ss) {
            my ($service_state, $service_node) = $ss->{$sid}->@{'state', 'node'};
            next if $service_state ne 'fence';

            if (!defined($fenced_nodes->{$service_node})) {
                $fenced_nodes->{$service_node} = $ns->fence_node($service_node) || 0;
            }

            next if !$fenced_nodes->{$service_node};

            # node fence was successful - recover service
            $change_service_state->($self, $sid, 'recovery');
            $repeat = 1; # for faster recovery execution
        }

        # Avoid that a node without any service in 'fence' state (e.g., all were removed
        # manually by an admin) gets stuck in the 'fence' node state.
        for my $node (sort grep { !defined($fenced_nodes->{$_}) } keys $ns->{status}->%*) {
            next if $ns->get_node_state($node) ne 'fence';

            $haenv->log('notice', "node '$node' in fence state but no services to-fence! admin interference?!");
            $repeat = 1 if $ns->fence_node($node);
        }

        last if !$repeat;
    }

    $self->flush_master_status();
}

# functions to compute next service states
# $cd: service configuration data (read only)
# $sd: service status data (read only)
#
# Note: use change_service_state() to alter state
#
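# Rough overview of the transitions these functions (and manage() above) drive - derived from
# the code in this file, not an authoritative state diagram:
#
#   stopped          -> started | migrate | relocate | fence
#   request_stop     -> stopped | error | fence
#   started          -> request_stop | migrate | relocate | fence | error
#   migrate/relocate -> started | request_stop | error | fence
#   fence            -> recovery (once the node was fenced successfully)
#   recovery         -> started | request_stop | stopped
#   freeze           -> started | request_stop (once the node's LRM is 'active' again)
#   error            -> stopped (only after the service got disabled)
#   started/stopped/request_stop -> freeze (while the node's LRM is in 'restart' mode)
#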

sub next_state_request_stop {
    my ($self, $sid, $cd, $sd, $lrm_res) = @_;

    my $haenv = $self->{haenv};
    my $ns = $self->{ns};

    # check result from LRM daemon
    if ($lrm_res) {
        my $exit_code = $lrm_res->{exit_code};
        if ($exit_code == SUCCESS) {
            &$change_service_state($self, $sid, 'stopped');
            return;
        } else {
            $haenv->log('err', "service '$sid' stop failed (exit code $exit_code)");
            &$change_service_state($self, $sid, 'error'); # fixme: what state?
            return;
        }
    }

    if ($ns->node_is_offline_delayed($sd->{node})) {
        &$change_service_state($self, $sid, 'fence');
        return;
    }
}

sub next_state_migrate_relocate {
    my ($self, $sid, $cd, $sd, $lrm_res) = @_;

    my $haenv = $self->{haenv};
    my $ns = $self->{ns};

    # check result from LRM daemon
    if ($lrm_res) {
        my $exit_code = $lrm_res->{exit_code};
        my $req_state = $cd->{state} eq 'started' ? 'started' : 'request_stop';
        if ($exit_code == SUCCESS) {
            &$change_service_state($self, $sid, $req_state, node => $sd->{target});
            return;
        } elsif ($exit_code == EWRONG_NODE) {
            $haenv->log('err', "service '$sid' - migration failed: service" .
                " registered on wrong node!");
            &$change_service_state($self, $sid, 'error');
        } else {
            $haenv->log('err', "service '$sid' - migration failed (exit code $exit_code)");
            &$change_service_state($self, $sid, $req_state, node => $sd->{node});
            return;
        }
    }

    if ($ns->node_is_offline_delayed($sd->{node})) {
        &$change_service_state($self, $sid, 'fence');
        return;
    }
}

sub next_state_stopped {
    my ($self, $sid, $cd, $sd, $lrm_res) = @_;

    my $haenv = $self->{haenv};
    my $ns = $self->{ns};

    if ($sd->{node} ne $cd->{node}) {
        # this can happen if we fence a node with active migrations
        # hack: modify $sd (normally this should be considered read-only)
        $haenv->log('info', "fixup service '$sid' location ($sd->{node} => $cd->{node})");
        $sd->{node} = $cd->{node};
    }

    if ($sd->{cmd}) {
        my $cmd = shift @{$sd->{cmd}};

        if ($cmd eq 'migrate' || $cmd eq 'relocate') {
            my $target = shift @{$sd->{cmd}};
            if (!$ns->node_is_online($target)) {
                $haenv->log('err', "ignore service '$sid' $cmd request - node '$target' not online");
            } elsif ($sd->{node} eq $target) {
                $haenv->log('info', "ignore service '$sid' $cmd request - service already on node '$target'");
            } else {
                &$change_service_state($self, $sid, $cmd, node => $sd->{node}, target => $target);
                return;
            }
        } elsif ($cmd eq 'stop') {
            $haenv->log('info', "ignore service '$sid' $cmd request - service already stopped");
        } else {
            $haenv->log('err', "unknown command '$cmd' for service '$sid'");
        }
        delete $sd->{cmd};
    }

    if ($cd->{state} eq 'disabled') {
        # NOTE: do nothing here, the stop state is an exception as we do not
        # process the LRM result here, thus the LRM always tries to stop the
        # service (protection for the case no CRM is active)
        return;
    }

    if ($ns->node_is_offline_delayed($sd->{node}) && $ns->get_node_state($sd->{node}) ne 'maintenance') {
        &$change_service_state($self, $sid, 'fence');
        return;
    }

    if ($cd->{state} eq 'stopped') {
        # almost the same as 'disabled' state but the service will also get recovered
        return;
    }

    if ($cd->{state} eq 'started') {
        # simply mark it started, if it's on the wrong node
        # next_state_started will fix that for us
        &$change_service_state($self, $sid, 'started', node => $sd->{node});
        return;
    }

    $haenv->log('err', "service '$sid' - unknown state '$cd->{state}' in service configuration");
}

sub record_service_failed_on_node {
    my ($self, $sid, $node) = @_;

    if (!defined($self->{ss}->{$sid}->{failed_nodes})) {
        $self->{ss}->{$sid}->{failed_nodes} = [];
    }

    push @{$self->{ss}->{$sid}->{failed_nodes}}, $node;
}

sub next_state_started {
    my ($self, $sid, $cd, $sd, $lrm_res) = @_;

    my $haenv = $self->{haenv};
    my $master_status = $self->{ms};
    my $ns = $self->{ns};

    if (!$ns->node_is_online($sd->{node})) {
        if ($ns->node_is_offline_delayed($sd->{node})) {
            &$change_service_state($self, $sid, 'fence');
        }
        if ($ns->get_node_state($sd->{node}) ne 'maintenance') {
            return;
        } else {
            # save current node as fallback for when it comes out of maintenance
            $sd->{maintenance_node} = $sd->{node};
        }
    }

    if ($cd->{state} eq 'disabled' || $cd->{state} eq 'stopped') {
        &$change_service_state($self, $sid, 'request_stop');
        return;
    }

    if ($cd->{state} eq 'started') {

        if ($sd->{cmd}) {
            my $cmd = shift @{$sd->{cmd}};

            if ($cmd eq 'migrate' || $cmd eq 'relocate') {
                my $target = shift @{$sd->{cmd}};
                if (!$ns->node_is_online($target)) {
                    $haenv->log('err', "ignore service '$sid' $cmd request - node '$target' not online");
                } elsif ($sd->{node} eq $target) {
                    $haenv->log('info', "ignore service '$sid' $cmd request - service already on node '$target'");
                } else {
                    $haenv->log('info', "$cmd service '$sid' to node '$target'");
                    &$change_service_state($self, $sid, $cmd, node => $sd->{node}, target => $target);
                }
            } elsif ($cmd eq 'stop') {
                my $timeout = shift @{$sd->{cmd}};
                if ($timeout == 0) {
                    $haenv->log('info', "request immediate service hard-stop for service '$sid'");
                } else {
                    $haenv->log('info', "request graceful stop with timeout '$timeout' for service '$sid'");
                }
                &$change_service_state($self, $sid, 'request_stop', timeout => $timeout);
                $haenv->update_service_config($sid, {'state' => 'stopped'});
            } else {
                $haenv->log('err', "unknown command '$cmd' for service '$sid'");
            }

            delete $sd->{cmd};

        } else {

            my $try_next = 0;

            if ($lrm_res) {

                my $ec = $lrm_res->{exit_code};
                if ($ec == SUCCESS) {

                    if (defined($sd->{failed_nodes})) {
                        $haenv->log('info', "relocation policy successful for '$sid' on node '$sd->{node}'," .
                            " failed nodes: " . join(', ', @{$sd->{failed_nodes}}) );
                    }

                    delete $sd->{failed_nodes};

                    # store flag to indicate successful start - only valid while state == 'started'
                    $sd->{running} = 1;

                } elsif ($ec == ERROR) {

                    delete $sd->{running};

                    # apply our relocate policy if we got ERROR from the LRM
                    $self->record_service_failed_on_node($sid, $sd->{node});

                    if (scalar(@{$sd->{failed_nodes}}) <= $cd->{max_relocate}) {

                        # tell select_service_node to relocate if possible
                        $try_next = 1;

                        $haenv->log('warning', "starting service $sid on node".
                            " '$sd->{node}' failed, relocating service.");

                    } else {

                        $haenv->log('err', "recovery policy for service $sid " .
                            "failed, entering error state. Failed nodes: ".
                            join(', ', @{$sd->{failed_nodes}}));
                        &$change_service_state($self, $sid, 'error');
                        return;

                    }
                } else {
                    $self->record_service_failed_on_node($sid, $sd->{node});

                    $haenv->log('err', "service '$sid' got unrecoverable error" .
                        " (exit code $ec))");
                    # we have no safe way out (yet) for other errors
                    &$change_service_state($self, $sid, 'error');
                    return;
                }
            }

            my $node = select_service_node(
                $self->{groups},
                $self->{online_node_usage},
                $sid,
                $cd,
                $sd->{node},
                $try_next,
                $sd->{failed_nodes},
                $sd->{maintenance_node},
            );

            if ($node && ($sd->{node} ne $node)) {
                $self->{online_node_usage}->add_service_usage_to_node($node, $sid, $sd->{node});

                if (defined(my $fallback = $sd->{maintenance_node})) {
                    if ($node eq $fallback) {
                        $haenv->log('info', "moving service '$sid' back to '$fallback', node came back from maintenance.");
                        delete $sd->{maintenance_node};
                    } elsif ($sd->{node} ne $fallback) {
                        $haenv->log('info', "dropping maintenance fallback node '$fallback' for '$sid'");
                        delete $sd->{maintenance_node};
                    }
                }

                if ($cd->{type} eq 'vm') {
                    $haenv->log('info', "migrate service '$sid' to node '$node' (running)");
                    &$change_service_state($self, $sid, 'migrate', node => $sd->{node}, target => $node);
                } else {
                    $haenv->log('info', "relocate service '$sid' to node '$node'");
                    &$change_service_state($self, $sid, 'relocate', node => $sd->{node}, target => $node);
                }
            } else {
                if ($try_next && !defined($node)) {
                    $haenv->log('warning', "Start Error Recovery: Tried all available " .
                        " nodes for service '$sid', retry start on current node. " .
                        "Tried nodes: " . join(', ', @{$sd->{failed_nodes}}));
                }
                # ensure the service gets started again if it went down unexpectedly,
                # but also ensure no LRM result gets lost
                $sd->{uid} = compute_new_uuid($sd->{state}) if defined($lrm_res);
            }
        }

        return;
    }

    $haenv->log('err', "service '$sid' - unknown state '$cd->{state}' in service configuration");
}

sub next_state_error {
    my ($self, $sid, $cd, $sd, $lrm_res) = @_;

    my $ns = $self->{ns};
    my $ms = $self->{ms};

    if ($cd->{state} eq 'disabled') {
        # clean up on error recovery
        delete $sd->{failed_nodes};

        &$change_service_state($self, $sid, 'stopped');
        return;
    }
}

# after a node was fenced this recovers the service to a new node
sub next_state_recovery {
    my ($self, $sid, $cd, $sd, $lrm_res) = @_;

    my ($haenv, $ss) = ($self->{haenv}, $self->{ss});
    my $ns = $self->{ns};
    my $ms = $self->{ms};

    if ($sd->{state} ne 'recovery') { # should not happen
        $haenv->log('err', "cannot recover service '$sid' from fencing, wrong state '$sd->{state}'");
        return;
    }

    my $fenced_node = $sd->{node}; # for logging purposes

    $self->recompute_online_node_usage(); # we want the most current node state

    my $recovery_node = select_service_node(
        $self->{groups},
        $self->{online_node_usage},
        $sid,
        $cd,
        $sd->{node},
    );

    if ($recovery_node) {
        my $msg = "recover service '$sid' from fenced node '$fenced_node' to node '$recovery_node'";
        if ($recovery_node eq $fenced_node) {
            # can happen with restricted groups if the node came up again OK
            $msg = "recover service '$sid' to previous failed and fenced node '$fenced_node' again";
        }
        $haenv->log('info', "$msg");

        $fence_recovery_cleanup->($self, $sid, $fenced_node);

        $haenv->steal_service($sid, $sd->{node}, $recovery_node);
        $self->{online_node_usage}->add_service_usage_to_node($recovery_node, $sid, $recovery_node);

        # NOTE: $sd *is normally read-only*, fencing is the exception
        $cd->{node} = $sd->{node} = $recovery_node;
        my $new_state = ($cd->{state} eq 'started') ? 'started' : 'request_stop';
        $change_service_state->($self, $sid, $new_state, node => $recovery_node);
    } else {
        # no possible node found, cannot recover - but retry later, as we always try to make it available
        $haenv->log('err', "recovering service '$sid' from fenced node '$fenced_node' failed, no recovery node found");

        if ($cd->{state} eq 'disabled') {
            # allow getting a service out of recovery manually if an admin disables it.
            delete $sd->{failed_nodes}; # clean up on recovery to stopped
            $change_service_state->($self, $sid, 'stopped'); # must NOT go through request_stop
            return;
        }
    }
}

1;