git.proxmox.com Git - pve-ha-manager.git/blob - src/PVE/HA/Manager.pm
fix #4984: manager: add service to migration-target usage only if online
package PVE::HA::Manager;

use strict;
use warnings;

use Digest::MD5 qw(md5_base64);

use PVE::Tools;
use PVE::HA::Tools ':exit_codes';
use PVE::HA::NodeStatus;
use PVE::HA::Usage::Basic;
use PVE::HA::Usage::Static;

## Variable Name & Abbreviations Convention
#
# The HA stack uses some variables very frequently and thus abbreviates them in a way that may be
# confusing for new readers. Here's a short list of the most commonly used ones.
#
# NOTE: variables should be assumed to be read-only unless otherwise stated; only use the specific
# methods to re-compute/read/alter them.
#
# - $haenv -> HA environment, the main interface to the simulator/test/real world
# - $sid -> Service ID, unique identifier for a service, `type:vmid` is common
#
# - $ms -> Master/Manager Status, contains runtime info from the current active manager
# - $ns -> Node Status, hash holding online/offline status about all nodes
#
# - $ss -> Service Status, hash holding the current state (last LRM cmd result, failed starts
#   or migrates, maintenance fallback node, ...) for *all* services
# - $sd -> Service Data, the service status of a *single* service, i.e. $ss->{$sid}
#
# - $sc -> Service Configuration, hash for all services including target state, group, ...
# - $cd -> Configuration Data, the service config of a *single* service, i.e. $sc->{$sid}
#
# Try to avoid adding new two-letter (or similarly over-abbreviated) names, but also don't send
# patches to change the above, as that set is mostly sensible and should be easy to remember once
# you have spent a bit of time in the HA code base.

sub new {
    my ($this, $haenv) = @_;

    my $class = ref($this) || $this;

    my $self = bless { haenv => $haenv, crs => {} }, $class;

    my $old_ms = $haenv->read_manager_status();

    # we only copy the state part of the manager which cannot be auto generated

    $self->{ns} = PVE::HA::NodeStatus->new($haenv, $old_ms->{node_status} || {});

    # fixme: use separate class PVE::HA::ServiceStatus
    $self->{ss} = $old_ms->{service_status} || {};

    $self->{ms} = { master_node => $haenv->nodename() };

    # take over node request state to ensure a node in (manual) maintenance mode stays that way
    # on change of active master.
    $self->{ms}->{node_request} = $old_ms->{node_request} if defined($old_ms->{node_request});

    $self->update_crs_scheduler_mode(); # initial set, we update it once every loop

    return $self;
}

sub update_crs_scheduler_mode {
    my ($self) = @_;

    my $haenv = $self->{haenv};
    my $dc_cfg = $haenv->get_datacenter_settings();

    $self->{crs}->{rebalance_on_request_start} = !!$dc_cfg->{crs}->{'ha-rebalance-on-start'};

    my $old_mode = $self->{crs}->{scheduler};
    my $new_mode = $dc_cfg->{crs}->{ha} || 'basic';

    if (!defined($old_mode)) {
        $haenv->log('info', "using scheduler mode '$new_mode'") if $new_mode ne 'basic';
    } elsif ($new_mode eq $old_mode) {
        return; # nothing to do
    } else {
        $haenv->log('info', "switching scheduler mode from '$old_mode' to '$new_mode'");
    }

    $self->{crs}->{scheduler} = $new_mode;

    return;
}

sub cleanup {
    my ($self) = @_;

    # todo: ?
}

sub flush_master_status {
    my ($self) = @_;

    my ($haenv, $ms, $ns, $ss) = ($self->{haenv}, $self->{ms}, $self->{ns}, $self->{ss});

    $ms->{node_status} = $ns->{status};
    $ms->{service_status} = $ss;
    $ms->{timestamp} = $haenv->get_time();

    $haenv->write_manager_status($ms);
}

sub get_service_group {
    my ($groups, $online_node_usage, $service_conf) = @_;

    my $group = {};
    # add all online nodes to default group to allow try_next when no group set
    $group->{nodes}->{$_} = 1 for $online_node_usage->list_nodes();

    # overwrite default if service is bound to a specific group
    if (my $group_id = $service_conf->{group}) {
        $group = $groups->{ids}->{$group_id} if $groups->{ids}->{$group_id};
    }

    return $group;
}

# groups available nodes with their priority as group index
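# Group node entries may carry an explicit priority in the form 'nodename:prio' (e.g. 'node2:2');
# entries without a suffix default to priority 0, and - for unrestricted groups - online nodes
# that are not listed in the group at all are collected under priority -1.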
sub get_node_priority_groups {
    my ($group, $online_node_usage) = @_;

    my $pri_groups = {};
    my $group_members = {};
    foreach my $entry (keys %{$group->{nodes}}) {
        my ($node, $pri) = ($entry, 0);
        if ($entry =~ m/^(\S+):(\d+)$/) {
            ($node, $pri) = ($1, $2);
        }
        next if !$online_node_usage->contains_node($node); # offline
        $pri_groups->{$pri}->{$node} = 1;
        $group_members->{$node} = $pri;
    }

    # add non-group members to unrestricted groups (priority -1)
    if (!$group->{restricted}) {
        my $pri = -1;
        for my $node ($online_node_usage->list_nodes()) {
            next if defined($group_members->{$node});
            $pri_groups->{$pri}->{$node} = 1;
            $group_members->{$node} = -1;
        }
    }

    return ($pri_groups, $group_members);
}

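# Select the best node to run a service on. Honors the service's group (node priorities,
# restricted, nofailback), skips nodes the service already failed on when $try_next is set,
# prefers a defined maintenance fallback node, and otherwise picks the node with the best
# (lowest) usage score within the highest-priority group.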
sub select_service_node {
    my ($groups, $online_node_usage, $sid, $service_conf, $current_node, $try_next, $tried_nodes, $maintenance_fallback, $best_scored) = @_;

    my $group = get_service_group($groups, $online_node_usage, $service_conf);

    my ($pri_groups, $group_members) = get_node_priority_groups($group, $online_node_usage);

    my @pri_list = sort {$b <=> $a} keys %$pri_groups;
    return undef if !scalar(@pri_list);

    # stay on current node if possible (avoids random migrations)
    if ((!$try_next && !$best_scored) && $group->{nofailback} && defined($group_members->{$current_node})) {
        return $current_node;
    }

    # select node from top priority node list

    my $top_pri = $pri_list[0];

    # try to avoid nodes where the service failed already if we want to relocate
    if ($try_next) {
        foreach my $node (@$tried_nodes) {
            delete $pri_groups->{$top_pri}->{$node};
        }
    }

    return $maintenance_fallback
        if defined($maintenance_fallback) && $pri_groups->{$top_pri}->{$maintenance_fallback};

    return $current_node if (!$try_next && !$best_scored) && $pri_groups->{$top_pri}->{$current_node};

    my $scores = $online_node_usage->score_nodes_to_start_service($sid, $current_node);
    my @nodes = sort {
        $scores->{$a} <=> $scores->{$b} || $a cmp $b
    } keys %{$pri_groups->{$top_pri}};

    my $found;
    for (my $i = scalar(@nodes) - 1; $i >= 0; $i--) {
        my $node = $nodes[$i];
        if ($node eq $current_node) {
            $found = $i;
        }
    }

    if ($try_next) {
        if (!$best_scored && defined($found) && ($found < (scalar(@nodes) - 1))) {
            return $nodes[$found + 1];
        } else {
            return $nodes[0];
        }
    } else {
        return $nodes[0];
    }
}

my $uid_counter = 0;

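# create a pseudo-random UID for a service state change; the LRM reports its results keyed by
# this UID, so a fresh one is generated whenever the CRM wants a command to be (re-)executed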
sub compute_new_uuid {
    my ($state) = @_;

    $uid_counter++;
    return md5_base64($state . $$ . time() . $uid_counter);
}

my $valid_service_states = {
    stopped => 1,
    request_stop => 1,
    request_start => 1,
    request_start_balance => 1,
    started => 1,
    fence => 1,
    recovery => 1,
    migrate => 1,
    relocate => 1,
    freeze => 1,
    error => 1,
};

# FIXME with 'static' mode and thousands of services, the overhead can be noticeable and the fact
# that this function is called for each state change and upon recovery doesn't help.
sub recompute_online_node_usage {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    my $online_nodes = $self->{ns}->list_online_nodes();

    my $online_node_usage;

    if (my $mode = $self->{crs}->{scheduler}) {
        if ($mode eq 'static') {
            $online_node_usage = eval {
                my $scheduler = PVE::HA::Usage::Static->new($haenv);
                $scheduler->add_node($_) for $online_nodes->@*;
                return $scheduler;
            };
            $haenv->log('warning', "fallback to 'basic' scheduler mode, init for 'static' failed - $@")
                if $@;
        } elsif ($mode eq 'basic') {
            # handled below in the general fall-back case
        } else {
            $haenv->log('warning', "got unknown scheduler mode '$mode', using 'basic'");
        }
    }

    # fall back to the basic algorithm in any case
    if (!$online_node_usage) {
        $online_node_usage = PVE::HA::Usage::Basic->new($haenv);
        $online_node_usage->add_node($_) for $online_nodes->@*;
    }

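    # account every service's load to the node(s) it currently occupies; services that are moving
    # (migrate/relocate/request_start_balance) are counted on both source and target, since load
    # is put on both while the move is in flight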
    foreach my $sid (sort keys %{$self->{ss}}) {
        my $sd = $self->{ss}->{$sid};
        my $state = $sd->{state};
        my $target = $sd->{target}; # optional
        if ($online_node_usage->contains_node($sd->{node})) {
            if (
                $state eq 'started' || $state eq 'request_stop' || $state eq 'fence'
                || $state eq 'freeze' || $state eq 'error' || $state eq 'recovery'
            ) {
                $online_node_usage->add_service_usage_to_node($sd->{node}, $sid, $sd->{node});
            } elsif ($state eq 'migrate' || $state eq 'relocate' || $state eq 'request_start_balance') {
                my $source = $sd->{node};
                # count it for both source and target, as load is put on both
                $online_node_usage->add_service_usage_to_node($source, $sid, $source, $target)
                    if $state ne 'request_start_balance';
                $online_node_usage->add_service_usage_to_node($target, $sid, $source, $target)
                    if $online_node_usage->contains_node($target);
            } elsif ($state eq 'stopped' || $state eq 'request_start') {
                # do nothing
            } else {
                die "should not be reached (sid = '$sid', state = '$state')";
            }
        } elsif (defined($target) && $online_node_usage->contains_node($target)) {
            if ($state eq 'migrate' || $state eq 'relocate') {
                # to correctly track maintenance modes and also consider the target as used for the
                # case a node dies, as we cannot really know if the to-be-aborted incoming migration
                # has already cleaned up all used resources
                $online_node_usage->add_service_usage_to_node($target, $sid, $sd->{node}, $target);
            }
        }
    }

    $self->{online_node_usage} = $online_node_usage;
}

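# Transition a service to a new, valid state: preserves node, failed_nodes and maintenance_node,
# drops all other runtime fields, stores any extra %params into the service data, recomputes the
# node usage and assigns a fresh UID so the LRM picks the change up as a new command.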
my $change_service_state = sub {
    my ($self, $sid, $new_state, %params) = @_;

    my ($haenv, $ss) = ($self->{haenv}, $self->{ss});

    my $sd = $ss->{$sid} || die "no such service '$sid'";

    my $old_state = $sd->{state};
    my $old_node = $sd->{node};
    my $old_failed_nodes = $sd->{failed_nodes};
    my $old_maintenance_node = $sd->{maintenance_node};

    die "no state change" if $old_state eq $new_state; # just to be sure

    die "invalid CRM service state '$new_state'\n" if !$valid_service_states->{$new_state};

    foreach my $k (keys %$sd) { delete $sd->{$k}; };

    $sd->{state} = $new_state;
    $sd->{node} = $old_node;
    $sd->{failed_nodes} = $old_failed_nodes if defined($old_failed_nodes);
    $sd->{maintenance_node} = $old_maintenance_node if defined($old_maintenance_node);

    my $text_state = '';
    foreach my $k (sort keys %params) {
        my $v = $params{$k};
        $text_state .= ", " if $text_state;
        $text_state .= "$k = $v";
        $sd->{$k} = $v;
    }

    $self->recompute_online_node_usage();

    $sd->{uid} = compute_new_uuid($new_state);

    $text_state = " ($text_state)" if $text_state;
    $haenv->log('info', "service '$sid': state changed from '${old_state}' to '${new_state}'$text_state");
};

# clean up a possible bad state from a recovered service to allow its start
my $fence_recovery_cleanup = sub {
    my ($self, $sid, $fenced_node) = @_;

    my $haenv = $self->{haenv};

    my (undef, $type, $id) = $haenv->parse_sid($sid);
    my $plugin = PVE::HA::Resources->lookup($type);

    # should not happen
    die "unknown resource type '$type'" if !$plugin;

    # locks may block recovery, clean up those which are safe to remove after fencing,
    # i.e., after the original node was reset and thus all its state
    my $removable_locks = [
        'backup',
        'mounted',
        'migrate',
        'clone',
        'rollback',
        'snapshot',
        'snapshot-delete',
        'suspending',
        'suspended',
    ];
    if (my $removed_lock = $plugin->remove_locks($haenv, $id, $removable_locks, $fenced_node)) {
        $haenv->log('warning', "removed leftover lock '$removed_lock' from recovered " .
            "service '$sid' to allow its start.");
    }
};

# read LRM status for all nodes
sub read_lrm_status {
    my ($self) = @_;

    my $nodes = $self->{ns}->list_nodes();
    my $haenv = $self->{haenv};

    my $results = {};
    my $modes = {};
    foreach my $node (@$nodes) {
        my $lrm_status = $haenv->read_lrm_status($node);
        $modes->{$node} = $lrm_status->{mode} || 'active';
        foreach my $uid (keys %{$lrm_status->{results}}) {
            next if $results->{$uid}; # should not happen
            $results->{$uid} = $lrm_status->{results}->{$uid};
        }
    }

    return ($results, $modes);
}

# read new crm commands and save them into crm master status
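# Commands are plain text lines as produced by the CRM command interface, for example
# 'migrate vm:100 node2', 'relocate vm:100 node2', 'stop vm:100 180' or
# 'enable-node-maintenance node2'; they are validated against the current node/service state here.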
sub update_crm_commands {
    my ($self) = @_;

    my ($haenv, $ms, $ns, $ss) = ($self->{haenv}, $self->{ms}, $self->{ns}, $self->{ss});

    my $cmdlist = $haenv->read_crm_commands();

    foreach my $cmd (split(/\n/, $cmdlist)) {
        chomp $cmd;

        if ($cmd =~ m/^(migrate|relocate)\s+(\S+)\s+(\S+)$/) {
            my ($task, $sid, $node) = ($1, $2, $3);
            if (my $sd = $ss->{$sid}) {
                if (!$ns->node_is_online($node)) {
                    $haenv->log('err', "crm command error - node not online: $cmd");
                } else {
                    if ($node eq $sd->{node}) {
                        $haenv->log('info', "ignore crm command - service already on target node: $cmd");
                    } else {
                        $haenv->log('info', "got crm command: $cmd");
                        $ss->{$sid}->{cmd} = [ $task, $node ];
                    }
                }
            } else {
                $haenv->log('err', "crm command error - no such service: $cmd");
            }

        } elsif ($cmd =~ m/^stop\s+(\S+)\s+(\S+)$/) {
            my ($sid, $timeout) = ($1, $2);
            if (my $sd = $ss->{$sid}) {
                $haenv->log('info', "got crm command: $cmd");
                $ss->{$sid}->{cmd} = [ 'stop', $timeout ];
            } else {
                $haenv->log('err', "crm command error - no such service: $cmd");
            }
        } elsif ($cmd =~ m/^enable-node-maintenance\s+(\S+)$/) {
            my $node = $1;

            my $state = $ns->get_node_state($node);
            if ($state eq 'online') {
                $ms->{node_request}->{$node}->{maintenance} = 1;
            } elsif ($state eq 'maintenance') {
                $haenv->log('info', "ignoring crm command - node $node is already in maintenance state");
            } else {
                $haenv->log('err', "crm command error - node not online: $cmd");
            }
        } elsif ($cmd =~ m/^disable-node-maintenance\s+(\S+)$/) {
            my $node = $1;

            my $state = $ns->get_node_state($node);
            if ($state ne 'maintenance') {
                $haenv->log(
                    'warn', "clearing maintenance of node $node requested, but it's in state $state");
            }
            delete $ms->{node_request}->{$node}->{maintenance}; # gets flushed out at the end of the CRM loop
        } else {
            $haenv->log('err', "unable to parse crm command: $cmd");
        }
    }

}

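# Main manager work function, called once per CRM loop iteration: updates node status, syncs the
# service list with the configuration, processes pending CRM commands and then runs the per-service
# state machine until no further state transitions happen, finally writing out the manager status.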
sub manage {
    my ($self) = @_;

    my ($haenv, $ms, $ns, $ss) = ($self->{haenv}, $self->{ms}, $self->{ns}, $self->{ss});

    my ($node_info) = $haenv->get_node_info();
    my ($lrm_results, $lrm_modes) = $self->read_lrm_status();

    $ns->update($node_info, $lrm_modes);

    if (!$ns->node_is_operational($haenv->nodename())) {
        $haenv->log('info', "master seems offline");
        return;
    }

    $self->update_crs_scheduler_mode();

    my $sc = $haenv->read_service_config();

    $self->{groups} = $haenv->read_group_config(); # update

    # compute new service status

    # add new services
    foreach my $sid (sort keys %$sc) {
        next if $ss->{$sid}; # already there
        my $cd = $sc->{$sid};
        next if $cd->{state} eq 'ignored';

        $haenv->log('info', "adding new service '$sid' on node '$cd->{node}'");
        # assume we are running to avoid relocating a running service on add
        my $state = ($cd->{state} eq 'started') ? 'request_start' : 'request_stop';
        $ss->{$sid} = {
            state => $state, node => $cd->{node}, uid => compute_new_uuid('started'),
        };
    }

    # remove stale or ignored services from manager state
    foreach my $sid (keys %$ss) {
        next if $sc->{$sid} && $sc->{$sid}->{state} ne 'ignored';

        my $reason = defined($sc->{$sid}) ? 'ignored state requested' : 'no config';
        $haenv->log('info', "removing stale service '$sid' ($reason)");

        # remove all service related state information
        delete $ss->{$sid};
    }

    $self->update_crm_commands();

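    # iterate until the service states converge; $repeat is set whenever a transition happened,
    # since one state change (e.g. a successful fence) may immediately enable the next one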
    for (;;) {
        my $repeat = 0;

        $self->recompute_online_node_usage();

        foreach my $sid (sort keys %$ss) {
            my $sd = $ss->{$sid};
            my $cd = $sc->{$sid} || { state => 'disabled' };

            my $lrm_res = $sd->{uid} ? $lrm_results->{$sd->{uid}} : undef;

            my $last_state = $sd->{state};

            if ($last_state eq 'stopped') {

                $self->next_state_stopped($sid, $cd, $sd, $lrm_res);

            } elsif ($last_state eq 'started') {

                $self->next_state_started($sid, $cd, $sd, $lrm_res);

            } elsif ($last_state eq 'request_start') {

                $self->next_state_request_start($sid, $cd, $sd, $lrm_res);

            } elsif ($last_state eq 'migrate' || $last_state eq 'relocate' || $last_state eq 'request_start_balance') {

                $self->next_state_migrate_relocate($sid, $cd, $sd, $lrm_res);

            } elsif ($last_state eq 'fence') {

                # do nothing here - wait until fenced

            } elsif ($last_state eq 'recovery') {

                $self->next_state_recovery($sid, $cd, $sd, $lrm_res);

            } elsif ($last_state eq 'request_stop') {

                $self->next_state_request_stop($sid, $cd, $sd, $lrm_res);

            } elsif ($last_state eq 'freeze') {

                my $lrm_mode = $sd->{node} ? $lrm_modes->{$sd->{node}} : undef;
                if ($lrm_mode && $lrm_mode eq 'active') { # unfreeze if active again
                    my $state = ($cd->{state} eq 'started') ? 'started' : 'request_stop';
                    $change_service_state->($self, $sid, $state);
                }

            } elsif ($last_state eq 'error') {

                $self->next_state_error($sid, $cd, $sd, $lrm_res);

            } else {

                die "unknown service state '$last_state'";
            }

            my $lrm_mode = $sd->{node} ? $lrm_modes->{$sd->{node}} : undef;
            if ($lrm_mode && $lrm_mode eq 'restart') {
                my $state = $sd->{state};
                if ($state eq 'started' || $state eq 'stopped' || $state eq 'request_stop') {
                    $change_service_state->($self, $sid, 'freeze');
                }
            }

            $repeat = 1 if $sd->{state} ne $last_state;
        }

        # handle fencing
        my $fenced_nodes = {};
        foreach my $sid (sort keys %$ss) {
            my ($service_state, $service_node) = $ss->{$sid}->@{'state', 'node'};
            next if $service_state ne 'fence';

            if (!defined($fenced_nodes->{$service_node})) {
                $fenced_nodes->{$service_node} = $ns->fence_node($service_node) || 0;
            }

            next if !$fenced_nodes->{$service_node};

            # node fence was successful - recover service
            $change_service_state->($self, $sid, 'recovery');
            $repeat = 1; # for faster recovery execution
        }

        # Avoid that a node without any service in 'fence' state (e.g., if they were removed
        # manually by an admin) gets stuck in the 'fence' node state.
        for my $node (sort grep { !defined($fenced_nodes->{$_}) } keys $ns->{status}->%*) {
            next if $ns->get_node_state($node) ne 'fence';

            $haenv->log('notice', "node '$node' in fence state but no services to-fence! admin interference?!");
            $repeat = 1 if $ns->fence_node($node);
        }

        last if !$repeat;
    }

    $self->flush_master_status();
}

# functions to compute next service states
# $cd: service configuration data (read only)
# $sd: service status data (read only)
#
# Note: use change_service_state() to alter state
#

sub next_state_request_stop {
    my ($self, $sid, $cd, $sd, $lrm_res) = @_;

    my $haenv = $self->{haenv};
    my $ns = $self->{ns};

    # check result from LRM daemon
    if ($lrm_res) {
        my $exit_code = $lrm_res->{exit_code};
        if ($exit_code == SUCCESS) {
            &$change_service_state($self, $sid, 'stopped');
            return;
        } else {
            $haenv->log('err', "service '$sid' stop failed (exit code $exit_code)");
            &$change_service_state($self, $sid, 'error'); # fixme: what state?
            return;
        }
    }

    if ($ns->node_is_offline_delayed($sd->{node})) {
        &$change_service_state($self, $sid, 'fence');
        return;
    }
}

sub next_state_migrate_relocate {
    my ($self, $sid, $cd, $sd, $lrm_res) = @_;

    my $haenv = $self->{haenv};
    my $ns = $self->{ns};

    # check result from LRM daemon
    if ($lrm_res) {
        my $exit_code = $lrm_res->{exit_code};
        my $req_state = $cd->{state} eq 'started' ? 'started' : 'request_stop';
        if ($exit_code == SUCCESS) {
            &$change_service_state($self, $sid, $req_state, node => $sd->{target});
            return;
        } elsif ($exit_code == EWRONG_NODE) {
            $haenv->log('err', "service '$sid' - migration failed: service" .
                " registered on wrong node!");
            &$change_service_state($self, $sid, 'error');
        } elsif ($exit_code == IGNORED) {
            $haenv->log(
                "info",
                "service '$sid' - rebalance-on-start request ignored - service already running",
            );
            $change_service_state->($self, $sid, $req_state, node => $sd->{node});
        } else {
            $haenv->log('err', "service '$sid' - migration failed (exit code $exit_code)");
            &$change_service_state($self, $sid, $req_state, node => $sd->{node});
            return;
        }
    }

    if ($ns->node_is_offline_delayed($sd->{node})) {
        &$change_service_state($self, $sid, 'fence');
        return;
    }
}

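# Next-state handler for services in 'stopped' state: processes queued migrate/relocate/stop
# commands, fences the node if it went offline unexpectedly, and requests a start once the
# configured state is 'started'.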
sub next_state_stopped {
    my ($self, $sid, $cd, $sd, $lrm_res) = @_;

    my $haenv = $self->{haenv};
    my $ns = $self->{ns};

    if ($sd->{node} ne $cd->{node}) {
        # this can happen if we fence a node with active migrations
        # hack: modify $sd (normally this should be considered read-only)
        $haenv->log('info', "fixup service '$sid' location ($sd->{node} => $cd->{node})");
        $sd->{node} = $cd->{node};
    }

    if ($sd->{cmd}) {
        my $cmd = shift @{$sd->{cmd}};

        if ($cmd eq 'migrate' || $cmd eq 'relocate') {
            my $target = shift @{$sd->{cmd}};
            if (!$ns->node_is_online($target)) {
                $haenv->log('err', "ignore service '$sid' $cmd request - node '$target' not online");
            } elsif ($sd->{node} eq $target) {
                $haenv->log('info', "ignore service '$sid' $cmd request - service already on node '$target'");
            } else {
                &$change_service_state($self, $sid, $cmd, node => $sd->{node}, target => $target);
                return;
            }
        } elsif ($cmd eq 'stop') {
            $haenv->log('info', "ignore service '$sid' $cmd request - service already stopped");
        } else {
            $haenv->log('err', "unknown command '$cmd' for service '$sid'");
        }
        delete $sd->{cmd};
    }

    if ($cd->{state} eq 'disabled') {
        # NOTE: do nothing here, the stop state is an exception as we do not
        # process the LRM result here, thus the LRM always tries to stop the
        # service (protection for the case no CRM is active)
        return;
    }

    if ($ns->node_is_offline_delayed($sd->{node}) && $ns->get_node_state($sd->{node}) ne 'maintenance') {
        &$change_service_state($self, $sid, 'fence');
        return;
    }

    if ($cd->{state} eq 'stopped') {
        # almost the same as 'disabled' state but the service will also get recovered
        return;
    }

    if ($cd->{state} eq 'started') {
        # simply mark it started; if it's on the wrong node, next_state_started will fix that for us
        $change_service_state->($self, $sid, 'request_start', node => $sd->{node});
        return;
    }

    $haenv->log('err', "service '$sid' - unknown state '$cd->{state}' in service configuration");
}

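# Next-state handler for 'request_start': when CRS rebalance-on-start is enabled, ask the
# scheduler for the best node and, if it differs from the current one, start via
# 'request_start_balance' (a balancing migration); otherwise simply go to 'started'.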
sub next_state_request_start {
    my ($self, $sid, $cd, $sd, $lrm_res) = @_;

    my $haenv = $self->{haenv};
    my $current_node = $sd->{node};

    if ($self->{crs}->{rebalance_on_request_start}) {
        my $selected_node = select_service_node(
            $self->{groups},
            $self->{online_node_usage},
            $sid,
            $cd,
            $sd->{node},
            0, # try_next
            $sd->{failed_nodes},
            $sd->{maintenance_node},
            1, # best_scored
        );
        my $select_text = $selected_node ne $current_node ? 'new' : 'current';
        $haenv->log('info', "service $sid: re-balance selected $select_text node $selected_node for startup");

        if ($selected_node ne $current_node) {
            $change_service_state->($self, $sid, 'request_start_balance', node => $current_node, target => $selected_node);
            return;
        }
    }

    $change_service_state->($self, $sid, 'started', node => $current_node);
}

sub record_service_failed_on_node {
    my ($self, $sid, $node) = @_;

    if (!defined($self->{ss}->{$sid}->{failed_nodes})) {
        $self->{ss}->{$sid}->{failed_nodes} = [];
    }

    push @{$self->{ss}->{$sid}->{failed_nodes}}, $node;
}

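# Next-state handler for 'started' services: fences or remembers a maintenance fallback for
# offline nodes, executes queued migrate/relocate/stop commands, applies the relocation policy on
# start failures (up to max_relocate attempts) and moves the service whenever select_service_node
# picks a better target.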
sub next_state_started {
    my ($self, $sid, $cd, $sd, $lrm_res) = @_;

    my $haenv = $self->{haenv};
    my $master_status = $self->{ms};
    my $ns = $self->{ns};

    if (!$ns->node_is_online($sd->{node})) {
        if ($ns->node_is_offline_delayed($sd->{node})) {
            &$change_service_state($self, $sid, 'fence');
        }
        if ($ns->get_node_state($sd->{node}) ne 'maintenance') {
            return;
        } else {
            # save current node as fallback for when it comes out of maintenance
            $sd->{maintenance_node} = $sd->{node};
        }
    }

    if ($cd->{state} eq 'disabled' || $cd->{state} eq 'stopped') {
        &$change_service_state($self, $sid, 'request_stop');
        return;
    }

    if ($cd->{state} eq 'started') {

        if ($sd->{cmd}) {
            my $cmd = shift @{$sd->{cmd}};

            if ($cmd eq 'migrate' || $cmd eq 'relocate') {
                my $target = shift @{$sd->{cmd}};
                if (!$ns->node_is_online($target)) {
                    $haenv->log('err', "ignore service '$sid' $cmd request - node '$target' not online");
                } elsif ($sd->{node} eq $target) {
                    $haenv->log('info', "ignore service '$sid' $cmd request - service already on node '$target'");
                } else {
                    $haenv->log('info', "$cmd service '$sid' to node '$target'");
                    &$change_service_state($self, $sid, $cmd, node => $sd->{node}, target => $target);
                }
            } elsif ($cmd eq 'stop') {
                my $timeout = shift @{$sd->{cmd}};
                if ($timeout == 0) {
                    $haenv->log('info', "request immediate service hard-stop for service '$sid'");
                } else {
                    $haenv->log('info', "request graceful stop with timeout '$timeout' for service '$sid'");
                }
                &$change_service_state($self, $sid, 'request_stop', timeout => $timeout);
                $haenv->update_service_config($sid, {'state' => 'stopped'});
            } else {
                $haenv->log('err', "unknown command '$cmd' for service '$sid'");
            }

            delete $sd->{cmd};

        } else {

            my $try_next = 0;

            if ($lrm_res) {

                my $ec = $lrm_res->{exit_code};
                if ($ec == SUCCESS) {

                    if (defined($sd->{failed_nodes})) {
                        $haenv->log('info', "relocation policy successful for '$sid' on node '$sd->{node}'," .
                            " failed nodes: " . join(', ', @{$sd->{failed_nodes}}));
                    }

                    delete $sd->{failed_nodes};

                    # store flag to indicate successful start - only valid while state == 'started'
                    $sd->{running} = 1;

                } elsif ($ec == ERROR || $ec == EWRONG_NODE) {

                    delete $sd->{running};

                    # apply our relocate policy if we got ERROR from the LRM
                    $self->record_service_failed_on_node($sid, $sd->{node});

                    if (scalar(@{$sd->{failed_nodes}}) <= $cd->{max_relocate}) {

                        # tell select_service_node to relocate if possible
                        $try_next = 1;

                        $haenv->log('warning', "starting service $sid on node" .
                            " '$sd->{node}' failed, relocating service.");

                    } else {

                        $haenv->log('err', "recovery policy for service $sid " .
                            "failed, entering error state. Failed nodes: " .
                            join(', ', @{$sd->{failed_nodes}}));
                        &$change_service_state($self, $sid, 'error');
                        return;

                    }
                } else {
                    $self->record_service_failed_on_node($sid, $sd->{node});

                    $haenv->log('err', "service '$sid' got unrecoverable error (exit code $ec)");
                    # we have no safe way out (yet) for other errors
                    &$change_service_state($self, $sid, 'error');
                    return;
                }
            }

            my $node = select_service_node(
                $self->{groups},
                $self->{online_node_usage},
                $sid,
                $cd,
                $sd->{node},
                $try_next,
                $sd->{failed_nodes},
                $sd->{maintenance_node},
            );

            if ($node && ($sd->{node} ne $node)) {
                $self->{online_node_usage}->add_service_usage_to_node($node, $sid, $sd->{node});

                if (defined(my $fallback = $sd->{maintenance_node})) {
                    if ($node eq $fallback) {
                        $haenv->log(
                            'info',
                            "moving service '$sid' back to '$fallback', node came back from maintenance.",
                        );
                        delete $sd->{maintenance_node};
                    } elsif ($sd->{node} ne $fallback) {
                        $haenv->log('info', "dropping maintenance fallback node '$fallback' for '$sid'");
                        delete $sd->{maintenance_node};
                    }
                }

                if ($cd->{type} eq 'vm') {
                    $haenv->log('info', "migrate service '$sid' to node '$node' (running)");
                    &$change_service_state($self, $sid, 'migrate', node => $sd->{node}, target => $node);
                } else {
                    $haenv->log('info', "relocate service '$sid' to node '$node'");
                    &$change_service_state($self, $sid, 'relocate', node => $sd->{node}, target => $node);
                }
            } else {
                if ($try_next && !defined($node)) {
                    $haenv->log(
                        'warning',
                        "Start Error Recovery: Tried all available nodes for service '$sid', retry"
                        ." start on current node. Tried nodes: " . join(', ', @{$sd->{failed_nodes}}),
                    );
                }

                if ($sd->{maintenance_node} && $sd->{node} eq $sd->{maintenance_node}) {
                    my $node_state = $ns->get_node_state($sd->{node});
                    if ($node_state eq 'online') {
                        # Having the maintenance node set here means that the service was never
                        # started on a different node since it was set. This can happen in the edge
                        # case that the whole cluster is shut down at the same time while the
                        # 'migrate' policy was configured. Node is not in maintenance mode anymore
                        # and service is started on this node, so it's fine to clear the setting.
                        $haenv->log(
                            'info',
                            "service '$sid': clearing stale maintenance node "
                            ."'$sd->{maintenance_node}' setting (is current node)",
                        );
                        delete $sd->{maintenance_node};
                    }
                }

                # ensure the service gets started again if it went down unexpectedly,
                # but also ensure that no LRM result gets lost
                $sd->{uid} = compute_new_uuid($sd->{state}) if defined($lrm_res);
            }
        }

        return;
    }

    $haenv->log('err', "service '$sid' - unknown state '$cd->{state}' in service configuration");
}

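# Next-state handler for 'error': the service stays in error state until an admin disables it,
# which clears the failed-nodes record and moves it back to 'stopped'.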
sub next_state_error {
    my ($self, $sid, $cd, $sd, $lrm_res) = @_;

    my $ns = $self->{ns};
    my $ms = $self->{ms};

    if ($cd->{state} eq 'disabled') {
        # clean up on error recovery
        delete $sd->{failed_nodes};

        &$change_service_state($self, $sid, 'stopped');
        return;
    }

}

# after a node was fenced this recovers the service to a new node
sub next_state_recovery {
    my ($self, $sid, $cd, $sd, $lrm_res) = @_;

    my ($haenv, $ss) = ($self->{haenv}, $self->{ss});
    my $ns = $self->{ns};
    my $ms = $self->{ms};

    if ($sd->{state} ne 'recovery') { # should not happen
        $haenv->log('err', "cannot recover service '$sid' from fencing, wrong state '$sd->{state}'");
        return;
    }

    my $fenced_node = $sd->{node}; # for logging purposes

    $self->recompute_online_node_usage(); # we want the most current node state

    my $recovery_node = select_service_node(
        $self->{groups},
        $self->{online_node_usage},
        $sid,
        $cd,
        $sd->{node},
    );

    if ($recovery_node) {
        my $msg = "recover service '$sid' from fenced node '$fenced_node' to node '$recovery_node'";
        if ($recovery_node eq $fenced_node) {
            # can happen with restricted groups if the node came up again OK
            $msg = "recover service '$sid' to previous failed and fenced node '$fenced_node' again";
        }
        $haenv->log('info', "$msg");

        $fence_recovery_cleanup->($self, $sid, $fenced_node);

        $haenv->steal_service($sid, $sd->{node}, $recovery_node);
        $self->{online_node_usage}->add_service_usage_to_node($recovery_node, $sid, $recovery_node);

        # NOTE: $sd *is normally read-only*, fencing is the exception
        $cd->{node} = $sd->{node} = $recovery_node;
        my $new_state = ($cd->{state} eq 'started') ? 'started' : 'request_stop';
        $change_service_state->($self, $sid, $new_state, node => $recovery_node);
    } else {
        # no possible node found, cannot recover - but retry later, as we always try to make it available
        $haenv->log('err', "recovering service '$sid' from fenced node '$fenced_node' failed, no recovery node found");

        if ($cd->{state} eq 'disabled') {
            # allow getting a service out of recovery manually if an admin disables it.
            delete $sd->{failed_nodes}; # clean up on recovery to stopped
            $change_service_state->($self, $sid, 'stopped'); # must NOT go through request_stop
            return;
        }
    }
}

1;