package PVE::HA::Manager;

use strict;
use warnings;

use Digest::MD5 qw(md5_base64);

use PVE::Tools;
use PVE::HA::Tools ':exit_codes';
use PVE::HA::NodeStatus;
use PVE::HA::Usage::Basic;
use PVE::HA::Usage::Static;

## Variable Name & Abbreviations Convention
#
# The HA stack uses a few variables so frequently that they are abbreviated, which may be
# confusing for new readers. Here's a short list of the most commonly used ones.
#
# NOTE: variables should be assumed to be read-only unless stated otherwise; only use the
# specific methods to re-compute/read/alter them.
#
# - $haenv -> HA environment, the main interface to the simulator/test/real world
# - $sid -> Service ID, unique identifier for a service, `type:vmid` is common
#
# - $ms -> Master/Manager Status, contains runtime info from the current active manager
# - $ns -> Node Status, hash holding online/offline status about all nodes
#
# - $ss -> Service Status, hash holding the current state (last LRM cmd result, failed starts
#   or migrates, maintenance fallback node, ...) for *all* services
# - $sd -> Service Data, the service status of a *single* service, i.e., $ss->{$sid}
#
# - $sc -> Service Configuration, hash for all services including target state, group, ...
# - $cd -> Configuration Data, the service config of a *single* service, i.e., $sc->{$sid}
#
# Try to avoid adding new two-letter (or similarly over-abbreviated) names, but also don't send
# patches for changing the above, as that set is mostly sensible and should be easy to remember
# once you have spent a bit of time in the HA code base.

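# A rough, illustrative sketch of a single $ss entry as described above (not exhaustive; the
# field names are taken from their usage further below, the concrete values are made up):
#
#   $sd = {
#       state => 'started',            # one of the keys of $valid_service_states below
#       node  => 'node1',              # node the service is (or was last) placed on
#       uid   => '...',                # uuid of the last CRM command handed to the LRM
#       # optional, depending on state/history:
#       target => 'node2',             # migration/relocation target node
#       failed_nodes => ['node1'],     # nodes where a start already failed
#       maintenance_node => 'node1',   # fallback node while its node is in maintenance
#       cmd => ['migrate', 'node2'],   # pending CRM command, consumed by the next_state_*() subs
#   };
#
#   $cd = { state => 'started', node => 'node1', group => 'mygroup', max_relocate => 1, ... };
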
sub new {
    my ($this, $haenv) = @_;

    my $class = ref($this) || $this;

    my $self = bless { haenv => $haenv, crs => {} }, $class;

    my $old_ms = $haenv->read_manager_status();

    # we only copy the state part of the manager which cannot be auto generated

    $self->{ns} = PVE::HA::NodeStatus->new($haenv, $old_ms->{node_status} || {});

    # fixme: use separate class PVE::HA::ServiceStatus
    $self->{ss} = $old_ms->{service_status} || {};

    $self->{ms} = { master_node => $haenv->nodename() };

    # take over node request state to ensure a node in (manual) maintenance mode stays that way
    # on change of active master.
    $self->{ms}->{node_request} = $old_ms->{node_request} if defined($old_ms->{node_request});

    $self->update_crs_scheduler_mode(); # initial set, we update it once every loop

    return $self;
}

sub update_crs_scheduler_mode {
    my ($self) = @_;

    my $haenv = $self->{haenv};
    my $dc_cfg = $haenv->get_datacenter_settings();

    $self->{crs}->{rebalance_on_request_start} = !!$dc_cfg->{crs}->{'ha-rebalance-on-start'};

    my $old_mode = $self->{crs}->{scheduler};
    my $new_mode = $dc_cfg->{crs}->{ha} || 'basic';

    if (!defined($old_mode)) {
        $haenv->log('info', "using scheduler mode '$new_mode'") if $new_mode ne 'basic';
    } elsif ($new_mode eq $old_mode) {
        return; # nothing to do
    } else {
        $haenv->log('info', "switching scheduler mode from '$old_mode' to '$new_mode'");
    }

    $self->{crs}->{scheduler} = $new_mode;

    return;
}

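# For reference: the only CRS-related keys read by update_crs_scheduler_mode() above are
# $dc_cfg->{crs}->{ha} (scheduler mode, 'basic' or 'static') and
# $dc_cfg->{crs}->{'ha-rebalance-on-start'}. A hedged sketch of the expected structure; any
# surrounding keys are assumptions for illustration only:
#
#   my $dc_cfg = {
#       crs => { ha => 'static', 'ha-rebalance-on-start' => 1 },
#       # ... other datacenter settings ...
#   };
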
sub cleanup {
    my ($self) = @_;

    # todo: ?
}

sub flush_master_status {
    my ($self) = @_;

    my ($haenv, $ms, $ns, $ss) = ($self->{haenv}, $self->{ms}, $self->{ns}, $self->{ss});

    $ms->{node_status} = $ns->{status};
    $ms->{service_status} = $ss;
    $ms->{timestamp} = $haenv->get_time();

    $haenv->write_manager_status($ms);
}

sub get_service_group {
    my ($groups, $online_node_usage, $service_conf) = @_;

    my $group = {};
    # add all online nodes to default group to allow try_next when no group set
    $group->{nodes}->{$_} = 1 for $online_node_usage->list_nodes();

    # overwrite default if service is bound to a specific group
    if (my $group_id = $service_conf->{group}) {
        $group = $groups->{ids}->{$group_id} if $groups->{ids}->{$group_id};
    }

    return $group;
}

# groups available nodes with their priority as group index
sub get_node_priority_groups {
    my ($group, $online_node_usage) = @_;

    my $pri_groups = {};
    my $group_members = {};
    foreach my $entry (keys %{$group->{nodes}}) {
        my ($node, $pri) = ($entry, 0);
        if ($entry =~ m/^(\S+):(\d+)$/) {
            ($node, $pri) = ($1, $2);
        }
        next if !$online_node_usage->contains_node($node); # offline
        $pri_groups->{$pri}->{$node} = 1;
        $group_members->{$node} = $pri;
    }

    # add non-group members to unrestricted groups (priority -1)
    if (!$group->{restricted}) {
        my $pri = -1;
        for my $node ($online_node_usage->list_nodes()) {
            next if defined($group_members->{$node});
            $pri_groups->{$pri}->{$node} = 1;
            $group_members->{$node} = -1;
        }
    }

    return ($pri_groups, $group_members);
}

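# Illustrative example for get_node_priority_groups() above (node names are made up): for a
# non-restricted group configured with the entries "node1:2" and "node2", plus an additional
# online node "node3" outside the group, the result would roughly be:
#
#   $pri_groups    = { 2 => { node1 => 1 }, 0 => { node2 => 1 }, -1 => { node3 => 1 } };
#   $group_members = { node1 => 2, node2 => 0, node3 => -1 };
#
# Higher numbers mean higher priority; select_service_node() below only ever considers the
# highest-priority group that still has online members.
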
sub select_service_node {
    my (
        $groups,
        $online_node_usage,
        $sid,
        $service_conf,
        $current_node,
        $try_next,
        $tried_nodes,
        $maintenance_fallback,
        $best_scored,
    ) = @_;

    my $group = get_service_group($groups, $online_node_usage, $service_conf);

    my ($pri_groups, $group_members) = get_node_priority_groups($group, $online_node_usage);

    my @pri_list = sort { $b <=> $a } keys %$pri_groups;
    return undef if !scalar(@pri_list);

    # stay on current node if possible (avoids random migrations)
    if ((!$try_next && !$best_scored) && $group->{nofailback} && defined($group_members->{$current_node})) {
        return $current_node;
    }

    # select node from top priority node list

    my $top_pri = $pri_list[0];

    # try to avoid nodes where the service failed already if we want to relocate
    if ($try_next) {
        foreach my $node (@$tried_nodes) {
            delete $pri_groups->{$top_pri}->{$node};
        }
    }

    return $maintenance_fallback
        if defined($maintenance_fallback) && $pri_groups->{$top_pri}->{$maintenance_fallback};

    return $current_node
        if (!$try_next && !$best_scored) && $pri_groups->{$top_pri}->{$current_node};

    my $scores = $online_node_usage->score_nodes_to_start_service($sid, $current_node);
    my @nodes = sort {
        $scores->{$a} <=> $scores->{$b} || $a cmp $b
    } keys %{$pri_groups->{$top_pri}};

    my $found;
    for (my $i = scalar(@nodes) - 1; $i >= 0; $i--) {
        my $node = $nodes[$i];
        if ($node eq $current_node) {
            $found = $i;
        }
    }

    if ($try_next) {
        if (!$best_scored && defined($found) && ($found < (scalar(@nodes) - 1))) {
            return $nodes[$found + 1];
        } else {
            return $nodes[0];
        }
    } else {
        return $nodes[0];
    }
}

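# Callers pass positional arguments to select_service_node(); a sketch mirroring how
# next_state_started() below invokes it (all values besides the literal flags come from the
# caller's context):
#
#   my $node = select_service_node(
#       $self->{groups},
#       $self->{online_node_usage},
#       $sid,
#       $cd,
#       $sd->{node},             # current node
#       $try_next,               # set after a failed start to move on to another node
#       $sd->{failed_nodes},     # nodes already tried
#       $sd->{maintenance_node}, # preferred fallback once that node is usable again
#   );
#
# It returns a node name, or undef if no suitable online node exists (e.g., a restricted group
# whose members are all offline).
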
my $uid_counter = 0;

sub compute_new_uuid {
    my ($state) = @_;

    $uid_counter++;
    return md5_base64($state . $$ . time() . $uid_counter);
}

my $valid_service_states = {
    stopped => 1,
    request_stop => 1,
    request_start => 1,
    request_start_balance => 1,
    started => 1,
    fence => 1,
    recovery => 1,
    migrate => 1,
    relocate => 1,
    freeze => 1,
    error => 1,
};

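# Note: there is no explicit transition table; which state follows which is implemented by the
# per-state next_state_*() handlers dispatched from manage() below, and $change_service_state
# only validates that the new state is one of the keys above. For example, 'fence' moves to
# 'recovery' once the node was fenced successfully, and 'recovery' moves to 'started' or
# 'request_stop' after the service was stolen to a new node (see next_state_recovery()).
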
# FIXME with 'static' mode and thousands of services, the overhead can be noticeable and the
# fact that this function is called for each state change and upon recovery doesn't help.
sub recompute_online_node_usage {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    my $online_nodes = $self->{ns}->list_online_nodes();

    my $online_node_usage;

    if (my $mode = $self->{crs}->{scheduler}) {
        if ($mode eq 'static') {
            $online_node_usage = eval {
                my $scheduler = PVE::HA::Usage::Static->new($haenv);
                $scheduler->add_node($_) for $online_nodes->@*;
                return $scheduler;
            };
            $haenv->log('warning', "fallback to 'basic' scheduler mode, init for 'static' failed - $@")
                if $@;
        } elsif ($mode eq 'basic') {
            # handled below in the general fall-back case
        } else {
            $haenv->log('warning', "got unknown scheduler mode '$mode', using 'basic'");
        }
    }

    # fallback to the basic algorithm in any case
    if (!$online_node_usage) {
        $online_node_usage = PVE::HA::Usage::Basic->new($haenv);
        $online_node_usage->add_node($_) for $online_nodes->@*;
    }

    foreach my $sid (sort keys %{$self->{ss}}) {
        my $sd = $self->{ss}->{$sid};
        my $state = $sd->{state};
        my $target = $sd->{target}; # optional
        if ($online_node_usage->contains_node($sd->{node})) {
            if (
                $state eq 'started' || $state eq 'request_stop' || $state eq 'fence'
                || $state eq 'freeze' || $state eq 'error' || $state eq 'recovery'
            ) {
                $online_node_usage->add_service_usage_to_node($sd->{node}, $sid, $sd->{node});
            } elsif ($state eq 'migrate' || $state eq 'relocate' || $state eq 'request_start_balance') {
                my $source = $sd->{node};
                # count it for both, source and target as load is put on both
                $online_node_usage->add_service_usage_to_node($source, $sid, $source, $target)
                    if $state ne 'request_start_balance';
                $online_node_usage->add_service_usage_to_node($target, $sid, $source, $target);
            } elsif ($state eq 'stopped' || $state eq 'request_start') {
                # do nothing
            } else {
                die "should not be reached (sid = '$sid', state = '$state')";
            }
        } elsif (defined($target) && $online_node_usage->contains_node($target)) {
            if ($state eq 'migrate' || $state eq 'relocate') {
                # to correctly track maintenance modi and also consider the target as used for the
                # case a node dies, as we cannot really know if the to-be-aborted incoming migration
                # has already cleaned up all used resources
                $online_node_usage->add_service_usage_to_node($target, $sid, $sd->{node}, $target);
            }
        }
    }

    $self->{online_node_usage} = $online_node_usage;
}

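# The usage objects built above (PVE::HA::Usage::Basic or PVE::HA::Usage::Static) are only
# consumed through the small interface used in this file: add_node(), contains_node(),
# list_nodes(), add_service_usage_to_node() and score_nodes_to_start_service(). Roughly
# speaking (an assumption about those modules, not verified here), 'static' weighs nodes by
# their configured resource usage while 'basic' essentially counts services per node; either
# way, lower scores from score_nodes_to_start_service() are preferred by select_service_node().
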
my $change_service_state = sub {
    my ($self, $sid, $new_state, %params) = @_;

    my ($haenv, $ss) = ($self->{haenv}, $self->{ss});

    my $sd = $ss->{$sid} || die "no such service '$sid'";

    my $old_state = $sd->{state};
    my $old_node = $sd->{node};
    my $old_failed_nodes = $sd->{failed_nodes};
    my $old_maintenance_node = $sd->{maintenance_node};

    die "no state change" if $old_state eq $new_state; # just to be sure

    die "invalid CRM service state '$new_state'\n" if !$valid_service_states->{$new_state};

    foreach my $k (keys %$sd) { delete $sd->{$k}; }

    $sd->{state} = $new_state;
    $sd->{node} = $old_node;
    $sd->{failed_nodes} = $old_failed_nodes if defined($old_failed_nodes);
    $sd->{maintenance_node} = $old_maintenance_node if defined($old_maintenance_node);

    my $text_state = '';
    foreach my $k (sort keys %params) {
        my $v = $params{$k};
        $text_state .= ", " if $text_state;
        $text_state .= "$k = $v";
        $sd->{$k} = $v;
    }

    $self->recompute_online_node_usage();

    $sd->{uid} = compute_new_uuid($new_state);

    $text_state = " ($text_state)" if $text_state;
    $haenv->log('info', "service '$sid': state changed from '${old_state}' to '${new_state}'$text_state");
};

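# Typical invocations of $change_service_state as found throughout this file - extra key/value
# parameters end up as fields on $sd and are included in the logged state-change message:
#
#   $change_service_state->($self, $sid, 'request_stop', timeout => $timeout);
#   $change_service_state->($self, $sid, 'migrate', node => $sd->{node}, target => $node);
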
# clean up a possible bad state from a recovered service to allow its start
my $fence_recovery_cleanup = sub {
    my ($self, $sid, $fenced_node) = @_;

    my $haenv = $self->{haenv};

    my (undef, $type, $id) = $haenv->parse_sid($sid);
    my $plugin = PVE::HA::Resources->lookup($type);

    # should not happen
    die "unknown resource type '$type'" if !$plugin;

    # locks may block recovery, clean up those which are safe to remove after fencing,
    # i.e., after the original node was reset and thus lost all its state
    my $removable_locks = [
        'backup',
        'mounted',
        'migrate',
        'clone',
        'rollback',
        'snapshot',
        'snapshot-delete',
        'suspending',
        'suspended',
    ];
    if (my $removed_lock = $plugin->remove_locks($haenv, $id, $removable_locks, $fenced_node)) {
        $haenv->log('warning', "removed leftover lock '$removed_lock' from recovered "
            . "service '$sid' to allow its start.");
    }
};

# read LRM status for all nodes
sub read_lrm_status {
    my ($self) = @_;

    my $nodes = $self->{ns}->list_nodes();
    my $haenv = $self->{haenv};

    my $results = {};
    my $modes = {};
    foreach my $node (@$nodes) {
        my $lrm_status = $haenv->read_lrm_status($node);
        $modes->{$node} = $lrm_status->{mode} || 'active';
        foreach my $uid (keys %{$lrm_status->{results}}) {
            next if $results->{$uid}; # should not happen
            $results->{$uid} = $lrm_status->{results}->{$uid};
        }
    }

    return ($results, $modes);
}

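# Sketch of the two values returned by read_lrm_status() as they are used by manage() below
# (only the fields actually read in this file are shown; anything else the LRM reports is
# passed through untouched):
#
#   $results = { $uid => { exit_code => SUCCESS, ... }, ... };   # keyed by CRM command uid
#   $modes   = { $node => 'active', ... };                       # also seen here: 'restart'
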
# read new crm commands and save them into crm master status
sub update_crm_commands {
    my ($self) = @_;

    my ($haenv, $ms, $ns, $ss) = ($self->{haenv}, $self->{ms}, $self->{ns}, $self->{ss});

    my $cmdlist = $haenv->read_crm_commands();

    foreach my $cmd (split(/\n/, $cmdlist)) {
        chomp $cmd;

        if ($cmd =~ m/^(migrate|relocate)\s+(\S+)\s+(\S+)$/) {
            my ($task, $sid, $node) = ($1, $2, $3);
            if (my $sd = $ss->{$sid}) {
                if (!$ns->node_is_online($node)) {
                    $haenv->log('err', "crm command error - node not online: $cmd");
                } else {
                    if ($node eq $sd->{node}) {
                        $haenv->log('info', "ignore crm command - service already on target node: $cmd");
                    } else {
                        $haenv->log('info', "got crm command: $cmd");
                        $ss->{$sid}->{cmd} = [ $task, $node ];
                    }
                }
            } else {
                $haenv->log('err', "crm command error - no such service: $cmd");
            }

        } elsif ($cmd =~ m/^stop\s+(\S+)\s+(\S+)$/) {
            my ($sid, $timeout) = ($1, $2);
            if (my $sd = $ss->{$sid}) {
                $haenv->log('info', "got crm command: $cmd");
                $ss->{$sid}->{cmd} = [ 'stop', $timeout ];
            } else {
                $haenv->log('err', "crm command error - no such service: $cmd");
            }
        } elsif ($cmd =~ m/^enable-node-maintenance\s+(\S+)$/) {
            my $node = $1;

            my $state = $ns->get_node_state($node);
            if ($state eq 'online') {
                $ms->{node_request}->{$node}->{maintenance} = 1;
            } elsif ($state eq 'maintenance') {
                $haenv->log('info', "ignoring crm command - node $node is already in maintenance state");
            } else {
                $haenv->log('err', "crm command error - node not online: $cmd");
            }
        } elsif ($cmd =~ m/^disable-node-maintenance\s+(\S+)$/) {
            my $node = $1;

            my $state = $ns->get_node_state($node);
            if ($state ne 'maintenance') {
                $haenv->log(
                    'warn', "clearing maintenance of node $node requested, but it's in state $state");
            }
            delete $ms->{node_request}->{$node}->{maintenance}; # gets flushed out at the end of the CRM loop
        } else {
            $haenv->log('err', "unable to parse crm command: $cmd");
        }
    }

}

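# The command strings parsed by update_crm_commands() above are plain text lines read via
# $haenv->read_crm_commands(), one command per line. Hedged examples matching the regexes
# (service and node names are made up):
#
#   migrate vm:100 node2
#   relocate ct:101 node3
#   stop vm:100 180
#   enable-node-maintenance node2
#   disable-node-maintenance node2
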
sub manage {
    my ($self) = @_;

    my ($haenv, $ms, $ns, $ss) = ($self->{haenv}, $self->{ms}, $self->{ns}, $self->{ss});

    my ($node_info) = $haenv->get_node_info();
    my ($lrm_results, $lrm_modes) = $self->read_lrm_status();

    $ns->update($node_info, $lrm_modes);

    if (!$ns->node_is_operational($haenv->nodename())) {
        $haenv->log('info', "master seems offline");
        return;
    }

    $self->update_crs_scheduler_mode();

    my $sc = $haenv->read_service_config();

    $self->{groups} = $haenv->read_group_config(); # update

    # compute new service status

    # add new service
    foreach my $sid (sort keys %$sc) {
        next if $ss->{$sid}; # already there
        my $cd = $sc->{$sid};
        next if $cd->{state} eq 'ignored';

        $haenv->log('info', "adding new service '$sid' on node '$cd->{node}'");
        # assume we are running to avoid relocating a running service at add time
        my $state = ($cd->{state} eq 'started') ? 'request_start' : 'request_stop';
        $ss->{$sid} = {
            state => $state, node => $cd->{node}, uid => compute_new_uuid('started'),
        };
    }

    # remove stale or ignored services from manager state
    foreach my $sid (keys %$ss) {
        next if $sc->{$sid} && $sc->{$sid}->{state} ne 'ignored';

        my $reason = defined($sc->{$sid}) ? 'ignored state requested' : 'no config';
        $haenv->log('info', "removing stale service '$sid' ($reason)");

        # remove all service related state information
        delete $ss->{$sid};
    }

    $self->update_crm_commands();

    for (;;) {
        my $repeat = 0;

        $self->recompute_online_node_usage();

        foreach my $sid (sort keys %$ss) {
            my $sd = $ss->{$sid};
            my $cd = $sc->{$sid} || { state => 'disabled' };

            my $lrm_res = $sd->{uid} ? $lrm_results->{$sd->{uid}} : undef;

            my $last_state = $sd->{state};

            if ($last_state eq 'stopped') {

                $self->next_state_stopped($sid, $cd, $sd, $lrm_res);

            } elsif ($last_state eq 'started') {

                $self->next_state_started($sid, $cd, $sd, $lrm_res);

            } elsif ($last_state eq 'request_start') {

                $self->next_state_request_start($sid, $cd, $sd, $lrm_res);

            } elsif ($last_state eq 'migrate' || $last_state eq 'relocate' || $last_state eq 'request_start_balance') {

                $self->next_state_migrate_relocate($sid, $cd, $sd, $lrm_res);

            } elsif ($last_state eq 'fence') {

                # do nothing here - wait until fenced

            } elsif ($last_state eq 'recovery') {

                $self->next_state_recovery($sid, $cd, $sd, $lrm_res);

            } elsif ($last_state eq 'request_stop') {

                $self->next_state_request_stop($sid, $cd, $sd, $lrm_res);

            } elsif ($last_state eq 'freeze') {

                my $lrm_mode = $sd->{node} ? $lrm_modes->{$sd->{node}} : undef;
                if ($lrm_mode && $lrm_mode eq 'active') { # unfreeze if active again
                    my $state = ($cd->{state} eq 'started') ? 'started' : 'request_stop';
                    $change_service_state->($self, $sid, $state);
                }

            } elsif ($last_state eq 'error') {

                $self->next_state_error($sid, $cd, $sd, $lrm_res);

            } else {

                die "unknown service state '$last_state'";
            }

            my $lrm_mode = $sd->{node} ? $lrm_modes->{$sd->{node}} : undef;
            if ($lrm_mode && $lrm_mode eq 'restart') {
                my $state = $sd->{state};
                if ($state eq 'started' || $state eq 'stopped' || $state eq 'request_stop') {
                    $change_service_state->($self, $sid, 'freeze');
                }
            }

            $repeat = 1 if $sd->{state} ne $last_state;
        }

        # handle fencing
        my $fenced_nodes = {};
        foreach my $sid (sort keys %$ss) {
            my ($service_state, $service_node) = $ss->{$sid}->@{'state', 'node'};
            next if $service_state ne 'fence';

            if (!defined($fenced_nodes->{$service_node})) {
                $fenced_nodes->{$service_node} = $ns->fence_node($service_node) || 0;
            }

            next if !$fenced_nodes->{$service_node};

            # node fence was successful - recover service
            $change_service_state->($self, $sid, 'recovery');
            $repeat = 1; # for faster recovery execution
        }

        # Avoid that a node without services in 'fence' state (e.g., removed
        # manually by admin) is stuck with the 'fence' node state.
        for my $node (sort grep { !defined($fenced_nodes->{$_}) } keys $ns->{status}->%*) {
            next if $ns->get_node_state($node) ne 'fence';

            $haenv->log('notice', "node '$node' in fence state but no services to-fence! admin interference?!");
            $repeat = 1 if $ns->fence_node($node);
        }

        last if !$repeat;
    }

    $self->flush_master_status();
}

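# Rough outline of one manage() iteration, as implemented above: refresh node and LRM status,
# bail out if this master is not operational, sync the service list with the configuration,
# apply queued CRM commands, then repeatedly dispatch every service to its next_state_*()
# handler (plus freeze/unfreeze and fence handling) until no state changes anymore, and
# finally persist everything via flush_master_status().
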
# functions to compute next service states
# $cd: service configuration data (read only)
# $sd: service status data (read only)
#
# Note: use change_service_state() to alter state
#

sub next_state_request_stop {
    my ($self, $sid, $cd, $sd, $lrm_res) = @_;

    my $haenv = $self->{haenv};
    my $ns = $self->{ns};

    # check result from LRM daemon
    if ($lrm_res) {
        my $exit_code = $lrm_res->{exit_code};
        if ($exit_code == SUCCESS) {
            &$change_service_state($self, $sid, 'stopped');
            return;
        } else {
            $haenv->log('err', "service '$sid' stop failed (exit code $exit_code)");
            &$change_service_state($self, $sid, 'error'); # fixme: what state?
            return;
        }
    }

    if ($ns->node_is_offline_delayed($sd->{node})) {
        &$change_service_state($self, $sid, 'fence');
        return;
    }
}

sub next_state_migrate_relocate {
    my ($self, $sid, $cd, $sd, $lrm_res) = @_;

    my $haenv = $self->{haenv};
    my $ns = $self->{ns};

    # check result from LRM daemon
    if ($lrm_res) {
        my $exit_code = $lrm_res->{exit_code};
        my $req_state = $cd->{state} eq 'started' ? 'started' : 'request_stop';
        if ($exit_code == SUCCESS) {
            &$change_service_state($self, $sid, $req_state, node => $sd->{target});
            return;
        } elsif ($exit_code == EWRONG_NODE) {
            $haenv->log('err', "service '$sid' - migration failed: service"
                . " registered on wrong node!");
            &$change_service_state($self, $sid, 'error');
        } elsif ($exit_code == IGNORED) {
            $haenv->log(
                "info",
                "service '$sid' - rebalance-on-start request ignored - service already running",
            );
            $change_service_state->($self, $sid, $req_state, node => $sd->{node});
        } else {
            $haenv->log('err', "service '$sid' - migration failed (exit code $exit_code)");
            &$change_service_state($self, $sid, $req_state, node => $sd->{node});
            return;
        }
    }

    if ($ns->node_is_offline_delayed($sd->{node})) {
        &$change_service_state($self, $sid, 'fence');
        return;
    }
}

sub next_state_stopped {
    my ($self, $sid, $cd, $sd, $lrm_res) = @_;

    my $haenv = $self->{haenv};
    my $ns = $self->{ns};

    if ($sd->{node} ne $cd->{node}) {
        # this can happen if we fence a node with active migrations
        # hack: modify $sd (normally this should be considered read-only)
        $haenv->log('info', "fixup service '$sid' location ($sd->{node} => $cd->{node})");
        $sd->{node} = $cd->{node};
    }

    if ($sd->{cmd}) {
        my $cmd = shift @{$sd->{cmd}};

        if ($cmd eq 'migrate' || $cmd eq 'relocate') {
            my $target = shift @{$sd->{cmd}};
            if (!$ns->node_is_online($target)) {
                $haenv->log('err', "ignore service '$sid' $cmd request - node '$target' not online");
            } elsif ($sd->{node} eq $target) {
                $haenv->log('info', "ignore service '$sid' $cmd request - service already on node '$target'");
            } else {
                &$change_service_state($self, $sid, $cmd, node => $sd->{node}, target => $target);
                return;
            }
        } elsif ($cmd eq 'stop') {
            $haenv->log('info', "ignore service '$sid' $cmd request - service already stopped");
        } else {
            $haenv->log('err', "unknown command '$cmd' for service '$sid'");
        }
        delete $sd->{cmd};
    }

    if ($cd->{state} eq 'disabled') {
        # NOTE: do nothing here, the stop state is an exception as we do not
        # process the LRM result here, thus the LRM always tries to stop the
        # service (protection for the case no CRM is active)
        return;
    }

    if ($ns->node_is_offline_delayed($sd->{node}) && $ns->get_node_state($sd->{node}) ne 'maintenance') {
        &$change_service_state($self, $sid, 'fence');
        return;
    }

    if ($cd->{state} eq 'stopped') {
        # almost the same as the 'disabled' state, but the service will also get recovered
        return;
    }

    if ($cd->{state} eq 'started') {
        # simply mark it started; if it's on the wrong node, next_state_started will fix that for us
        $change_service_state->($self, $sid, 'request_start', node => $sd->{node});
        return;
    }

    $haenv->log('err', "service '$sid' - unknown state '$cd->{state}' in service configuration");
}

sub next_state_request_start {
    my ($self, $sid, $cd, $sd, $lrm_res) = @_;

    my $haenv = $self->{haenv};
    my $current_node = $sd->{node};

    if ($self->{crs}->{rebalance_on_request_start}) {
        my $selected_node = select_service_node(
            $self->{groups},
            $self->{online_node_usage},
            $sid,
            $cd,
            $sd->{node},
            0, # try_next
            $sd->{failed_nodes},
            $sd->{maintenance_node},
            1, # best_score
        );
        my $select_text = $selected_node ne $current_node ? 'new' : 'current';
        $haenv->log('info', "service $sid: re-balance selected $select_text node $selected_node for startup");

        if ($selected_node ne $current_node) {
            $change_service_state->($self, $sid, 'request_start_balance', node => $current_node, target => $selected_node);
            return;
        }
    }

    $change_service_state->($self, $sid, 'started', node => $current_node);
}

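# Note on the flow in next_state_request_start() above: with the 'ha-rebalance-on-start' CRS
# option enabled, a freshly requested start first goes through 'request_start_balance'. That
# state is handled by next_state_migrate_relocate(), so the service either ends up 'started'
# on the selected node or, if the LRM reports IGNORED because it is already running, stays on
# its current node.
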
sub record_service_failed_on_node {
    my ($self, $sid, $node) = @_;

    if (!defined($self->{ss}->{$sid}->{failed_nodes})) {
        $self->{ss}->{$sid}->{failed_nodes} = [];
    }

    push @{$self->{ss}->{$sid}->{failed_nodes}}, $node;
}

sub next_state_started {
    my ($self, $sid, $cd, $sd, $lrm_res) = @_;

    my $haenv = $self->{haenv};
    my $master_status = $self->{ms};
    my $ns = $self->{ns};

    if (!$ns->node_is_online($sd->{node})) {
        if ($ns->node_is_offline_delayed($sd->{node})) {
            &$change_service_state($self, $sid, 'fence');
        }
        if ($ns->get_node_state($sd->{node}) ne 'maintenance') {
            return;
        } else {
            # save current node as fallback for when it comes out of maintenance
            $sd->{maintenance_node} = $sd->{node};
        }
    }

    if ($cd->{state} eq 'disabled' || $cd->{state} eq 'stopped') {
        &$change_service_state($self, $sid, 'request_stop');
        return;
    }

    if ($cd->{state} eq 'started') {

        if ($sd->{cmd}) {
            my $cmd = shift @{$sd->{cmd}};

            if ($cmd eq 'migrate' || $cmd eq 'relocate') {
                my $target = shift @{$sd->{cmd}};
                if (!$ns->node_is_online($target)) {
                    $haenv->log('err', "ignore service '$sid' $cmd request - node '$target' not online");
                } elsif ($sd->{node} eq $target) {
                    $haenv->log('info', "ignore service '$sid' $cmd request - service already on node '$target'");
                } else {
                    $haenv->log('info', "$cmd service '$sid' to node '$target'");
                    &$change_service_state($self, $sid, $cmd, node => $sd->{node}, target => $target);
                }
            } elsif ($cmd eq 'stop') {
                my $timeout = shift @{$sd->{cmd}};
                if ($timeout == 0) {
                    $haenv->log('info', "request immediate service hard-stop for service '$sid'");
                } else {
                    $haenv->log('info', "request graceful stop with timeout '$timeout' for service '$sid'");
                }
                &$change_service_state($self, $sid, 'request_stop', timeout => $timeout);
                $haenv->update_service_config($sid, {'state' => 'stopped'});
            } else {
                $haenv->log('err', "unknown command '$cmd' for service '$sid'");
            }

            delete $sd->{cmd};

        } else {

            my $try_next = 0;

            if ($lrm_res) {

                my $ec = $lrm_res->{exit_code};
                if ($ec == SUCCESS) {

                    if (defined($sd->{failed_nodes})) {
                        $haenv->log('info', "relocation policy successful for '$sid' on node '$sd->{node}',"
                            . " failed nodes: " . join(', ', @{$sd->{failed_nodes}}));
                    }

                    delete $sd->{failed_nodes};

                    # store flag to indicate successful start - only valid while state == 'started'
                    $sd->{running} = 1;

                } elsif ($ec == ERROR || $ec == EWRONG_NODE) {

                    delete $sd->{running};

                    # apply our relocate policy if we got ERROR from the LRM
                    $self->record_service_failed_on_node($sid, $sd->{node});

                    if (scalar(@{$sd->{failed_nodes}}) <= $cd->{max_relocate}) {

                        # tell select_service_node to relocate if possible
                        $try_next = 1;

                        $haenv->log('warning', "starting service $sid on node"
                            . " '$sd->{node}' failed, relocating service.");

                    } else {

                        $haenv->log('err', "recovery policy for service $sid "
                            . "failed, entering error state. Failed nodes: "
                            . join(', ', @{$sd->{failed_nodes}}));
                        &$change_service_state($self, $sid, 'error');
                        return;

                    }
                } else {
                    $self->record_service_failed_on_node($sid, $sd->{node});

                    $haenv->log('err', "service '$sid' got unrecoverable error (exit code $ec)");
                    # we have no safe way out (yet) for other errors
                    &$change_service_state($self, $sid, 'error');
                    return;
                }
            }

            my $node = select_service_node(
                $self->{groups},
                $self->{online_node_usage},
                $sid,
                $cd,
                $sd->{node},
                $try_next,
                $sd->{failed_nodes},
                $sd->{maintenance_node},
            );

            if ($node && ($sd->{node} ne $node)) {
                $self->{online_node_usage}->add_service_usage_to_node($node, $sid, $sd->{node});

                if (defined(my $fallback = $sd->{maintenance_node})) {
                    if ($node eq $fallback) {
                        $haenv->log(
                            'info',
                            "moving service '$sid' back to '$fallback', node came back from maintenance.",
                        );
                        delete $sd->{maintenance_node};
                    } elsif ($sd->{node} ne $fallback) {
                        $haenv->log('info', "dropping maintenance fallback node '$fallback' for '$sid'");
                        delete $sd->{maintenance_node};
                    }
                }

                if ($cd->{type} eq 'vm') {
                    $haenv->log('info', "migrate service '$sid' to node '$node' (running)");
                    &$change_service_state($self, $sid, 'migrate', node => $sd->{node}, target => $node);
                } else {
                    $haenv->log('info', "relocate service '$sid' to node '$node'");
                    &$change_service_state($self, $sid, 'relocate', node => $sd->{node}, target => $node);
                }
            } else {
                if ($try_next && !defined($node)) {
                    $haenv->log(
                        'warning',
                        "Start Error Recovery: Tried all available nodes for service '$sid', retry"
                        . " start on current node. Tried nodes: " . join(', ', @{$sd->{failed_nodes}}),
                    );
                }

                if ($sd->{maintenance_node} && $sd->{node} eq $sd->{maintenance_node}) {
                    my $node_state = $ns->get_node_state($sd->{node});
                    if ($node_state eq 'online') {
                        # Having the maintenance node set here means that the service was never
                        # started on a different node since it was set. This can happen in the edge
                        # case that the whole cluster is shut down at the same time while the
                        # 'migrate' policy was configured. The node is not in maintenance mode
                        # anymore and the service is started on this node, so it's fine to clear
                        # the setting.
                        $haenv->log(
                            'info',
                            "service '$sid': clearing stale maintenance node "
                            . "'$sd->{maintenance_node}' setting (is current node)",
                        );
                        delete $sd->{maintenance_node};
                    }
                }

                # ensure the service gets started again if it went down unexpectedly,
                # but also make sure no LRM result gets lost
                $sd->{uid} = compute_new_uuid($sd->{state}) if defined($lrm_res);
            }
        }

        return;
    }

    $haenv->log('err', "service '$sid' - unknown state '$cd->{state}' in service configuration");
}

sub next_state_error {
    my ($self, $sid, $cd, $sd, $lrm_res) = @_;

    my $ns = $self->{ns};
    my $ms = $self->{ms};

    if ($cd->{state} eq 'disabled') {
        # clean up on error recovery
        delete $sd->{failed_nodes};

        &$change_service_state($self, $sid, 'stopped');
        return;
    }

}

# after a node was fenced this recovers the service to a new node
sub next_state_recovery {
    my ($self, $sid, $cd, $sd, $lrm_res) = @_;

    my ($haenv, $ss) = ($self->{haenv}, $self->{ss});
    my $ns = $self->{ns};
    my $ms = $self->{ms};

    if ($sd->{state} ne 'recovery') { # should not happen
        $haenv->log('err', "cannot recover service '$sid' from fencing, wrong state '$sd->{state}'");
        return;
    }

    my $fenced_node = $sd->{node}; # for logging purposes

    $self->recompute_online_node_usage(); # we want the most current node state

    my $recovery_node = select_service_node(
        $self->{groups},
        $self->{online_node_usage},
        $sid,
        $cd,
        $sd->{node},
    );

    if ($recovery_node) {
        my $msg = "recover service '$sid' from fenced node '$fenced_node' to node '$recovery_node'";
        if ($recovery_node eq $fenced_node) {
            # can happen with restricted groups if the node came up again OK
            $msg = "recover service '$sid' to previously failed and fenced node '$fenced_node' again";
        }
        $haenv->log('info', "$msg");

        $fence_recovery_cleanup->($self, $sid, $fenced_node);

        $haenv->steal_service($sid, $sd->{node}, $recovery_node);
        $self->{online_node_usage}->add_service_usage_to_node($recovery_node, $sid, $recovery_node);

        # NOTE: $sd *is normally read-only*, fencing is the exception
        $cd->{node} = $sd->{node} = $recovery_node;
        my $new_state = ($cd->{state} eq 'started') ? 'started' : 'request_stop';
        $change_service_state->($self, $sid, $new_state, node => $recovery_node);
    } else {
        # no possible node found, cannot recover - but retry later, as we always try to make it available
        $haenv->log('err', "recovering service '$sid' from fenced node '$fenced_node' failed, no recovery node found");

        if ($cd->{state} eq 'disabled') {
            # allow getting a service out of recovery manually if an admin disables it.
            delete $sd->{failed_nodes}; # clean up on recovery to stopped
            $change_service_state->($self, $sid, 'stopped'); # must NOT go through request_stop
            return;
        }
    }
}

1;