]>
Commit | Line | Data |
---|---|---|
c0bbd038 DM |
1 | package PVE::HA::Manager; |
2 | ||
3 | use strict; | |
4 | use warnings; | |
09169180 | 5 | |
c4a221bc | 6 | use Digest::MD5 qw(md5_base64); |
c0bbd038 | 7 | |
c142ebc9 | 8 | use PVE::Tools; |
a89ff919 | 9 | use PVE::HA::Tools ':exit_codes'; |
c0bbd038 | 10 | use PVE::HA::NodeStatus; |
5d724d4d | 11 | use PVE::HA::Usage::Basic; |
561e7f4b | 12 | use PVE::HA::Usage::Static; |
c0bbd038 | 13 | |
a3ffb0b3 TL |
14 | ## Variable Name & Abbreviations Convention |
15 | # | |
16 | # The HA stack has some variables it uses frequently and thus abbreviates it such that it may be | |
17 | # confusing for new readers. Here's a short list of the most common used. | |
18 | # | |
19 | # NOTE: variables should be assumed to be read only if not otherwise stated, only use the specific | |
20 | # methods to re-compute/read/alter them. | |
21 | # | |
22 | # - $haenv -> HA environment, the main interface to the simulator/test/real world | |
23 | # - $sid -> Service ID, unique identifier for a service, `type:vmid` is common | |
24 | # | |
25 | # - $ms -> Master/Manager Status, contains runtime info from the current active manager | |
26 | # - $ns -> Node Status, hash holding online/offline status about all nodes | |
27 | # | |
28 | # - $ss -> Service Status, hash holding the current state (last LRM cmd result, failed starts | |
29 | or migrates, maintenance fallback node, ...) for *all* services | |
30 | # - $sd -> Service Data, the service status of a *single* service, iow. $ss->{$sid} | |
31 | # | |
32 | # - $sc -> Service Configuration, hash for all services including target state, group, ... | |
0869c306 | 33 | # - $cd -> Configuration Data, the service config of a *single* service, iow. $sc->{$sid} |
a3ffb0b3 TL |
34 | # |
35 | # Try to avoid adding new two letter (or similar over abbreviated) names, but also don't send | |
36 | # patches for changing above, as that set is mostly sensible and should be easy to remember once | |
37 | spending a bit of time in the HA code base. | |
38 | ||
# Construct a new HA manager instance bound to the given HA environment.
# Carries over the persistent parts (node status, service status, node
# requests) of a previously active manager, if any.
sub new {
    my ($this, $haenv) = @_;

    my $class = ref($this) || $this;

    my $self = bless { haenv => $haenv, crs => {} }, $class;

    # we only copy the state part of the manager which cannot be auto generated
    my $previous = $haenv->read_manager_status();

    $self->{ns} = PVE::HA::NodeStatus->new($haenv, $previous->{node_status} || {});

    # fixme: use separate class PVE::HA::ServiceStatus
    $self->{ss} = $previous->{service_status} || {};

    $self->{ms} = { master_node => $haenv->nodename() };

    # take over node request state to ensure a node in (manual) maintenance
    # mode stays that way on change of active master.
    if (defined($previous->{node_request})) {
        $self->{ms}->{node_request} = $previous->{node_request};
    }

    $self->update_crs_scheduler_mode(); # initial set, we update it once every loop

    return $self;
}
65 | ||
# Refresh the cluster-resource-scheduler (CRS) settings from the datacenter
# configuration: the rebalance-on-start flag and the scheduler mode
# ('basic' if unset). Logs on first setup (for non-default modes) and on
# every mode switch; returns early when the mode is unchanged.
sub update_crs_scheduler_mode {
    my ($self) = @_;

    my $haenv = $self->{haenv};
    my $crs_cfg = $haenv->get_datacenter_settings()->{crs};

    # normalize to a plain boolean
    $self->{crs}->{rebalance_on_request_start} = !!$crs_cfg->{'ha-rebalance-on-start'};

    my $current_mode = $self->{crs}->{scheduler};
    my $requested_mode = $crs_cfg->{ha} || 'basic';

    if (defined($current_mode)) {
        return if $requested_mode eq $current_mode; # nothing to do
        $haenv->log('info', "switching scheduler mode from '$current_mode' to '$requested_mode'");
    } elsif ($requested_mode ne 'basic') {
        $haenv->log('info', "using scheduler mode '$requested_mode'");
    }

    $self->{crs}->{scheduler} = $requested_mode;

    return;
}
89 | ||
# Tear-down hook for the manager; currently a no-op placeholder.
sub cleanup {
    my ($self) = @_;

    # todo: ?
}
95 | ||
# Persist the current manager state: copies node status and service status
# into the master status hash, stamps it with the current time, and writes
# it out through the HA environment.
sub flush_master_status {
    my ($self) = @_;

    my ($haenv, $ms, $ns, $ss) = ($self->{haenv}, $self->{ms}, $self->{ns}, $self->{ss});

    $ms->{node_status} = $ns->{status};
    $ms->{service_status} = $ss;
    $ms->{timestamp} = $haenv->get_time();

    $haenv->write_manager_status($ms);
}
c0bbd038 | 107 | |
# Resolve the effective HA group for a service: the configured group when one
# is set and exists, otherwise a synthetic default group containing every
# online node (so try_next always has candidates without an explicit group).
sub get_service_group {
    my ($groups, $online_node_usage, $service_conf) = @_;

    # return the configured group directly when the service is bound to one
    my $group_id = $service_conf->{group};
    if ($group_id && $groups->{ids}->{$group_id}) {
        return $groups->{ids}->{$group_id};
    }

    # otherwise build a default group out of all currently online nodes
    my $default_group = {};
    foreach my $node ($online_node_usage->list_nodes()) {
        $default_group->{nodes}->{$node} = 1;
    }

    return $default_group;
}
122 | ||
# Partition a group's online member nodes by priority level.
# Member entries may carry a priority suffix ("node:prio"); plain entries get
# priority 0. Offline nodes are skipped. For unrestricted groups all remaining
# online nodes are appended with priority -1 as a last resort.
# Returns ($pri_groups, $group_members):
#   $pri_groups    - priority => { node => 1 }
#   $group_members - node => priority
sub get_node_priority_groups {
    my ($group, $online_node_usage) = @_;

    my $pri_groups = {};
    my $group_members = {};
    foreach my $member (keys %{$group->{nodes}}) {
        my ($node, $pri) = ($member, 0);
        ($node, $pri) = ($1, $2) if $member =~ m/^(\S+):(\d+)$/;
        next if !$online_node_usage->contains_node($node); # offline
        $pri_groups->{$pri}->{$node} = 1;
        $group_members->{$node} = $pri;
    }

    # add non-group members to unrestricted groups (priority -1)
    if (!$group->{restricted}) {
        foreach my $node ($online_node_usage->list_nodes()) {
            next if defined($group_members->{$node});
            $pri_groups->{-1}->{$node} = 1;
            $group_members->{$node} = -1;
        }
    }

    return ($pri_groups, $group_members);
}
151 | ||
# Choose the node a service should run (or be recovered/relocated) on.
#
# Parameters:
#   $groups               - group configuration (from read_group_config)
#   $online_node_usage    - usage/scoring object over the online nodes
#   $sid                  - service ID
#   $service_conf         - the service's configuration data
#   $current_node         - node the service currently resides on
#   $try_next             - true when relocating after a failure (avoid tried nodes)
#   $tried_nodes          - array ref of nodes the service already failed on
#   $maintenance_fallback - preferred node to return to after maintenance
#   $best_scored          - true to always pick the best-scored node
#
# Returns the selected node name, or undef if no eligible node exists.
sub select_service_node {
    my ($groups, $online_node_usage, $sid, $service_conf, $current_node, $try_next, $tried_nodes, $maintenance_fallback, $best_scored) = @_;

    my $group = get_service_group($groups, $online_node_usage, $service_conf);

    my ($pri_groups, $group_members) = get_node_priority_groups($group, $online_node_usage);

    # highest priority first
    my @pri_list = sort {$b <=> $a} keys %$pri_groups;
    return undef if !scalar(@pri_list);

    # stay on current node if possible (avoids random migrations)
    if ((!$try_next && !$best_scored) && $group->{nofailback} && defined($group_members->{$current_node})) {
        return $current_node;
    }

    # select node from top priority node list

    my $top_pri = $pri_list[0];

    # try to avoid nodes where the service failed already if we want to relocate
    if ($try_next) {
        foreach my $node (@$tried_nodes) {
            delete $pri_groups->{$top_pri}->{$node};
        }
    }

    # prefer returning to the pre-maintenance node when it is eligible
    return $maintenance_fallback
        if defined($maintenance_fallback) && $pri_groups->{$top_pri}->{$maintenance_fallback};

    # otherwise keep the current node when allowed and still eligible
    return $current_node if (!$try_next && !$best_scored) && $pri_groups->{$top_pri}->{$current_node};

    # order candidates by usage score (ascending), node name as tie-breaker
    my $scores = $online_node_usage->score_nodes_to_start_service($sid, $current_node);
    my @nodes = sort {
        $scores->{$a} <=> $scores->{$b} || $a cmp $b
    } keys %{$pri_groups->{$top_pri}};

    # find the current node's position in the sorted candidate list
    my $found;
    for (my $i = scalar(@nodes) - 1; $i >= 0; $i--) {
        my $node = $nodes[$i];
        if ($node eq $current_node) {
            $found = $i;
        }
    }

    if ($try_next) {
        # round-robin: move to the next candidate after the current node,
        # wrapping to the best-scored one at the end of the list
        if (!$best_scored && defined($found) && ($found < (scalar(@nodes) - 1))) {
            return $nodes[$found + 1];
        } else {
            return $nodes[0];
        }
    } else {
        return $nodes[0];
    }
}
206 | ||
# Monotonic per-process counter mixed into generated UIDs so that two calls
# within the same second still yield distinct values.
my $uid_counter = 0;

# Generate a fresh unique identifier (22-char base64 MD5) for a service state
# change, derived from the state name, PID, current time and the counter.
sub compute_new_uuid {
    my ($state) = @_;

    $uid_counter++;
    return md5_base64(join('', $state, $$, time(), $uid_counter));
}
215 | ||
# Set of all CRM service states accepted by $change_service_state below;
# any transition to a state not listed here dies.
my $valid_service_states = {
    stopped => 1,
    request_stop => 1,
    request_start => 1,
    request_start_balance => 1,
    started => 1,
    fence => 1,
    recovery => 1,
    migrate => 1,
    relocate => 1,
    freeze => 1,
    error => 1,
};
229 | ||
# Rebuild $self->{online_node_usage}: a usage object holding all online nodes
# and, per node, the services currently putting load on it. Uses the 'static'
# usage implementation when that CRS scheduler mode is configured (falling
# back to 'basic' if its initialization fails), otherwise 'basic'.
#
# FIXME with 'static' mode and thousands of services, the overhead can be noticable and the fact
# that this function is called for each state change and upon recovery doesn't help.
sub recompute_online_node_usage {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    my $online_nodes = $self->{ns}->list_online_nodes();

    my $online_node_usage;

    if (my $mode = $self->{crs}->{scheduler}) {
        if ($mode eq 'static') {
            # static usage needs per-node/per-service config; guard with eval
            # as that initialization can fail
            $online_node_usage = eval {
                my $scheduler = PVE::HA::Usage::Static->new($haenv);
                $scheduler->add_node($_) for $online_nodes->@*;
                return $scheduler;
            };
            $haenv->log('warning', "fallback to 'basic' scheduler mode, init for 'static' failed - $@")
                if $@;
        } elsif ($mode eq 'basic') {
            # handled below in the general fall-back case
        } else {
            $haenv->log('warning', "got unknown scheduler mode '$mode', using 'basic'");
        }
    }

    # fallback to the basic algorithm in any case
    if (!$online_node_usage) {
        $online_node_usage = PVE::HA::Usage::Basic->new($haenv);
        $online_node_usage->add_node($_) for $online_nodes->@*;
    }

    # account each service's load to the node(s) it occupies, depending on
    # its current CRM state
    foreach my $sid (sort keys %{$self->{ss}}) {
        my $sd = $self->{ss}->{$sid};
        my $state = $sd->{state};
        my $target = $sd->{target}; # optional
        if ($online_node_usage->contains_node($sd->{node})) {
            if (
                $state eq 'started' || $state eq 'request_stop' || $state eq 'fence'
                || $state eq 'freeze' || $state eq 'error' || $state eq 'recovery'
            ) {
                $online_node_usage->add_service_usage_to_node($sd->{node}, $sid, $sd->{node});
            } elsif ($state eq 'migrate' || $state eq 'relocate' || $state eq 'request_start_balance') {
                my $source = $sd->{node};
                # count it for both, source and target as load is put on both
                $online_node_usage->add_service_usage_to_node($source, $sid, $source, $target)
                    if $state ne 'request_start_balance';
                $online_node_usage->add_service_usage_to_node($target, $sid, $source, $target)
                    if $online_node_usage->contains_node($target);
            } elsif ($state eq 'stopped' || $state eq 'request_start') {
                # do nothing
            } else {
                die "should not be reached (sid = '$sid', state = '$state')";
            }
        } elsif (defined($target) && $online_node_usage->contains_node($target)) {
            # current node offline, but migration target is online
            if ($state eq 'migrate' || $state eq 'relocate') {
                # to correctly track maintenance modi and also consider the target as used for the
                # case a node dies, as we cannot really know if the to-be-aborted incoming migration
                # has already cleaned up all used resources
                $online_node_usage->add_service_usage_to_node($target, $sid, $sd->{node}, $target);
            }
        }
    }

    $self->{online_node_usage} = $online_node_usage;
}
297 | ||
# Transition service $sid into $new_state.
# Resets all transient per-service data, keeping only the sticky parts
# (node, failed_nodes, maintenance_node); any extra key => value pairs in
# %params are stored into the service data and included in the log line.
# Dies when there is no actual state change or $new_state is invalid.
# Also recomputes node usage and assigns a fresh command UID.
my $change_service_state = sub {
    my ($self, $sid, $new_state, %params) = @_;

    my ($haenv, $ss) = ($self->{haenv}, $self->{ss});

    # fixed: the error message was missing its closing quote around $sid
    my $sd = $ss->{$sid} || die "no such service '$sid'";

    my $old_state = $sd->{state};
    my $old_node = $sd->{node};
    my $old_failed_nodes = $sd->{failed_nodes};
    my $old_maintenance_node = $sd->{maintenance_node};

    die "no state change" if $old_state eq $new_state; # just to be sure

    die "invalid CRM service state '$new_state'\n" if !$valid_service_states->{$new_state};

    # wipe all transient state, then restore the sticky parts below
    foreach my $k (keys %$sd) { delete $sd->{$k}; };

    $sd->{state} = $new_state;
    $sd->{node} = $old_node;
    $sd->{failed_nodes} = $old_failed_nodes if defined($old_failed_nodes);
    $sd->{maintenance_node} = $old_maintenance_node if defined($old_maintenance_node);

    my $text_state = '';
    foreach my $k (sort keys %params) {
        my $v = $params{$k};
        $text_state .= ", " if $text_state;
        $text_state .= "$k = $v";
        $sd->{$k} = $v;
    }

    $self->recompute_online_node_usage();

    $sd->{uid} = compute_new_uuid($new_state);

    $text_state = " ($text_state)" if $text_state;
    $haenv->log('info', "service '$sid': state changed from '${old_state}' to '${new_state}'$text_state");
};
336 | ||
# clean up a possible bad state from a recovered service to allow its start
# NOTE(review): PVE::HA::Resources is used here but not in the visible import
# list — presumably loaded indirectly; verify against the full file.
my $fence_recovery_cleanup = sub {
    my ($self, $sid, $fenced_node) = @_;

    my $haenv = $self->{haenv};

    my (undef, $type, $id) = $haenv->parse_sid($sid);
    my $plugin = PVE::HA::Resources->lookup($type);

    # should not happen
    die "unknown resource type '$type'" if !$plugin;

    # locks may block recovery, cleanup those which are safe to remove after fencing,
    # i.e., after the original node was reset and thus all it's state
    my $removable_locks = [
        'backup',
        'mounted',
        'migrate',
        'clone',
        'rollback',
        'snapshot',
        'snapshot-delete',
        'suspending',
        'suspended',
    ];
    if (my $removed_lock = $plugin->remove_locks($haenv, $id, $removable_locks, $fenced_node)) {
        $haenv->log('warning', "removed leftover lock '$removed_lock' from recovered " .
            "service '$sid' to allow its start.");
    }
};
367 | ||
# Collect LRM status from every known node.
# Returns ($results, $modes):
#   $results - command UID => LRM result entry (first writer wins; duplicate
#              UIDs across nodes should not happen)
#   $modes   - node name => reported LRM mode, defaulting to 'active'
sub read_lrm_status {
    my ($self) = @_;

    my $haenv = $self->{haenv};
    my $node_list = $self->{ns}->list_nodes();

    my ($results, $modes) = ({}, {});
    for my $node (@$node_list) {
        my $lrm_status = $haenv->read_lrm_status($node);
        $modes->{$node} = $lrm_status->{mode} || 'active';
        for my $uid (keys %{$lrm_status->{results}}) {
            next if $results->{$uid}; # should not happen
            $results->{$uid} = $lrm_status->{results}->{$uid};
        }
    }

    return ($results, $modes);
}
388 | ||
# read new crm commands and save them into crm master status
# Understood commands (one per line of the command queue):
#   migrate|relocate <sid> <node>     - queue service move, node must be online
#   stop <sid> <timeout>              - queue service stop with a timeout
#   enable-node-maintenance <node>    - request maintenance mode for a node
#   disable-node-maintenance <node>   - clear a node's maintenance request
# Invalid or unparsable commands are only logged, never fatal.
sub update_crm_commands {
    my ($self) = @_;

    my ($haenv, $ms, $ns, $ss) = ($self->{haenv}, $self->{ms}, $self->{ns}, $self->{ss});

    my $cmdlist = $haenv->read_crm_commands();

    foreach my $cmd (split(/\n/, $cmdlist)) {
        chomp $cmd;

        if ($cmd =~ m/^(migrate|relocate)\s+(\S+)\s+(\S+)$/) {
            my ($task, $sid, $node) = ($1, $2, $3);
            if (my $sd = $ss->{$sid}) {
                if (!$ns->node_is_online($node)) {
                    $haenv->log('err', "crm command error - node not online: $cmd");
                } else {
                    if ($node eq $sd->{node}) {
                        $haenv->log('info', "ignore crm command - service already on target node: $cmd");
                    } else {
                        $haenv->log('info', "got crm command: $cmd");
                        # picked up later by the next_state_* handlers
                        $ss->{$sid}->{cmd} = [ $task, $node ];
                    }
                }
            } else {
                $haenv->log('err', "crm command error - no such service: $cmd");
            }

        } elsif ($cmd =~ m/^stop\s+(\S+)\s+(\S+)$/) {
            my ($sid, $timeout) = ($1, $2);
            if (my $sd = $ss->{$sid}) {
                $haenv->log('info', "got crm command: $cmd");
                $ss->{$sid}->{cmd} = [ 'stop', $timeout ];
            } else {
                $haenv->log('err', "crm command error - no such service: $cmd");
            }
        } elsif ($cmd =~ m/^enable-node-maintenance\s+(\S+)$/) {
            my $node = $1;

            my $state = $ns->get_node_state($node);
            if ($state eq 'online') {
                # recorded in master status so it survives master changes
                $ms->{node_request}->{$node}->{maintenance} = 1;
            } elsif ($state eq 'maintenance') {
                $haenv->log('info', "ignoring crm command - node $node is already in maintenance state");
            } else {
                $haenv->log('err', "crm command error - node not online: $cmd");
            }
        } elsif ($cmd =~ m/^disable-node-maintenance\s+(\S+)$/) {
            my $node = $1;

            my $state = $ns->get_node_state($node);
            if ($state ne 'maintenance') {
                $haenv->log(
                    'warn', "clearing maintenance of node $node requested, but it's in state $state");
            }
            delete $ms->{node_request}->{$node}->{maintenance}; # gets flushed out at the end of the CRM loop
        } else {
            $haenv->log('err', "unable to parse crm command: $cmd");
        }
    }

}
451 | ||
# Main CRM work cycle, called once per manager loop iteration:
#  1. refresh node status from node info and LRM modes; bail out if this
#     master itself is not operational
#  2. sync CRS mode, service config and group config
#  3. add newly configured services, drop stale/ignored ones
#  4. apply queued CRM commands
#  5. iterate the per-service state machine until no more transitions happen,
#     handling freeze/unfreeze and node fencing along the way
#  6. persist the resulting master status
sub manage {
    my ($self) = @_;

    my ($haenv, $ms, $ns, $ss) = ($self->{haenv}, $self->{ms}, $self->{ns}, $self->{ss});

    my ($node_info) = $haenv->get_node_info();
    my ($lrm_results, $lrm_modes) = $self->read_lrm_status();

    $ns->update($node_info, $lrm_modes);

    if (!$ns->node_is_operational($haenv->nodename())) {
        $haenv->log('info', "master seems offline");
        return;
    }

    $self->update_crs_scheduler_mode();

    my $sc = $haenv->read_service_config();

    $self->{groups} = $haenv->read_group_config(); # update

    # compute new service status

    # add new service
    foreach my $sid (sort keys %$sc) {
        next if $ss->{$sid}; # already there
        my $cd = $sc->{$sid};
        next if $cd->{state} eq 'ignored';

        $haenv->log('info', "adding new service '$sid' on node '$cd->{node}'");
        # assume we are running to avoid relocate running service at add
        my $state = ($cd->{state} eq 'started') ? 'request_start' : 'request_stop';
        $ss->{$sid} = {
            state => $state, node => $cd->{node}, uid => compute_new_uuid('started'),
        };
    }

    # remove stale or ignored services from manager state
    foreach my $sid (keys %$ss) {
        next if $sc->{$sid} && $sc->{$sid}->{state} ne 'ignored';

        my $reason = defined($sc->{$sid}) ? 'ignored state requested' : 'no config';
        $haenv->log('info', "removing stale service '$sid' ($reason)");

        # remove all service related state information
        delete $ss->{$sid};
    }

    $self->update_crm_commands();

    # run the state machine to a fixed point: repeat while any service
    # changed state, so follow-up transitions happen within this cycle
    for (;;) {
        my $repeat = 0;

        $self->recompute_online_node_usage();

        foreach my $sid (sort keys %$ss) {
            my $sd = $ss->{$sid};
            my $cd = $sc->{$sid} || { state => 'disabled' };

            # result of the last command we sent for this service, if any
            my $lrm_res = $sd->{uid} ? $lrm_results->{$sd->{uid}} : undef;

            my $last_state = $sd->{state};

            if ($last_state eq 'stopped') {

                $self->next_state_stopped($sid, $cd, $sd, $lrm_res);

            } elsif ($last_state eq 'started') {

                $self->next_state_started($sid, $cd, $sd, $lrm_res);

            } elsif ($last_state eq 'request_start') {

                $self->next_state_request_start($sid, $cd, $sd, $lrm_res);

            } elsif ($last_state eq 'migrate' || $last_state eq 'relocate' || $last_state eq 'request_start_balance') {

                $self->next_state_migrate_relocate($sid, $cd, $sd, $lrm_res);

            } elsif ($last_state eq 'fence') {

                # do nothing here - wait until fenced

            } elsif ($last_state eq 'recovery') {

                $self->next_state_recovery($sid, $cd, $sd, $lrm_res);

            } elsif ($last_state eq 'request_stop') {

                $self->next_state_request_stop($sid, $cd, $sd, $lrm_res);

            } elsif ($last_state eq 'freeze') {

                my $lrm_mode = $sd->{node} ? $lrm_modes->{$sd->{node}} : undef;
                if ($lrm_mode && $lrm_mode eq 'active') { # unfreeze if active again
                    my $state = ($cd->{state} eq 'started') ? 'started' : 'request_stop';
                    $change_service_state->($self, $sid, $state);
                }

            } elsif ($last_state eq 'error') {

                $self->next_state_error($sid, $cd, $sd, $lrm_res);

            } else {

                die "unknown service state '$last_state'";
            }

            # freeze services whose node's LRM went into 'restart' mode
            my $lrm_mode = $sd->{node} ? $lrm_modes->{$sd->{node}} : undef;
            if ($lrm_mode && $lrm_mode eq 'restart') {
                my $state = $sd->{state};
                if ($state eq 'started' || $state eq 'stopped'|| $state eq 'request_stop') {
                    $change_service_state->($self, $sid, 'freeze');
                }
            }

            $repeat = 1 if $sd->{state} ne $last_state;
        }

        # handle fencing
        my $fenced_nodes = {};
        foreach my $sid (sort keys %$ss) {
            my ($service_state, $service_node) = $ss->{$sid}->@{'state', 'node'};
            next if $service_state ne 'fence';

            # ask once per node, cache the answer for this pass
            if (!defined($fenced_nodes->{$service_node})) {
                $fenced_nodes->{$service_node} = $ns->fence_node($service_node) || 0;
            }

            next if !$fenced_nodes->{$service_node};

            # node fence was successful - recover service
            $change_service_state->($self, $sid, 'recovery');
            $repeat = 1; # for faster recovery execution
        }

        # Avoid that a node without services in 'fence' state (e.g., removed
        # manually by admin) is stuck with the 'fence' node state.
        for my $node (sort grep { !defined($fenced_nodes->{$_}) } keys $ns->{status}->%*) {
            next if $ns->get_node_state($node) ne 'fence';

            $haenv->log('notice', "node '$node' in fence state but no services to-fence! admin interference?!");
            $repeat = 1 if $ns->fence_node($node);
        }

        last if !$repeat;
    }

    $self->flush_master_status();
}
602 | ||
a875fbe8 DM |
603 | # functions to compute next service states |
604 | # $cd: service configuration data (read only) | |
605 | # $sd: service status data (read only) | |
606 | # | |
607 | # Note: use change_service_state() to alter state | |
608 | # | |
609 | ||
# State handler for 'request_stop': once the LRM reports a result for our
# stop command, move the service to 'stopped' (success) or 'error' (failure).
# While still waiting, fence the service's node if it has been offline for
# too long.
sub next_state_request_stop {
    my ($self, $sid, $cd, $sd, $lrm_res) = @_;

    my ($haenv, $ns) = ($self->{haenv}, $self->{ns});

    # check result from LRM daemon
    if ($lrm_res) {
        my $exit_code = $lrm_res->{exit_code};
        if ($exit_code != SUCCESS) {
            $haenv->log('err', "service '$sid' stop failed (exit code $exit_code)");
            $change_service_state->($self, $sid, 'error'); # fixme: what state?
        } else {
            $change_service_state->($self, $sid, 'stopped');
        }
        return;
    }

    $change_service_state->($self, $sid, 'fence')
        if $ns->node_is_offline_delayed($sd->{node});
    return;
}
634 | ||
# State handler for 'migrate'/'relocate'/'request_start_balance': evaluate the
# LRM result of the move command and settle the service on the target (on
# success) or back on the source node (on failure/ignore); fence the source
# node if it stays offline while we wait for a result.
sub next_state_migrate_relocate {
    my ($self, $sid, $cd, $sd, $lrm_res) = @_;

    my $haenv = $self->{haenv};
    my $ns = $self->{ns};

    # check result from LRM daemon
    if ($lrm_res) {
        my $exit_code = $lrm_res->{exit_code};
        my $req_state = $cd->{state} eq 'started' ? 'started' : 'request_stop';
        if ($exit_code == SUCCESS) {
            &$change_service_state($self, $sid, $req_state, node => $sd->{target});
            return;
        } elsif ($exit_code == EWRONG_NODE) {
            $haenv->log('err', "service '$sid' - migration failed: service" .
                " registered on wrong node!");
            &$change_service_state($self, $sid, 'error');
        } elsif ($exit_code == IGNORED) {
            $haenv->log(
                "info",
                "service '$sid' - rebalance-on-start request ignored - service already running",
            );
            $change_service_state->($self, $sid, $req_state, node => $sd->{node});
        } else {
            $haenv->log('err', "service '$sid' - migration failed (exit code $exit_code)");
            &$change_service_state($self, $sid, $req_state, node => $sd->{node});
            return;
        }
        # NOTE(review): unlike the SUCCESS and default branches, the
        # EWRONG_NODE and IGNORED branches do not return here, so control
        # falls through to the offline check below — verify this is intended.
    }

    if ($ns->node_is_offline_delayed($sd->{node})) {
        &$change_service_state($self, $sid, 'fence');
        return;
    }
}
670 | ||
# State handler for 'stopped': process queued migrate/relocate/stop commands,
# fence the node if it went offline (unless in maintenance), and request a
# start when the service configuration wants it running. For 'disabled' and
# 'stopped' config states the service simply stays put.
sub next_state_stopped {
    my ($self, $sid, $cd, $sd, $lrm_res) = @_;

    my $haenv = $self->{haenv};
    my $ns = $self->{ns};

    if ($sd->{node} ne $cd->{node}) {
        # this can happen if we fence a node with active migrations
        # hack: modify $sd (normally this should be considered read-only)
        $haenv->log('info', "fixup service '$sid' location ($sd->{node} => $cd->{node})");
        $sd->{node} = $cd->{node};
    }

    # process a queued CRM command, if any
    if ($sd->{cmd}) {
        my $cmd = shift @{$sd->{cmd}};

        if ($cmd eq 'migrate' || $cmd eq 'relocate') {
            my $target = shift @{$sd->{cmd}};
            if (!$ns->node_is_online($target)) {
                $haenv->log('err', "ignore service '$sid' $cmd request - node '$target' not online");
            } elsif ($sd->{node} eq $target) {
                $haenv->log('info', "ignore service '$sid' $cmd request - service already on node '$target'");
            } else {
                &$change_service_state($self, $sid, $cmd, node => $sd->{node}, target => $target);
                return;
            }
        } elsif ($cmd eq 'stop') {
            $haenv->log('info', "ignore service '$sid' $cmd request - service already stopped");
        } else {
            $haenv->log('err', "unknown command '$cmd' for service '$sid'");
        }
        delete $sd->{cmd};
    }

    if ($cd->{state} eq 'disabled') {
        # NOTE: do nothing here, the stop state is an exception as we do not
        # process the LRM result here, thus the LRM always tries to stop the
        # service (protection for the case no CRM is active)
        return;
    }

    if ($ns->node_is_offline_delayed($sd->{node}) && $ns->get_node_state($sd->{node}) ne 'maintenance') {
        &$change_service_state($self, $sid, 'fence');
        return;
    }

    if ($cd->{state} eq 'stopped') {
        # almost the same as 'disabled' state but the service will also get recovered
        return;
    }

    if ($cd->{state} eq 'started') {
        # simply mark it started, if it's on the wrong node next_state_started will fix that for us
        $change_service_state->($self, $sid, 'request_start', node => $sd->{node});
        return;
    }

    $haenv->log('err', "service '$sid' - unknown state '$cd->{state}' in service configuration");
}
730 | ||
# State handler for 'request_start': when rebalance-on-start is enabled, ask
# the scheduler for the best node and, if it differs from the current one,
# transition to 'request_start_balance' (a move before first start);
# otherwise go straight to 'started' on the current node.
sub next_state_request_start {
    my ($self, $sid, $cd, $sd, $lrm_res) = @_;

    my $haenv = $self->{haenv};
    my $current_node = $sd->{node};

    if ($self->{crs}->{rebalance_on_request_start}) {
        my $selected_node = select_service_node(
            $self->{groups},
            $self->{online_node_usage},
            $sid,
            $cd,
            $sd->{node},
            0, # try_next
            $sd->{failed_nodes},
            $sd->{maintenance_node},
            1, # best_score
        );
        my $select_text = $selected_node ne $current_node ? 'new' : 'current';
        $haenv->log('info', "service $sid: re-balance selected $select_text node $selected_node for startup");

        if ($selected_node ne $current_node) {
            $change_service_state->($self, $sid, 'request_start_balance', node => $current_node, target => $selected_node);
            return;
        }
    }

    $change_service_state->($self, $sid, 'started', node => $current_node);
}
760 | ||
# Append $node to the list of nodes on which service $sid failed to start,
# creating the list in the service status on first use.
sub record_service_failed_on_node {
    my ($self, $sid, $node) = @_;

    my $failed = $self->{ss}->{$sid}->{failed_nodes};
    if (!defined($failed)) {
        # initialize through the full path so the list lives in the status hash
        $failed = $self->{ss}->{$sid}->{failed_nodes} = [];
    }

    push @$failed, $node;
}
770 | ||
# Handle a service in the 'started' state. In order:
#  - fence the service's node if it went offline (or record it as maintenance
#    fallback when the node is only in maintenance mode),
#  - honor a config state change to disabled/stopped by requesting a stop,
#  - process a queued user command (migrate/relocate/stop),
#  - otherwise evaluate the last LRM start result, apply the relocate policy on
#    failure, and (re-)select the best node for the service.
# Fixes vs. previous revision: removed unused local $master_status; the
# "unrecoverable error" log message had an unbalanced extra ')'.
sub next_state_started {
    my ($self, $sid, $cd, $sd, $lrm_res) = @_;

    my $haenv = $self->{haenv};
    my $ns = $self->{ns};

    if (!$ns->node_is_online($sd->{node})) {
        if ($ns->node_is_offline_delayed($sd->{node})) {
            &$change_service_state($self, $sid, 'fence');
        }
        if ($ns->get_node_state($sd->{node}) ne 'maintenance') {
            return;
        } else {
            # save current node as fallback for when it comes out of maintenance
            $sd->{maintenance_node} = $sd->{node};
        }
    }

    if ($cd->{state} eq 'disabled' || $cd->{state} eq 'stopped') {
        &$change_service_state($self, $sid, 'request_stop');
        return;
    }

    if ($cd->{state} eq 'started') {

        if ($sd->{cmd}) {
            # process one queued CRM command for this service
            my $cmd = shift @{$sd->{cmd}};

            if ($cmd eq 'migrate' || $cmd eq 'relocate') {
                my $target = shift @{$sd->{cmd}};
                if (!$ns->node_is_online($target)) {
                    $haenv->log('err', "ignore service '$sid' $cmd request - node '$target' not online");
                } elsif ($sd->{node} eq $target) {
                    $haenv->log('info', "ignore service '$sid' $cmd request - service already on node '$target'");
                } else {
                    $haenv->log('info', "$cmd service '$sid' to node '$target'");
                    &$change_service_state($self, $sid, $cmd, node => $sd->{node}, target => $target);
                }
            } elsif ($cmd eq 'stop') {
                my $timeout = shift @{$sd->{cmd}};
                if ($timeout == 0) {
                    $haenv->log('info', "request immediate service hard-stop for service '$sid'");
                } else {
                    $haenv->log('info', "request graceful stop with timeout '$timeout' for service '$sid'");
                }
                &$change_service_state($self, $sid, 'request_stop', timeout => $timeout);
                # persist the stop request so the service stays stopped
                $haenv->update_service_config($sid, {'state' => 'stopped'});
            } else {
                $haenv->log('err', "unknown command '$cmd' for service '$sid'");
            }

            delete $sd->{cmd};

        } else {

            my $try_next = 0;

            if ($lrm_res) {
                # evaluate the result of the last LRM start attempt
                my $ec = $lrm_res->{exit_code};
                if ($ec == SUCCESS) {

                    if (defined($sd->{failed_nodes})) {
                        $haenv->log('info', "relocation policy successful for '$sid' on node '$sd->{node}'," .
                                    " failed nodes: " . join(', ', @{$sd->{failed_nodes}}) );
                    }

                    delete $sd->{failed_nodes};

                    # store flag to indicate successful start - only valid while state == 'started'
                    $sd->{running} = 1;

                } elsif ($ec == ERROR || $ec == EWRONG_NODE) {

                    delete $sd->{running};

                    # apply our relocate policy if we got ERROR from the LRM
                    $self->record_service_failed_on_node($sid, $sd->{node});

                    if (scalar(@{$sd->{failed_nodes}}) <= $cd->{max_relocate}) {

                        # tell select_service_node to relocate if possible
                        $try_next = 1;

                        $haenv->log('warning', "starting service $sid on node".
                                   " '$sd->{node}' failed, relocating service.");

                    } else {

                        $haenv->log('err', "recovery policy for service $sid " .
                                    "failed, entering error state. Failed nodes: ".
                                    join(', ', @{$sd->{failed_nodes}}));
                        &$change_service_state($self, $sid, 'error');
                        return;

                    }
                } else {
                    $self->record_service_failed_on_node($sid, $sd->{node});

                    $haenv->log('err', "service '$sid' got unrecoverable error (exit code $ec)");
                    # we have no safe way out (yet) for other errors
                    &$change_service_state($self, $sid, 'error');
                    return;
                }
            }

            my $node = select_service_node(
                $self->{groups},
                $self->{online_node_usage},
                $sid,
                $cd,
                $sd->{node},
                $try_next,
                $sd->{failed_nodes},
                $sd->{maintenance_node},
            );

            if ($node && ($sd->{node} ne $node)) {
                $self->{online_node_usage}->add_service_usage_to_node($node, $sid, $sd->{node});

                if (defined(my $fallback = $sd->{maintenance_node})) {
                    if ($node eq $fallback) {
                        $haenv->log(
                            'info',
                            "moving service '$sid' back to '$fallback', node came back from maintenance.",
                        );
                        delete $sd->{maintenance_node};
                    } elsif ($sd->{node} ne $fallback) {
                        $haenv->log('info', "dropping maintenance fallback node '$fallback' for '$sid'");
                        delete $sd->{maintenance_node};
                    }
                }

                if ($cd->{type} eq 'vm') {
                    $haenv->log('info', "migrate service '$sid' to node '$node' (running)");
                    &$change_service_state($self, $sid, 'migrate', node => $sd->{node}, target => $node);
                } else {
                    $haenv->log('info', "relocate service '$sid' to node '$node'");
                    &$change_service_state($self, $sid, 'relocate', node => $sd->{node}, target => $node);
                }
            } else {
                if ($try_next && !defined($node)) {
                    $haenv->log(
                        'warning',
                        "Start Error Recovery: Tried all available nodes for service '$sid', retry"
                        ." start on current node. Tried nodes: " . join(', ', @{$sd->{failed_nodes}})
                    );
                }

                if ($sd->{maintenance_node} && $sd->{node} eq $sd->{maintenance_node}) {
                    my $node_state = $ns->get_node_state($sd->{node});
                    if ($node_state eq 'online') {
                        # Having the maintenance node set here means that the service was never
                        # started on a different node since it was set. This can happen in the edge
                        # case that the whole cluster is shut down at the same time while the
                        # 'migrate' policy was configured. Node is not in maintenance mode anymore
                        # and service is started on this node, so it's fine to clear the setting.
                        $haenv->log(
                            'info',
                            "service '$sid': clearing stale maintenance node "
                            ."'$sd->{maintenance_node}' setting (is current node)",
                        );
                        delete $sd->{maintenance_node};
                    }
                }

                # ensure service gets started again if it unexpectedly went down,
                # but ensure also no LRM result gets lost
                $sd->{uid} = compute_new_uuid($sd->{state}) if defined($lrm_res);
            }
        }

        return;
    }

    $haenv->log('err', "service '$sid' - unknown state '$cd->{state}' in service configuration");
}
c0bbd038 | 950 | |
a2881965 TL |
# Handle a service in the 'error' state: the service stays there until an
# administrator intervenes by setting its configured state to 'disabled';
# only then do we clean up the relocation history and move to 'stopped'.
# (Removed previously declared but unused locals $ns and $ms.)
sub next_state_error {
    my ($self, $sid, $cd, $sd, $lrm_res) = @_;

    if ($cd->{state} eq 'disabled') {
        # clean up on error recovery
        delete $sd->{failed_nodes};

        &$change_service_state($self, $sid, 'stopped');
        return;
    }

    return;
}
966 | ||
c259b1a8 TL |
# after a node was fenced this recovers the service to a new node
#
# Selects a recovery target via select_service_node(); on success it cleans up
# fencing leftovers, steals the service from the fenced node, accounts the new
# usage, rewrites the service's node in both config and status, and enters
# 'started' or 'request_stop' depending on the configured state. If no node is
# found the service stays in 'recovery' and is retried on the next iteration,
# unless an admin disabled it - then it is released to 'stopped'.
sub next_state_recovery {
    my ($self, $sid, $cd, $sd, $lrm_res) = @_;

    # NOTE(review): $ss, $ns and $ms are declared but not used below - verify
    # before removing, in case out-of-view code relied on them historically.
    my ($haenv, $ss) = ($self->{haenv}, $self->{ss});
    my $ns = $self->{ns};
    my $ms = $self->{ms};

    if ($sd->{state} ne 'recovery') { # should not happen
	$haenv->log('err', "cannot recover service '$sid' from fencing, wrong state '$sd->{state}'");
	return;
    }

    my $fenced_node = $sd->{node}; # for logging purpose

    $self->recompute_online_node_usage(); # we want the most current node state

    my $recovery_node = select_service_node(
	$self->{groups},
	$self->{online_node_usage},
	$sid,
	$cd,
	$sd->{node},
    );

    if ($recovery_node) {
	my $msg = "recover service '$sid' from fenced node '$fenced_node' to node '$recovery_node'";
	if ($recovery_node eq $fenced_node) {
	    # can happen with restricted groups when the node came up again OK
	    $msg = "recover service '$sid' to previous failed and fenced node '$fenced_node' again";
	}
	$haenv->log('info', "$msg");

	# drop fencing-related leftovers (e.g. pending lock/cleanup state)
	$fence_recovery_cleanup->($self, $sid, $fenced_node);

	$haenv->steal_service($sid, $sd->{node}, $recovery_node);
	$self->{online_node_usage}->add_service_usage_to_node($recovery_node, $sid, $recovery_node);

	# NOTE: $sd *is normally read-only*, fencing is the exception
	$cd->{node} = $sd->{node} = $recovery_node;
	my $new_state = ($cd->{state} eq 'started') ? 'started' : 'request_stop';
	$change_service_state->($self, $sid, $new_state, node => $recovery_node);
    } else {
	# no possible node found, cannot recover - but retry later, as we always try to make it available
	$haenv->log('err', "recovering service '$sid' from fenced node '$fenced_node' failed, no recovery node found");

	if ($cd->{state} eq 'disabled') {
	    # allow getting a service out of recovery manually if an admin disables it.
	    delete $sd->{failed_nodes}; # clean up on recovery to stopped
	    $change_service_state->($self, $sid, 'stopped'); # must NOT go through request_stop
	    return;
	}
    }
}
1021 | ||
c0bbd038 | 1022 | 1; |