package PVE::HA::Manager;

use strict;
use warnings;
use Digest::MD5 qw(md5_base64);

use PVE::Tools;
use PVE::HA::Tools ':exit_codes';
use PVE::HA::NodeStatus;
use PVE::HA::Usage::Basic;
use PVE::HA::Usage::Static;

## Variable Name & Abbreviation Conventions
#
# The HA stack uses some variables so frequently that it abbreviates them, which may be
# confusing for new readers. Here's a short list of the most commonly used ones.
#
# NOTE: if not stated otherwise, these variables should be treated as read-only; only use the
# specific methods to re-compute/read/alter them.
#
# - $haenv -> HA environment, the main interface to the simulator/test/real world
# - $sid -> Service ID, unique identifier for a service, `type:vmid` is common
#
# - $ms -> Master/Manager Status, contains runtime info from the current active manager
# - $ns -> Node Status, hash holding online/offline status about all nodes
#
# - $ss -> Service Status, hash holding the current state (last LRM cmd result, failed starts
#   or migrates, maintenance fallback node, ...) for *all* services
# - $sd -> Service Data, the service status of a *single* service, iow. $ss->{$sid}
#
# - $sc -> Service Configuration, hash for all services including target state, group, ...
# - $cd -> Configuration Data, the service config of a *single* service, iow. $sc->{$sid}
#
# Try to avoid adding new two-letter (or similarly over-abbreviated) names, but also don't
# send patches for changing the above, as that set is mostly sensible and should be easy to
# remember once one has spent a bit of time in the HA code base.
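#
# As a concrete illustration (service ID chosen arbitrarily): for $sid = 'vm:100', we have
# $sd = $ss->{'vm:100'} and $cd = $sc->{'vm:100'}; the current CRM state of that service is
# then $sd->{state}, while the requested target state is $cd->{state}.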

sub new {
    my ($this, $haenv) = @_;

    my $class = ref($this) || $this;

    my $self = bless { haenv => $haenv }, $class;

    my $old_ms = $haenv->read_manager_status();

    # we only copy the state part of the manager which cannot be auto-generated

    $self->{ns} = PVE::HA::NodeStatus->new($haenv, $old_ms->{node_status} || {});

    # fixme: use separate class PVE::HA::ServiceStatus
    $self->{ss} = $old_ms->{service_status} || {};

    $self->{ms} = { master_node => $haenv->nodename() };

    my $dc_cfg = $haenv->get_datacenter_settings();
    $self->{'scheduler-mode'} = $dc_cfg->{crs}->{ha} ? $dc_cfg->{crs}->{ha} : 'basic';
    $haenv->log('info', "using scheduler mode '$self->{'scheduler-mode'}'")
        if $self->{'scheduler-mode'} ne 'basic';

    return $self;
}
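
# Note on the scheduler mode read above: it comes from the datacenter-wide cluster resource
# scheduling (crs) settings, presumably set via a datacenter.cfg entry along the lines of
# `crs: ha=static`; anything unset falls back to the 'basic' mode.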

sub cleanup {
    my ($self) = @_;

    # todo: ?
}

sub flush_master_status {
    my ($self) = @_;

    my ($haenv, $ms, $ns, $ss) = ($self->{haenv}, $self->{ms}, $self->{ns}, $self->{ss});

    $ms->{node_status} = $ns->{status};
    $ms->{service_status} = $ss;
    $ms->{timestamp} = $haenv->get_time();

    $haenv->write_manager_status($ms);
}

sub get_service_group {
    my ($groups, $online_node_usage, $service_conf) = @_;

    my $group = {};
    # add all online nodes to default group to allow try_next when no group set
    $group->{nodes}->{$_} = 1 for $online_node_usage->list_nodes();

    # overwrite default if service is bound to a specific group
    if (my $group_id = $service_conf->{group}) {
        $group = $groups->{ids}->{$group_id} if $groups->{ids}->{$group_id};
    }

    return $group;
}
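
# A group entry, as returned above and consumed by get_node_priority_groups() below, roughly
# looks like this (sketch; field values invented for illustration):
#
#   { nodes => { 'node1:2' => 1, 'node2' => 1 }, restricted => 0, nofailback => 0 }
#
# where an optional ':<prio>' suffix on a node name assigns that node a priority.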

# groups available nodes with their priority as group index
sub get_node_priority_groups {
    my ($group, $online_node_usage) = @_;

    my $pri_groups = {};
    my $group_members = {};
    foreach my $entry (keys %{$group->{nodes}}) {
        my ($node, $pri) = ($entry, 0);
        if ($entry =~ m/^(\S+):(\d+)$/) {
            ($node, $pri) = ($1, $2);
        }
        next if !$online_node_usage->contains_node($node); # offline
        $pri_groups->{$pri}->{$node} = 1;
        $group_members->{$node} = $pri;
    }

    # add non-group members to unrestricted groups (priority -1)
    if (!$group->{restricted}) {
        my $pri = -1;
        for my $node ($online_node_usage->list_nodes()) {
            next if defined($group_members->{$node});
            $pri_groups->{$pri}->{$node} = 1;
            $group_members->{$node} = -1;
        }
    }

    return ($pri_groups, $group_members);
}
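
# Worked example (node names invented): for the group sketched above, i.e.
# nodes => { 'node1:2' => 1, 'node2' => 1 }, unrestricted, with node1..node3 all online,
# this returns:
#
#   $pri_groups    = { 2 => { node1 => 1 }, 0 => { node2 => 1 }, -1 => { node3 => 1 } }
#   $group_members = { node1 => 2, node2 => 0, node3 => -1 }
#
# select_service_node() below then only ever looks at the highest-priority group, here the
# one containing node1.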

sub select_service_node {
    my ($groups, $online_node_usage, $sid, $service_conf, $current_node, $try_next, $tried_nodes, $maintenance_fallback) = @_;

    my $group = get_service_group($groups, $online_node_usage, $service_conf);

    my ($pri_groups, $group_members) = get_node_priority_groups($group, $online_node_usage);

    my @pri_list = sort {$b <=> $a} keys %$pri_groups;
    return undef if !scalar(@pri_list);

    # stay on current node if possible (avoids random migrations)
    if (!$try_next && $group->{nofailback} && defined($group_members->{$current_node})) {
        return $current_node;
    }

    # select node from top priority node list

    my $top_pri = $pri_list[0];

    # try to avoid nodes where the service failed already if we want to relocate
    if ($try_next) {
        foreach my $node (@$tried_nodes) {
            delete $pri_groups->{$top_pri}->{$node};
        }
    }

    my $scores = $online_node_usage->score_nodes_to_start_service($sid, $current_node);
    my @nodes = sort {
        $scores->{$a} <=> $scores->{$b} || $a cmp $b
    } keys %{$pri_groups->{$top_pri}};

    my $found;
    my $found_maintenance_fallback;
    for (my $i = scalar(@nodes) - 1; $i >= 0; $i--) {
        my $node = $nodes[$i];
        if ($node eq $current_node) {
            $found = $i;
        }
        if (defined($maintenance_fallback) && $node eq $maintenance_fallback) {
            $found_maintenance_fallback = $i;
        }
    }

    if (defined($found_maintenance_fallback)) {
        return $nodes[$found_maintenance_fallback];
    }

    if ($try_next) {
        if (defined($found) && ($found < (scalar(@nodes) - 1))) {
            return $nodes[$found + 1];
        } else {
            return $nodes[0];
        }
    } elsif (defined($found)) {
        return $nodes[$found];
    } else {
        return $nodes[0];
    }
}
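
# To summarize the precedence within the top-priority group: a maintenance fallback node that
# is still eligible always wins; with $try_next set, the node following the current one in
# score order is taken (wrapping around to the front); otherwise the service stays on its
# current node if eligible, else the first node in score order, i.e. the one with the lowest
# usage score, is chosen.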

my $uid_counter = 0;

sub compute_new_uuid {
    my ($state) = @_;

    $uid_counter++;
    return md5_base64($state . $$ . time() . $uid_counter);
}

my $valid_service_states = {
    stopped => 1,
    request_stop => 1,
    started => 1,
    fence => 1,
    recovery => 1,
    migrate => 1,
    relocate => 1,
    freeze => 1,
    error => 1,
};
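
# These CRM states drive the per-service state machine in manage() below: most have a
# dedicated next_state_* handler, 'freeze' is handled inline there, and 'fence' parks a
# service until its node was fenced successfully, after which it moves to 'recovery'.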

# FIXME with 'static' mode and thousands of services, the overhead can be noticeable and the
# fact that this function is called for each state change and upon recovery doesn't help.
sub recompute_online_node_usage {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    my $online_nodes = $self->{ns}->list_online_nodes();

    my $online_node_usage;

    if (my $mode = $self->{'scheduler-mode'}) {
        if ($mode eq 'static') {
            $online_node_usage = eval {
                my $scheduler = PVE::HA::Usage::Static->new($haenv);
                $scheduler->add_node($_) for $online_nodes->@*;
                return $scheduler;
            };
            $haenv->log('warning', "using 'basic' scheduler mode, init for 'static' failed - $@")
                if $@;
        } elsif ($mode ne 'basic') {
            $haenv->log('warning', "got unknown scheduler mode '$mode', using 'basic'");
        }
    }

    if (!$online_node_usage) {
        $online_node_usage = PVE::HA::Usage::Basic->new($haenv);
        $online_node_usage->add_node($_) for $online_nodes->@*;
    }

    foreach my $sid (keys %{$self->{ss}}) {
        my $sd = $self->{ss}->{$sid};
        my $state = $sd->{state};
        my $target = $sd->{target}; # optional
        if ($online_node_usage->contains_node($sd->{node})) {
            if (
                $state eq 'started' || $state eq 'request_stop' || $state eq 'fence' ||
                $state eq 'freeze' || $state eq 'error' || $state eq 'recovery'
            ) {
                $online_node_usage->add_service_usage_to_node($sd->{node}, $sid, $sd->{node});
            } elsif (($state eq 'migrate') || ($state eq 'relocate')) {
                my $source = $sd->{node};
                # count it for both, source and target as load is put on both
                $online_node_usage->add_service_usage_to_node($source, $sid, $source, $target);
                $online_node_usage->add_service_usage_to_node($target, $sid, $source, $target);
            } elsif ($state eq 'stopped') {
                # do nothing
            } else {
                die "should not be reached (sid = '$sid', state = '$state')";
            }
        } elsif (defined($target) && $online_node_usage->contains_node($target)) {
            if ($state eq 'migrate' || $state eq 'relocate') {
                # to correctly track maintenance modes and also consider the target as used
                # for the case a node dies, as we cannot really know if the to-be-aborted
                # incoming migration has already cleaned up all used resources
                $online_node_usage->add_service_usage_to_node($target, $sid, $sd->{node}, $target);
            }
        }
    }

    $self->{online_node_usage} = $online_node_usage;
}
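
# Both usage implementations share the small interface used above and in
# select_service_node(): add_node(), contains_node(), list_nodes(),
# add_service_usage_to_node() and score_nodes_to_start_service(). PVE::HA::Usage::Basic
# presumably just counts services per node, while PVE::HA::Usage::Static additionally weighs
# in static resource information, which is why its setup can fail and is guarded by the eval
# above.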

my $change_service_state = sub {
    my ($self, $sid, $new_state, %params) = @_;

    my ($haenv, $ss) = ($self->{haenv}, $self->{ss});

    my $sd = $ss->{$sid} || die "no such service '$sid'";

    my $old_state = $sd->{state};
    my $old_node = $sd->{node};
    my $old_failed_nodes = $sd->{failed_nodes};
    my $old_maintenance_node = $sd->{maintenance_node};

    die "no state change" if $old_state eq $new_state; # just to be sure

    die "invalid CRM service state '$new_state'\n" if !$valid_service_states->{$new_state};

    foreach my $k (keys %$sd) { delete $sd->{$k}; };

    $sd->{state} = $new_state;
    $sd->{node} = $old_node;
    $sd->{failed_nodes} = $old_failed_nodes if defined($old_failed_nodes);
    $sd->{maintenance_node} = $old_maintenance_node if defined($old_maintenance_node);

    my $text_state = '';
    foreach my $k (sort keys %params) {
        my $v = $params{$k};
        $text_state .= ", " if $text_state;
        $text_state .= "$k = $v";
        $sd->{$k} = $v;
    }

    $self->recompute_online_node_usage();

    $sd->{uid} = compute_new_uuid($new_state);

    $text_state = " ($text_state)" if $text_state;
    $haenv->log('info', "service '$sid': state changed from '${old_state}'" .
        " to '${new_state}'$text_state");
};
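
# Note that any extra %params end up as keys of the (otherwise wiped) service data, e.g.
# `node`, `target` or `timeout` in the callers below, and that every state change allocates a
# fresh uid, which incoming LRM results are later matched against (see read_lrm_status()).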

# clean up a possible bad state from a recovered service to allow its start
my $fence_recovery_cleanup = sub {
    my ($self, $sid, $fenced_node) = @_;

    my $haenv = $self->{haenv};

    my (undef, $type, $id) = $haenv->parse_sid($sid);
    my $plugin = PVE::HA::Resources->lookup($type);

    # should not happen
    die "unknown resource type '$type'" if !$plugin;

    # locks may block recovery, clean up those which are safe to remove after fencing,
    # i.e., after the original node was reset and thus lost all its state
    my $removable_locks = [
        'backup',
        'mounted',
        'migrate',
        'clone',
        'rollback',
        'snapshot',
        'snapshot-delete',
        'suspending',
        'suspended',
    ];
    if (my $removed_lock = $plugin->remove_locks($haenv, $id, $removable_locks, $fenced_node)) {
        $haenv->log('warning', "removed leftover lock '$removed_lock' from recovered " .
            "service '$sid' to allow its start.");
    }
};

# read LRM status for all nodes
sub read_lrm_status {
    my ($self) = @_;

    my $nodes = $self->{ns}->list_nodes();
    my $haenv = $self->{haenv};

    my $results = {};
    my $modes = {};
    foreach my $node (@$nodes) {
        my $lrm_status = $haenv->read_lrm_status($node);
        $modes->{$node} = $lrm_status->{mode} || 'active';
        foreach my $uid (keys %{$lrm_status->{results}}) {
            next if $results->{$uid}; # should not happen
            $results->{$uid} = $lrm_status->{results}->{$uid};
        }
    }

    return ($results, $modes);
}

# read new crm commands and save them into crm master status
sub update_crm_commands {
    my ($self) = @_;

    my ($haenv, $ms, $ns, $ss) = ($self->{haenv}, $self->{ms}, $self->{ns}, $self->{ss});

    my $cmdlist = $haenv->read_crm_commands();

    foreach my $cmd (split(/\n/, $cmdlist)) {
        chomp $cmd;

        if ($cmd =~ m/^(migrate|relocate)\s+(\S+)\s+(\S+)$/) {
            my ($task, $sid, $node) = ($1, $2, $3);
            if (my $sd = $ss->{$sid}) {
                if (!$ns->node_is_online($node)) {
                    $haenv->log('err', "crm command error - node not online: $cmd");
                } else {
                    if ($node eq $sd->{node}) {
                        $haenv->log('info', "ignore crm command - service already on target node: $cmd");
                    } else {
                        $haenv->log('info', "got crm command: $cmd");
                        $ss->{$sid}->{cmd} = [ $task, $node ];
                    }
                }
            } else {
                $haenv->log('err', "crm command error - no such service: $cmd");
            }

        } elsif ($cmd =~ m/^stop\s+(\S+)\s+(\S+)$/) {
            my ($sid, $timeout) = ($1, $2);
            if (my $sd = $ss->{$sid}) {
                $haenv->log('info', "got crm command: $cmd");
                $ss->{$sid}->{cmd} = [ 'stop', $timeout ];
            } else {
                $haenv->log('err', "crm command error - no such service: $cmd");
            }
        } else {
            $haenv->log('err', "unable to parse crm command: $cmd");
        }
    }
}
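
# The accepted command strings are thus line-based and of the following forms (service ID,
# node name and timeout chosen arbitrarily):
#
#   migrate vm:100 node2
#   relocate vm:100 node2
#   stop vm:100 180
#
# where the trailing argument of 'stop' is the stop timeout (a value of 0 requests an
# immediate hard-stop) and the queued command is later picked up via $sd->{cmd} in
# next_state_stopped() and next_state_started().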

sub manage {
    my ($self) = @_;

    my ($haenv, $ms, $ns, $ss) = ($self->{haenv}, $self->{ms}, $self->{ns}, $self->{ss});

    my ($node_info) = $haenv->get_node_info();
    my ($lrm_results, $lrm_modes) = $self->read_lrm_status();

    $ns->update($node_info, $lrm_modes);

    if (!$ns->node_is_operational($haenv->nodename())) {
        $haenv->log('info', "master seems offline");
        return;
    }

    my $sc = $haenv->read_service_config();

    $self->{groups} = $haenv->read_group_config(); # update

    # compute new service status

    # add new services
    foreach my $sid (sort keys %$sc) {
        next if $ss->{$sid}; # already there
        my $cd = $sc->{$sid};
        next if $cd->{state} eq 'ignored';

        $haenv->log('info', "adding new service '$sid' on node '$cd->{node}'");
        # assume it is running to avoid relocating a running service right after it was added
        my $state = ($cd->{state} eq 'started') ? 'started' : 'request_stop';
        $ss->{$sid} = { state => $state, node => $cd->{node},
            uid => compute_new_uuid('started') };
    }

    # remove stale or ignored services from manager state
    foreach my $sid (keys %$ss) {
        next if $sc->{$sid} && $sc->{$sid}->{state} ne 'ignored';

        my $reason = defined($sc->{$sid}) ? 'ignored state requested' : 'no config';
        $haenv->log('info', "removing stale service '$sid' ($reason)");

        # remove all service related state information
        delete $ss->{$sid};
    }

    $self->update_crm_commands();

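    # The loop below is effectively a fixed-point iteration: each pass runs every service's
    # state handler once, and $repeat is set whenever a service changed state (or fencing made
    # progress), so the handlers can react to the new state within the same manage() run.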
    for (;;) {
        my $repeat = 0;

        $self->recompute_online_node_usage();

        foreach my $sid (sort keys %$ss) {
            my $sd = $ss->{$sid};
            my $cd = $sc->{$sid} || { state => 'disabled' };

            my $lrm_res = $sd->{uid} ? $lrm_results->{$sd->{uid}} : undef;

            my $last_state = $sd->{state};

            if ($last_state eq 'stopped') {

                $self->next_state_stopped($sid, $cd, $sd, $lrm_res);

            } elsif ($last_state eq 'started') {

                $self->next_state_started($sid, $cd, $sd, $lrm_res);

            } elsif ($last_state eq 'migrate' || $last_state eq 'relocate') {

                $self->next_state_migrate_relocate($sid, $cd, $sd, $lrm_res);

            } elsif ($last_state eq 'fence') {

                # do nothing here - wait until fenced

            } elsif ($last_state eq 'recovery') {

                $self->next_state_recovery($sid, $cd, $sd, $lrm_res);

            } elsif ($last_state eq 'request_stop') {

                $self->next_state_request_stop($sid, $cd, $sd, $lrm_res);

            } elsif ($last_state eq 'freeze') {

                my $lrm_mode = $sd->{node} ? $lrm_modes->{$sd->{node}} : undef;
                # unfreeze
                my $state = ($cd->{state} eq 'started') ? 'started' : 'request_stop';
                &$change_service_state($self, $sid, $state)
                    if $lrm_mode && $lrm_mode eq 'active';

            } elsif ($last_state eq 'error') {

                $self->next_state_error($sid, $cd, $sd, $lrm_res);

            } else {

                die "unknown service state '$last_state'";
            }

            my $lrm_mode = $sd->{node} ? $lrm_modes->{$sd->{node}} : undef;
            if ($lrm_mode && $lrm_mode eq 'restart') {
                if (($sd->{state} eq 'started' || $sd->{state} eq 'stopped' ||
                    $sd->{state} eq 'request_stop')) {
                    &$change_service_state($self, $sid, 'freeze');
                }
            }

            $repeat = 1 if $sd->{state} ne $last_state;
        }

        # handle fencing
        my $fenced_nodes = {};
        foreach my $sid (sort keys %$ss) {
            my ($service_state, $service_node) = $ss->{$sid}->@{'state', 'node'};
            next if $service_state ne 'fence';

            if (!defined($fenced_nodes->{$service_node})) {
                $fenced_nodes->{$service_node} = $ns->fence_node($service_node) || 0;
            }

            next if !$fenced_nodes->{$service_node};

            # node fence was successful - recover service
            $change_service_state->($self, $sid, 'recovery');
            $repeat = 1; # for faster recovery execution
        }

        # Avoid that a node without any service in 'fence' state (e.g., if they were all
        # removed manually by an admin) gets stuck in the 'fence' node state.
        for my $node (sort grep { !defined($fenced_nodes->{$_}) } keys $ns->{status}->%*) {
            next if $ns->get_node_state($node) ne 'fence';

            $haenv->log('notice', "node '$node' in fence state but no services to-fence! admin interference?!");
            $repeat = 1 if $ns->fence_node($node);
        }

        last if !$repeat;
    }

    $self->flush_master_status();
}

# functions to compute next service states
# $cd: service configuration data (read-only)
# $sd: service status data (read-only)
#
# Note: use change_service_state() to alter state
#

sub next_state_request_stop {
    my ($self, $sid, $cd, $sd, $lrm_res) = @_;

    my $haenv = $self->{haenv};
    my $ns = $self->{ns};

    # check result from LRM daemon
    if ($lrm_res) {
        my $exit_code = $lrm_res->{exit_code};
        if ($exit_code == SUCCESS) {
            &$change_service_state($self, $sid, 'stopped');
            return;
        } else {
            $haenv->log('err', "service '$sid' stop failed (exit code $exit_code)");
            &$change_service_state($self, $sid, 'error'); # fixme: what state?
            return;
        }
    }

    if ($ns->node_is_offline_delayed($sd->{node})) {
        &$change_service_state($self, $sid, 'fence');
        return;
    }
}

sub next_state_migrate_relocate {
    my ($self, $sid, $cd, $sd, $lrm_res) = @_;

    my $haenv = $self->{haenv};
    my $ns = $self->{ns};

    # check result from LRM daemon
    if ($lrm_res) {
        my $exit_code = $lrm_res->{exit_code};
        my $req_state = $cd->{state} eq 'started' ? 'started' : 'request_stop';
        if ($exit_code == SUCCESS) {
            &$change_service_state($self, $sid, $req_state, node => $sd->{target});
            return;
        } elsif ($exit_code == EWRONG_NODE) {
            $haenv->log('err', "service '$sid' - migration failed: service" .
                " registered on wrong node!");
            &$change_service_state($self, $sid, 'error');
        } else {
            $haenv->log('err', "service '$sid' - migration failed (exit code $exit_code)");
            &$change_service_state($self, $sid, $req_state, node => $sd->{node});
            return;
        }
    }

    if ($ns->node_is_offline_delayed($sd->{node})) {
        &$change_service_state($self, $sid, 'fence');
        return;
    }
}

sub next_state_stopped {
    my ($self, $sid, $cd, $sd, $lrm_res) = @_;

    my $haenv = $self->{haenv};
    my $ns = $self->{ns};

    if ($sd->{node} ne $cd->{node}) {
        # this can happen if we fence a node with active migrations
        # hack: modify $sd (normally this should be considered read-only)
        $haenv->log('info', "fixup service '$sid' location ($sd->{node} => $cd->{node})");
        $sd->{node} = $cd->{node};
    }

    if ($sd->{cmd}) {
        my $cmd = shift @{$sd->{cmd}};

        if ($cmd eq 'migrate' || $cmd eq 'relocate') {
            my $target = shift @{$sd->{cmd}};
            if (!$ns->node_is_online($target)) {
                $haenv->log('err', "ignore service '$sid' $cmd request - node '$target' not online");
            } elsif ($sd->{node} eq $target) {
                $haenv->log('info', "ignore service '$sid' $cmd request - service already on node '$target'");
            } else {
                &$change_service_state($self, $sid, $cmd, node => $sd->{node},
                    target => $target);
                return;
            }
        } elsif ($cmd eq 'stop') {
            $haenv->log('info', "ignore service '$sid' $cmd request - service already stopped");
        } else {
            $haenv->log('err', "unknown command '$cmd' for service '$sid'");
        }
        delete $sd->{cmd};
    }

    if ($cd->{state} eq 'disabled') {
        # NOTE: do nothing here, the stop state is an exception as we do not
        # process the LRM result here, thus the LRM always tries to stop the
        # service (protection for the case no CRM is active)
        return;
    }

    if ($ns->node_is_offline_delayed($sd->{node}) && $ns->get_node_state($sd->{node}) ne 'maintenance') {
        &$change_service_state($self, $sid, 'fence');
        return;
    }

    if ($cd->{state} eq 'stopped') {
        # almost the same as 'disabled' state but the service will also get recovered
        return;
    }

    if ($cd->{state} eq 'started') {
        # simply mark it started, if it's on the wrong node
        # next_state_started will fix that for us
        &$change_service_state($self, $sid, 'started', node => $sd->{node});
        return;
    }

    $haenv->log('err', "service '$sid' - unknown state '$cd->{state}' in service configuration");
}

sub record_service_failed_on_node {
    my ($self, $sid, $node) = @_;

    if (!defined($self->{ss}->{$sid}->{failed_nodes})) {
        $self->{ss}->{$sid}->{failed_nodes} = [];
    }

    push @{$self->{ss}->{$sid}->{failed_nodes}}, $node;
}

sub next_state_started {
    my ($self, $sid, $cd, $sd, $lrm_res) = @_;

    my $haenv = $self->{haenv};
    my $master_status = $self->{ms};
    my $ns = $self->{ns};

    if (!$ns->node_is_online($sd->{node})) {
        if ($ns->node_is_offline_delayed($sd->{node})) {
            &$change_service_state($self, $sid, 'fence');
        }
        if ($ns->get_node_state($sd->{node}) ne 'maintenance') {
            return;
        } else {
            # save current node as fallback for when it comes out of maintenance
            $sd->{maintenance_node} = $sd->{node};
        }
    }

    if ($cd->{state} eq 'disabled' || $cd->{state} eq 'stopped') {
        &$change_service_state($self, $sid, 'request_stop');
        return;
    }

    if ($cd->{state} eq 'started') {

        if ($sd->{cmd}) {
            my $cmd = shift @{$sd->{cmd}};

            if ($cmd eq 'migrate' || $cmd eq 'relocate') {
                my $target = shift @{$sd->{cmd}};
                if (!$ns->node_is_online($target)) {
                    $haenv->log('err', "ignore service '$sid' $cmd request - node '$target' not online");
                } elsif ($sd->{node} eq $target) {
                    $haenv->log('info', "ignore service '$sid' $cmd request - service already on node '$target'");
                } else {
                    $haenv->log('info', "$cmd service '$sid' to node '$target'");
                    &$change_service_state($self, $sid, $cmd, node => $sd->{node}, target => $target);
                }
            } elsif ($cmd eq 'stop') {
                my $timeout = shift @{$sd->{cmd}};
                if ($timeout == 0) {
                    $haenv->log('info', "request immediate service hard-stop for service '$sid'");
                } else {
                    $haenv->log('info', "request graceful stop with timeout '$timeout' for service '$sid'");
                }
                &$change_service_state($self, $sid, 'request_stop', timeout => $timeout);
                $haenv->update_service_config($sid, {'state' => 'stopped'});
            } else {
                $haenv->log('err', "unknown command '$cmd' for service '$sid'");
            }

            delete $sd->{cmd};

        } else {

            my $try_next = 0;

            if ($lrm_res) {

                my $ec = $lrm_res->{exit_code};
                if ($ec == SUCCESS) {

                    if (defined($sd->{failed_nodes})) {
                        $haenv->log('info', "relocation policy successful for '$sid' on node '$sd->{node}'," .
                            " failed nodes: " . join(', ', @{$sd->{failed_nodes}}) );
                    }

                    delete $sd->{failed_nodes};

                    # store flag to indicate successful start - only valid while state == 'started'
                    $sd->{running} = 1;

                } elsif ($ec == ERROR) {

                    delete $sd->{running};

                    # apply our relocate policy if we got ERROR from the LRM
                    $self->record_service_failed_on_node($sid, $sd->{node});

                    if (scalar(@{$sd->{failed_nodes}}) <= $cd->{max_relocate}) {

                        # tell select_service_node to relocate if possible
                        $try_next = 1;

                        $haenv->log('warning', "starting service $sid on node" .
                            " '$sd->{node}' failed, relocating service.");

                    } else {

                        $haenv->log('err', "recovery policy for service $sid " .
                            "failed, entering error state. Failed nodes: " .
                            join(', ', @{$sd->{failed_nodes}}));
                        &$change_service_state($self, $sid, 'error');
                        return;

                    }
                } else {
                    $self->record_service_failed_on_node($sid, $sd->{node});

                    $haenv->log('err', "service '$sid' got unrecoverable error (exit code $ec)");
                    # we have no safe way out (yet) for other errors
                    &$change_service_state($self, $sid, 'error');
                    return;
                }
            }

            my $node = select_service_node(
                $self->{groups},
                $self->{online_node_usage},
                $sid,
                $cd,
                $sd->{node},
                $try_next,
                $sd->{failed_nodes},
                $sd->{maintenance_node},
            );

            if ($node && ($sd->{node} ne $node)) {
                $self->{online_node_usage}->add_service_usage_to_node($node, $sid, $sd->{node});

                if (defined(my $fallback = $sd->{maintenance_node})) {
                    if ($node eq $fallback) {
                        $haenv->log('info', "moving service '$sid' back to '$fallback', node came back from maintenance.");
                        delete $sd->{maintenance_node};
                    } elsif ($sd->{node} ne $fallback) {
                        $haenv->log('info', "dropping maintenance fallback node '$fallback' for '$sid'");
                        delete $sd->{maintenance_node};
                    }
                }

                if ($cd->{type} eq 'vm') {
                    $haenv->log('info', "migrate service '$sid' to node '$node' (running)");
                    &$change_service_state($self, $sid, 'migrate', node => $sd->{node}, target => $node);
                } else {
                    $haenv->log('info', "relocate service '$sid' to node '$node'");
                    &$change_service_state($self, $sid, 'relocate', node => $sd->{node}, target => $node);
                }
            } else {
                if ($try_next && !defined($node)) {
                    $haenv->log('warning', "Start Error Recovery: Tried all available" .
                        " nodes for service '$sid', retry start on current node. " .
                        "Tried nodes: " . join(', ', @{$sd->{failed_nodes}}));
                }
                # ensure the service gets started again if it went down unexpectedly,
                # but also make sure no LRM result gets lost
                $sd->{uid} = compute_new_uuid($sd->{state}) if defined($lrm_res);
            }
        }

        return;
    }

    $haenv->log('err', "service '$sid' - unknown state '$cd->{state}' in service configuration");
}
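
# Taken together, the started handler implements the relocation policy: on an ERROR result
# from the LRM the failing node is recorded in $sd->{failed_nodes} and, as long as their
# count stays within the service's max_relocate setting, select_service_node() is asked (via
# $try_next) for a different node; once that budget is exhausted the service enters the
# 'error' state.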

sub next_state_error {
    my ($self, $sid, $cd, $sd, $lrm_res) = @_;

    my $ns = $self->{ns};
    my $ms = $self->{ms};

    if ($cd->{state} eq 'disabled') {
        # clean up on error recovery
        delete $sd->{failed_nodes};

        &$change_service_state($self, $sid, 'stopped');
        return;
    }
}
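
# The 'error' state is deliberately sticky: as seen above, the only automatic way out is the
# admin disabling the service, which clears the failed-nodes record and moves it to
# 'stopped'.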

# after a node was fenced this recovers the service to a new node
sub next_state_recovery {
    my ($self, $sid, $cd, $sd, $lrm_res) = @_;

    my ($haenv, $ss) = ($self->{haenv}, $self->{ss});
    my $ns = $self->{ns};
    my $ms = $self->{ms};

    if ($sd->{state} ne 'recovery') { # should not happen
        $haenv->log('err', "cannot recover service '$sid' from fencing, wrong state '$sd->{state}'");
        return;
    }

    my $fenced_node = $sd->{node}; # for logging purposes

    $self->recompute_online_node_usage(); # we want the most current node state

    my $recovery_node = select_service_node(
        $self->{groups},
        $self->{online_node_usage},
        $sid,
        $cd,
        $sd->{node},
    );

    if ($recovery_node) {
        my $msg = "recover service '$sid' from fenced node '$fenced_node' to node '$recovery_node'";
        if ($recovery_node eq $fenced_node) {
            # can happen if restricted groups are used and the node came up again OK
            $msg = "recover service '$sid' to previous failed and fenced node '$fenced_node' again";
        }
        $haenv->log('info', "$msg");

        $fence_recovery_cleanup->($self, $sid, $fenced_node);

        $haenv->steal_service($sid, $sd->{node}, $recovery_node);
        $self->{online_node_usage}->add_service_usage_to_node($recovery_node, $sid, $recovery_node);

        # NOTE: $sd *is normally read-only*, fencing is the exception
        $cd->{node} = $sd->{node} = $recovery_node;
        my $new_state = ($cd->{state} eq 'started') ? 'started' : 'request_stop';
        $change_service_state->($self, $sid, $new_state, node => $recovery_node);
    } else {
        # no possible node found, cannot recover - but retry later, as we always try to make it available
        $haenv->log('err', "recovering service '$sid' from fenced node '$fenced_node' failed, no recovery node found");

        if ($cd->{state} eq 'disabled') {
            # allow getting a service out of recovery manually if an admin disables it.
            delete $sd->{failed_nodes}; # clean up on recovery to stopped
            $change_service_state->($self, $sid, 'stopped'); # must NOT go through request_stop
            return;
        }
    }
}

1;