package PVE::HA::Manager;

use strict;
use warnings;
use Digest::MD5 qw(md5_base64);

use PVE::Tools;
use PVE::HA::Tools ':exit_codes';
use PVE::HA::NodeStatus;

## Variable Name & Abbreviation Conventions
#
# The HA stack uses some variables so frequently that they get abbreviated, which may be
# confusing for new readers. Here's a short list of the most commonly used ones.
#
# NOTE: variables should be assumed to be read-only if not otherwise stated; only use the
# specific methods to re-compute/read/alter them.
#
# - $haenv -> HA environment, the main interface to the simulator/test/real world
# - $sid -> Service ID, unique identifier for a service, `type:vmid` is common
#
# - $ms -> Master/Manager Status, contains runtime info from the current active manager
# - $ns -> Node Status, hash holding online/offline status about all nodes
#
# - $ss -> Service Status, hash holding the current state (last LRM cmd result, failed
#   starts or migrates, maintenance fallback node, ...) for *all* services
# - $sd -> Service Data, the service status of a *single* service, iow. $ss->{$sid}
#
# - $sc -> Service Configuration, hash for all services including target state, group, ...
# - $cd -> Configuration Data, the service config of a *single* service, iow. $sc->{$sid}
#
# Try to avoid adding new two-letter (or similarly over-abbreviated) names, but also don't
# send patches for changing the above, as that set is mostly sensible and should be easy
# to remember once one has spent a bit of time in the HA code base.
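#
# Illustrative sketch only (not part of the original conventions): a single $sd entry, as
# created and updated by the code below, typically looks roughly like:
#
#   $ss->{'vm:100'} = {
#       state => 'started',      # one of the states listed in $valid_service_states
#       node => 'node1',         # node the service currently runs on
#       uid => '...',            # UID of the last CRM command issued for this service
#       failed_nodes => [...],   # optional: nodes where a start attempt already failed
#   };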

sub new {
    my ($this, $haenv) = @_;

    my $class = ref($this) || $this;

    my $self = bless { haenv => $haenv }, $class;

    my $old_ms = $haenv->read_manager_status();

    # we only copy the state part of the manager which cannot be auto-generated

    $self->{ns} = PVE::HA::NodeStatus->new($haenv, $old_ms->{node_status} || {});

    # fixme: use separate class PVE::HA::ServiceStatus
    $self->{ss} = $old_ms->{service_status} || {};

    $self->{ms} = { master_node => $haenv->nodename() };

    return $self;
}

sub cleanup {
    my ($self) = @_;

    # todo: ?
}

sub flush_master_status {
    my ($self) = @_;

    my ($haenv, $ms, $ns, $ss) = ($self->{haenv}, $self->{ms}, $self->{ns}, $self->{ss});

    $ms->{node_status} = $ns->{status};
    $ms->{service_status} = $ss;
    $ms->{timestamp} = $haenv->get_time();

    $haenv->write_manager_status($ms);
}

sub get_service_group {
    my ($groups, $online_node_usage, $service_conf) = @_;

    my $group = {};
    # add all online nodes to default group to allow try_next when no group set
    foreach my $node (keys %$online_node_usage) {
        $group->{nodes}->{$node} = 1;
    }

    # overwrite default if service is bound to a specific group
    if (my $group_id = $service_conf->{group}) {
        $group = $groups->{ids}->{$group_id} if $groups->{ids}->{$group_id};
    }

    return $group;
}

# group the available nodes by their priority; group node entries are either plain node
# names (priority 0) or `node:priority` pairs
sub get_node_priority_groups {
    my ($group, $online_node_usage) = @_;

    my $pri_groups = {};
    my $group_members = {};
    foreach my $entry (keys %{$group->{nodes}}) {
        my ($node, $pri) = ($entry, 0);
        if ($entry =~ m/^(\S+):(\d+)$/) {
            ($node, $pri) = ($1, $2);
        }
        next if !defined($online_node_usage->{$node}); # offline
        $pri_groups->{$pri}->{$node} = 1;
        $group_members->{$node} = $pri;
    }

    # add non-group members to unrestricted groups (priority -1)
    if (!$group->{restricted}) {
        my $pri = -1;
        foreach my $node (keys %$online_node_usage) {
            next if defined($group_members->{$node});
            $pri_groups->{$pri}->{$node} = 1;
            $group_members->{$node} = -1;
        }
    }

    return ($pri_groups, $group_members);
}
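
# Select the node a service should run on. Honors the group's nofailback flag, prefers a
# set $maintenance_fallback node, and otherwise picks the least-used node of the highest
# priority group; with $try_next set, nodes in $tried_nodes are skipped and the node after
# $current_node is tried next.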
sub select_service_node {
    my ($groups, $online_node_usage, $service_conf, $current_node, $try_next,
        $tried_nodes, $maintenance_fallback) = @_;

    my $group = get_service_group($groups, $online_node_usage, $service_conf);

    my ($pri_groups, $group_members) = get_node_priority_groups($group, $online_node_usage);

    my @pri_list = sort {$b <=> $a} keys %$pri_groups;
    return undef if !scalar(@pri_list);

    # stay on current node if possible (avoids random migrations)
    if (!$try_next && $group->{nofailback} && defined($group_members->{$current_node})) {
        return $current_node;
    }

    # select node from top priority node list

    my $top_pri = $pri_list[0];

    # try to avoid nodes where the service failed already if we want to relocate
    if ($try_next) {
        foreach my $node (@$tried_nodes) {
            delete $pri_groups->{$top_pri}->{$node};
        }
    }

    my @nodes = sort {
        $online_node_usage->{$a} <=> $online_node_usage->{$b} || $a cmp $b
    } keys %{$pri_groups->{$top_pri}};

    my $found;
    my $found_maintenance_fallback;
    for (my $i = scalar(@nodes) - 1; $i >= 0; $i--) {
        my $node = $nodes[$i];
        if ($node eq $current_node) {
            $found = $i;
        }
        if (defined($maintenance_fallback) && $node eq $maintenance_fallback) {
            $found_maintenance_fallback = $i;
        }
    }

    if (defined($found_maintenance_fallback)) {
        return $nodes[$found_maintenance_fallback];
    }

    if ($try_next) {
        if (defined($found) && ($found < (scalar(@nodes) - 1))) {
            return $nodes[$found + 1];
        } else {
            return $nodes[0];
        }
    } elsif (defined($found)) {
        return $nodes[$found];
    } else {
        return $nodes[0];
    }
}
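
# UIDs identify individual CRM commands: the LRM reports results per UID (see
# read_lrm_status) and the manager generates a fresh one on every service state change.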
my $uid_counter = 0;

sub compute_new_uuid {
    my ($state) = @_;

    $uid_counter++;
    return md5_base64($state . $$ . time() . $uid_counter);
}

my $valid_service_states = {
    stopped => 1,
    request_stop => 1,
    started => 1,
    fence => 1,
    recovery => 1,
    migrate => 1,
    relocate => 1,
    freeze => 1,
    error => 1,
};
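
# Transitions between these states are computed by the next_state_* methods and by
# manage() below.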
sub recompute_online_node_usage {
    my ($self) = @_;

    my $online_node_usage = {};

    my $online_nodes = $self->{ns}->list_online_nodes();

    foreach my $node (@$online_nodes) {
        $online_node_usage->{$node} = 0;
    }

    foreach my $sid (keys %{$self->{ss}}) {
        my $sd = $self->{ss}->{$sid};
        my $state = $sd->{state};
        my $target = $sd->{target}; # optional
        if (defined($online_node_usage->{$sd->{node}})) {
            if (
                $state eq 'started' || $state eq 'request_stop' || $state eq 'fence' ||
                $state eq 'freeze' || $state eq 'error' || $state eq 'recovery'
            ) {
                $online_node_usage->{$sd->{node}}++;
            } elsif (($state eq 'migrate') || ($state eq 'relocate')) {
                # count it for both source and target, as load is put on both
                $online_node_usage->{$sd->{node}}++;
                $online_node_usage->{$target}++;
            } elsif ($state eq 'stopped') {
                # do nothing
            } else {
                die "should not be reached (sid = '$sid', state = '$state')";
            }
        } elsif (defined($target) && defined($online_node_usage->{$target})) {
            if ($state eq 'migrate' || $state eq 'relocate') {
                # to correctly track maintenance mode, and to also consider the target as
                # used in case the node dies, as we cannot really know if the to-be-aborted
                # incoming migration has already cleaned up all used resources
                $online_node_usage->{$target}++;
            }
        }
    }

    $self->{online_node_usage} = $online_node_usage;
}
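
# Private helper to transition a service into a new state: resets the volatile parts of
# the service data (keeping node, failed_nodes and maintenance_node), applies the extra
# %params to it, and assigns a fresh command UID for the new state.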
my $change_service_state = sub {
    my ($self, $sid, $new_state, %params) = @_;

    my ($haenv, $ss) = ($self->{haenv}, $self->{ss});

    my $sd = $ss->{$sid} || die "no such service '$sid'";

    my $old_state = $sd->{state};
    my $old_node = $sd->{node};
    my $old_failed_nodes = $sd->{failed_nodes};
    my $old_maintenance_node = $sd->{maintenance_node};

    die "no state change" if $old_state eq $new_state; # just to be sure

    die "invalid CRM service state '$new_state'\n" if !$valid_service_states->{$new_state};

    foreach my $k (keys %$sd) { delete $sd->{$k}; };

    $sd->{state} = $new_state;
    $sd->{node} = $old_node;
    $sd->{failed_nodes} = $old_failed_nodes if defined($old_failed_nodes);
    $sd->{maintenance_node} = $old_maintenance_node if defined($old_maintenance_node);

    my $text_state = '';
    foreach my $k (sort keys %params) {
        my $v = $params{$k};
        $text_state .= ", " if $text_state;
        $text_state .= "$k = $v";
        $sd->{$k} = $v;
    }

    $self->recompute_online_node_usage();

    $sd->{uid} = compute_new_uuid($new_state);

    $text_state = " ($text_state)" if $text_state;
    $haenv->log('info', "service '$sid': state changed from '${old_state}'" .
                " to '${new_state}'$text_state");
};

# clean up a possible bad state from a recovered service to allow its start
my $fence_recovery_cleanup = sub {
    my ($self, $sid, $fenced_node) = @_;

    my $haenv = $self->{haenv};

    my (undef, $type, $id) = $haenv->parse_sid($sid);
    my $plugin = PVE::HA::Resources->lookup($type);

    # should not happen
    die "unknown resource type '$type'" if !$plugin;

    # locks may block recovery, so clean up those which are safe to remove after fencing,
    # i.e., after the original node was reset and thus all of its state is gone
    my $removable_locks = [
        'backup',
        'mounted',
        'migrate',
        'clone',
        'rollback',
        'snapshot',
        'snapshot-delete',
        'suspending',
        'suspended',
    ];
    if (my $removed_lock = $plugin->remove_locks($haenv, $id, $removable_locks, $fenced_node)) {
        $haenv->log('warning', "removed leftover lock '$removed_lock' from recovered " .
                    "service '$sid' to allow its start.");
    }
};

# read LRM status for all nodes
sub read_lrm_status {
    my ($self) = @_;

    my $nodes = $self->{ns}->list_nodes();
    my $haenv = $self->{haenv};

    my $results = {};
    my $modes = {};
    foreach my $node (@$nodes) {
        my $lrm_status = $haenv->read_lrm_status($node);
        $modes->{$node} = $lrm_status->{mode} || 'active';
        foreach my $uid (keys %{$lrm_status->{results}}) {
            next if $results->{$uid}; # should not happen
            $results->{$uid} = $lrm_status->{results}->{$uid};
        }
    }

    return ($results, $modes);
}

# read new crm commands and save them into crm master status
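#
# Accepted command formats, matching the parser below:
#   migrate <sid> <node>
#   relocate <sid> <node>
#   stop <sid> <timeout>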
sub update_crm_commands {
    my ($self) = @_;

    my ($haenv, $ms, $ns, $ss) = ($self->{haenv}, $self->{ms}, $self->{ns}, $self->{ss});

    my $cmdlist = $haenv->read_crm_commands();

    foreach my $cmd (split(/\n/, $cmdlist)) {
        chomp $cmd;

        if ($cmd =~ m/^(migrate|relocate)\s+(\S+)\s+(\S+)$/) {
            my ($task, $sid, $node) = ($1, $2, $3);
            if (my $sd = $ss->{$sid}) {
                if (!$ns->node_is_online($node)) {
                    $haenv->log('err', "crm command error - node not online: $cmd");
                } else {
                    if ($node eq $sd->{node}) {
                        $haenv->log('info', "ignore crm command - service already on target node: $cmd");
                    } else {
                        $haenv->log('info', "got crm command: $cmd");
                        $ss->{$sid}->{cmd} = [ $task, $node ];
                    }
                }
            } else {
                $haenv->log('err', "crm command error - no such service: $cmd");
            }

        } elsif ($cmd =~ m/^stop\s+(\S+)\s+(\S+)$/) {
            my ($sid, $timeout) = ($1, $2);
            if (my $sd = $ss->{$sid}) {
                $haenv->log('info', "got crm command: $cmd");
                $ss->{$sid}->{cmd} = [ 'stop', $timeout ];
            } else {
                $haenv->log('err', "crm command error - no such service: $cmd");
            }
        } else {
            $haenv->log('err', "unable to parse crm command: $cmd");
        }
    }
}

sub manage {
    my ($self) = @_;

    my ($haenv, $ms, $ns, $ss) = ($self->{haenv}, $self->{ms}, $self->{ns}, $self->{ss});

    my ($node_info) = $haenv->get_node_info();
    my ($lrm_results, $lrm_modes) = $self->read_lrm_status();

    $ns->update($node_info, $lrm_modes);

    if (!$ns->node_is_operational($haenv->nodename())) {
        $haenv->log('info', "master seems offline");
        return;
    }

    my $sc = $haenv->read_service_config();

    $self->{groups} = $haenv->read_group_config(); # update

    # compute new service status

    # add new services
    foreach my $sid (sort keys %$sc) {
        next if $ss->{$sid}; # already there
        my $cd = $sc->{$sid};
        next if $cd->{state} eq 'ignored';

        $haenv->log('info', "adding new service '$sid' on node '$cd->{node}'");
        # assume the service is running, to avoid relocating a running service when added
        my $state = ($cd->{state} eq 'started') ? 'started' : 'request_stop';
        $ss->{$sid} = { state => $state, node => $cd->{node},
                        uid => compute_new_uuid('started') };
    }

    # remove stale or ignored services from manager state
    foreach my $sid (keys %$ss) {
        next if $sc->{$sid} && $sc->{$sid}->{state} ne 'ignored';

        my $reason = defined($sc->{$sid}) ? 'ignored state requested' : 'no config';
        $haenv->log('info', "removing stale service '$sid' ($reason)");

        # remove all service related state information
        delete $ss->{$sid};
    }

    $self->update_crm_commands();
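
    # Run the per-service state machine repeatedly until a full pass causes no further
    # state change, so follow-up transitions (e.g., fence -> recovery) are handled within
    # a single manage() invocation.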
    for (;;) {
        my $repeat = 0;

        $self->recompute_online_node_usage();

        foreach my $sid (sort keys %$ss) {
            my $sd = $ss->{$sid};
            my $cd = $sc->{$sid} || { state => 'disabled' };

            my $lrm_res = $sd->{uid} ? $lrm_results->{$sd->{uid}} : undef;

            my $last_state = $sd->{state};

            if ($last_state eq 'stopped') {

                $self->next_state_stopped($sid, $cd, $sd, $lrm_res);

            } elsif ($last_state eq 'started') {

                $self->next_state_started($sid, $cd, $sd, $lrm_res);

            } elsif ($last_state eq 'migrate' || $last_state eq 'relocate') {

                $self->next_state_migrate_relocate($sid, $cd, $sd, $lrm_res);

            } elsif ($last_state eq 'fence') {

                # do nothing here - wait until fenced

            } elsif ($last_state eq 'recovery') {

                $self->next_state_recovery($sid, $cd, $sd, $lrm_res);

            } elsif ($last_state eq 'request_stop') {

                $self->next_state_request_stop($sid, $cd, $sd, $lrm_res);

            } elsif ($last_state eq 'freeze') {

                my $lrm_mode = $sd->{node} ? $lrm_modes->{$sd->{node}} : undef;
                # unfreeze
                my $state = ($cd->{state} eq 'started') ? 'started' : 'request_stop';
                &$change_service_state($self, $sid, $state)
                    if $lrm_mode && $lrm_mode eq 'active';

            } elsif ($last_state eq 'error') {

                $self->next_state_error($sid, $cd, $sd, $lrm_res);

            } else {

                die "unknown service state '$last_state'";
            }

            my $lrm_mode = $sd->{node} ? $lrm_modes->{$sd->{node}} : undef;
            if ($lrm_mode && $lrm_mode eq 'restart') {
                if (($sd->{state} eq 'started' || $sd->{state} eq 'stopped' ||
                     $sd->{state} eq 'request_stop')) {
                    &$change_service_state($self, $sid, 'freeze');
                }
            }

            $repeat = 1 if $sd->{state} ne $last_state;
        }

        # handle fencing
        my $fenced_nodes = {};
        foreach my $sid (sort keys %$ss) {
            my ($service_state, $service_node) = $ss->{$sid}->@{'state', 'node'};
            next if $service_state ne 'fence';

            if (!defined($fenced_nodes->{$service_node})) {
                $fenced_nodes->{$service_node} = $ns->fence_node($service_node) || 0;
            }

            next if !$fenced_nodes->{$service_node};

            # node fence was successful - recover service
            $change_service_state->($self, $sid, 'recovery');
            $repeat = 1; # for faster recovery execution
        }

        # Avoid that a node without any service in 'fence' state (e.g., if the services
        # were removed manually by an admin) gets stuck in the 'fence' node state.
        for my $node (sort grep { !defined($fenced_nodes->{$_}) } keys $ns->{status}->%*) {
            next if $ns->get_node_state($node) ne 'fence';

            $haenv->log('notice', "node '$node' in fence state but no services to-fence! admin interference?!");
            $repeat = 1 if $ns->fence_node($node);
        }

        last if !$repeat;
    }

    $self->flush_master_status();
}

# functions to compute next service states
# $cd: service configuration data (read-only)
# $sd: service status data (read-only)
#
# Note: use change_service_state() to alter state
#
sub next_state_request_stop {
    my ($self, $sid, $cd, $sd, $lrm_res) = @_;

    my $haenv = $self->{haenv};
    my $ns = $self->{ns};

    # check result from LRM daemon
    if ($lrm_res) {
        my $exit_code = $lrm_res->{exit_code};
        if ($exit_code == SUCCESS) {
            &$change_service_state($self, $sid, 'stopped');
            return;
        } else {
            $haenv->log('err', "service '$sid' stop failed (exit code $exit_code)");
            &$change_service_state($self, $sid, 'error'); # fixme: what state?
            return;
        }
    }

    if ($ns->node_is_offline_delayed($sd->{node})) {
        &$change_service_state($self, $sid, 'fence');
        return;
    }
}

sub next_state_migrate_relocate {
    my ($self, $sid, $cd, $sd, $lrm_res) = @_;

    my $haenv = $self->{haenv};
    my $ns = $self->{ns};

    # check result from LRM daemon
    if ($lrm_res) {
        my $exit_code = $lrm_res->{exit_code};
        my $req_state = $cd->{state} eq 'started' ? 'started' : 'request_stop';
        if ($exit_code == SUCCESS) {
            &$change_service_state($self, $sid, $req_state, node => $sd->{target});
            return;
        } elsif ($exit_code == EWRONG_NODE) {
            $haenv->log('err', "service '$sid' - migration failed: service" .
                        " registered on wrong node!");
            &$change_service_state($self, $sid, 'error');
            return;
        } else {
            $haenv->log('err', "service '$sid' - migration failed (exit code $exit_code)");
            &$change_service_state($self, $sid, $req_state, node => $sd->{node});
            return;
        }
    }

    if ($ns->node_is_offline_delayed($sd->{node})) {
        &$change_service_state($self, $sid, 'fence');
        return;
    }
}

sub next_state_stopped {
    my ($self, $sid, $cd, $sd, $lrm_res) = @_;

    my $haenv = $self->{haenv};
    my $ns = $self->{ns};

    if ($sd->{node} ne $cd->{node}) {
        # this can happen if we fence a node with active migrations
        # hack: modify $sd (normally this should be considered read-only)
        $haenv->log('info', "fixup service '$sid' location ($sd->{node} => $cd->{node})");
        $sd->{node} = $cd->{node};
    }

    if ($sd->{cmd}) {
        my $cmd = shift @{$sd->{cmd}};

        if ($cmd eq 'migrate' || $cmd eq 'relocate') {
            my $target = shift @{$sd->{cmd}};
            if (!$ns->node_is_online($target)) {
                $haenv->log('err', "ignore service '$sid' $cmd request - node '$target' not online");
            } elsif ($sd->{node} eq $target) {
                $haenv->log('info', "ignore service '$sid' $cmd request - service already on node '$target'");
            } else {
                &$change_service_state($self, $sid, $cmd, node => $sd->{node},
                                       target => $target);
                return;
            }
        } elsif ($cmd eq 'stop') {
            $haenv->log('info', "ignore service '$sid' $cmd request - service already stopped");
        } else {
            $haenv->log('err', "unknown command '$cmd' for service '$sid'");
        }
        delete $sd->{cmd};
    }

    if ($cd->{state} eq 'disabled') {
        # NOTE: do nothing here, the stop state is an exception as we do not
        # process the LRM result here, thus the LRM always tries to stop the
        # service (protection for the case no CRM is active)
        return;
    }

    if ($ns->node_is_offline_delayed($sd->{node}) && $ns->get_node_state($sd->{node}) ne 'maintenance') {
        &$change_service_state($self, $sid, 'fence');
        return;
    }

    if ($cd->{state} eq 'stopped') {
        # almost the same as the 'disabled' state, but the service will also get recovered
        return;
    }

    if ($cd->{state} eq 'started') {
        # simply mark it started, if it's on the wrong node
        # next_state_started will fix that for us
        &$change_service_state($self, $sid, 'started', node => $sd->{node});
        return;
    }

    $haenv->log('err', "service '$sid' - unknown state '$cd->{state}' in service configuration");
}
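
# remember a node where starting the service failed, so that the relocation policy in
# next_state_started and select_service_node can avoid it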
sub record_service_failed_on_node {
    my ($self, $sid, $node) = @_;

    if (!defined($self->{ss}->{$sid}->{failed_nodes})) {
        $self->{ss}->{$sid}->{failed_nodes} = [];
    }

    push @{$self->{ss}->{$sid}->{failed_nodes}}, $node;
}
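
# Handle services in the 'started' state: process queued migrate/relocate/stop commands,
# apply the relocation policy when the LRM reported a start failure, and move the service
# if select_service_node comes up with a better node.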
sub next_state_started {
    my ($self, $sid, $cd, $sd, $lrm_res) = @_;

    my $haenv = $self->{haenv};
    my $master_status = $self->{ms};
    my $ns = $self->{ns};

    if (!$ns->node_is_online($sd->{node})) {
        if ($ns->node_is_offline_delayed($sd->{node})) {
            &$change_service_state($self, $sid, 'fence');
        }
        if ($ns->get_node_state($sd->{node}) ne 'maintenance') {
            return;
        } else {
            # save current node as fallback for when it comes out of
            # maintenance
            $sd->{maintenance_node} = $sd->{node};
        }
    }

    if ($cd->{state} eq 'disabled' || $cd->{state} eq 'stopped') {
        &$change_service_state($self, $sid, 'request_stop');
        return;
    }

    if ($cd->{state} eq 'started') {

        if ($sd->{cmd}) {
            my $cmd = shift @{$sd->{cmd}};

            if ($cmd eq 'migrate' || $cmd eq 'relocate') {
                my $target = shift @{$sd->{cmd}};
                if (!$ns->node_is_online($target)) {
                    $haenv->log('err', "ignore service '$sid' $cmd request - node '$target' not online");
                } elsif ($sd->{node} eq $target) {
                    $haenv->log('info', "ignore service '$sid' $cmd request - service already on node '$target'");
                } else {
                    $haenv->log('info', "$cmd service '$sid' to node '$target'");
                    &$change_service_state($self, $sid, $cmd, node => $sd->{node}, target => $target);
                }
            } elsif ($cmd eq 'stop') {
                my $timeout = shift @{$sd->{cmd}};
                if ($timeout == 0) {
                    $haenv->log('info', "request immediate service hard-stop for service '$sid'");
                } else {
                    $haenv->log('info', "request graceful stop with timeout '$timeout' for service '$sid'");
                }
                &$change_service_state($self, $sid, 'request_stop', timeout => $timeout);
                $haenv->update_service_config($sid, {'state' => 'stopped'});
            } else {
                $haenv->log('err', "unknown command '$cmd' for service '$sid'");
            }

            delete $sd->{cmd};

        } else {

            my $try_next = 0;

            if ($lrm_res) {

                my $ec = $lrm_res->{exit_code};
                if ($ec == SUCCESS) {

                    if (defined($sd->{failed_nodes})) {
                        $haenv->log('info', "relocation policy successful for '$sid' on node '$sd->{node}'," .
                                    " failed nodes: " . join(', ', @{$sd->{failed_nodes}}) );
                    }

                    delete $sd->{failed_nodes};

                    # store flag to indicate successful start - only valid while state == 'started'
                    $sd->{running} = 1;

                } elsif ($ec == ERROR) {

                    delete $sd->{running};

                    # apply our relocate policy if we got ERROR from the LRM
                    $self->record_service_failed_on_node($sid, $sd->{node});

                    if (scalar(@{$sd->{failed_nodes}}) <= $cd->{max_relocate}) {

                        # tell select_service_node to relocate if possible
                        $try_next = 1;

                        $haenv->log('warning', "starting service $sid on node".
                                    " '$sd->{node}' failed, relocating service.");

                    } else {

                        $haenv->log('err', "recovery policy for service $sid " .
                                    "failed, entering error state. Failed nodes: ".
                                    join(', ', @{$sd->{failed_nodes}}));
                        &$change_service_state($self, $sid, 'error');
                        return;

                    }
                } else {
                    $self->record_service_failed_on_node($sid, $sd->{node});

                    $haenv->log('err', "service '$sid' got unrecoverable error" .
                                " (exit code $ec)");
                    # we have no safe way out (yet) for other errors
                    &$change_service_state($self, $sid, 'error');
                    return;
                }
            }

            my $node = select_service_node(
                $self->{groups},
                $self->{online_node_usage},
                $cd,
                $sd->{node},
                $try_next,
                $sd->{failed_nodes},
                $sd->{maintenance_node},
            );

            if ($node && ($sd->{node} ne $node)) {
                $self->{online_node_usage}->{$node}++;

                if (defined(my $fallback = $sd->{maintenance_node})) {
                    if ($node eq $fallback) {
                        $haenv->log('info', "moving service '$sid' back to '$fallback', node came back from maintenance.");
                        delete $sd->{maintenance_node};
                    } elsif ($sd->{node} ne $fallback) {
                        $haenv->log('info', "dropping maintenance fallback node '$fallback' for '$sid'");
                        delete $sd->{maintenance_node};
                    }
                }

                if ($cd->{type} eq 'vm') {
                    $haenv->log('info', "migrate service '$sid' to node '$node' (running)");
                    &$change_service_state($self, $sid, 'migrate', node => $sd->{node}, target => $node);
                } else {
                    $haenv->log('info', "relocate service '$sid' to node '$node'");
                    &$change_service_state($self, $sid, 'relocate', node => $sd->{node}, target => $node);
                }
            } else {
                if ($try_next && !defined($node)) {
                    $haenv->log('warning', "Start Error Recovery: Tried all available" .
                                " nodes for service '$sid', retry start on current node. " .
                                "Tried nodes: " . join(', ', @{$sd->{failed_nodes}}));
                }
                # ensure the service gets started again if it went down unexpectedly,
                # but also ensure no LRM result gets lost
                $sd->{uid} = compute_new_uuid($sd->{state}) if defined($lrm_res);
            }
        }

        return;
    }

    $haenv->log('err', "service '$sid' - unknown state '$cd->{state}' in service configuration");
}

sub next_state_error {
    my ($self, $sid, $cd, $sd, $lrm_res) = @_;

    my $ns = $self->{ns};
    my $ms = $self->{ms};

    if ($cd->{state} eq 'disabled') {
        # clean up on error recovery
        delete $sd->{failed_nodes};

        &$change_service_state($self, $sid, 'stopped');
        return;
    }
}

# after a node was fenced this recovers the service to a new node
sub next_state_recovery {
    my ($self, $sid, $cd, $sd, $lrm_res) = @_;

    my ($haenv, $ss) = ($self->{haenv}, $self->{ss});
    my $ns = $self->{ns};
    my $ms = $self->{ms};

    if ($sd->{state} ne 'recovery') { # should not happen
        $haenv->log('err', "cannot recover service '$sid' from fencing, wrong state '$sd->{state}'");
        return;
    }

    my $fenced_node = $sd->{node}; # for logging purposes

    $self->recompute_online_node_usage(); # we want the most current node state

    my $recovery_node = select_service_node(
        $self->{groups},
        $self->{online_node_usage},
        $cd,
        $sd->{node},
    );

    if ($recovery_node) {
        my $msg = "recover service '$sid' from fenced node '$fenced_node' to node '$recovery_node'";
        if ($recovery_node eq $fenced_node) {
            # can happen with restricted groups if the node came up again OK
            $msg = "recover service '$sid' to previously failed and fenced node '$fenced_node' again";
        }
        $haenv->log('info', "$msg");

        $fence_recovery_cleanup->($self, $sid, $fenced_node);

        $haenv->steal_service($sid, $sd->{node}, $recovery_node);
        $self->{online_node_usage}->{$recovery_node}++;

        # NOTE: $sd *is normally read-only*, fencing is the exception
        $cd->{node} = $sd->{node} = $recovery_node;
        my $new_state = ($cd->{state} eq 'started') ? 'started' : 'request_stop';
        $change_service_state->($self, $sid, $new_state, node => $recovery_node);
    } else {
        # no possible node found, cannot recover - but retry later, as we always try to make it available
        $haenv->log('err', "recovering service '$sid' from fenced node '$fenced_node' failed, no recovery node found");

        if ($cd->{state} eq 'disabled') {
            # allow getting a service out of recovery manually if an admin disables it.
            delete $sd->{failed_nodes}; # clean up on recovery to stopped
            $change_service_state->($self, $sid, 'stopped'); # must NOT go through request_stop
            return;
        }
    }
}

1;