3 # Local Resource Manager
7 use POSIX
qw(:sys_wait_h);
11 use PVE
::HA
::Tools
':exit_codes';
12 use PVE
::HA
::Resources
;
14 # Server can have several states:
# NOTE(review): this span is a lossy extraction — the '$valid_states = {' opener,
# the 'sub new {' header and the bless/return lines are missing from this view.
# Map of valid LRM states to human-readable descriptions (used by set_local_status).
17 wait_for_agent_lock
=> "waiting for agent lock",
18 active
=> "got agent_lock",
19 maintenance
=> "going into maintenance",
20 lost_agent_lock
=> "lost agent_lock",
# Constructor: takes the HA environment object and builds the LRM instance.
24 my ($this, $haenv) = @_;
26 my $class = ref($this) || $this;
# Initial status; immediately replaced below via set_local_status.
30 status
=> { state => 'startup' },
# Timestamp of the shutdown request; 0 means no shutdown requested yet.
34 shutdown_request
=> 0,
36 # mode can be: active, reboot, shutdown, restart
# Set by do_one_iteration from $haenv->cluster_state_update(); 0/false means
# the last cluster state update failed.
38 cluster_state_update
=> 0,
# Enter the initial operational state.
41 $self->set_local_status({ state => 'wait_for_agent_lock' });
# Handle a node shutdown/reboot (or plain LRM restart) request: pick the
# behavior from the cluster-wide 'shutdown_policy', queue stop jobs for local
# services on a real node shutdown, set $self->{mode}, and record the request
# time in $self->{shutdown_request}.
# NOTE(review): interior lines of this sub are missing from this extraction
# (e.g. the bodies of the 'freeze'/'failover'/'migrate' policy branches).
46 sub shutdown_request
{
49 return if $self->{shutdown_request
}; # already in shutdown mode
51 my $haenv = $self->{haenv
};
53 my $nodename = $haenv->nodename();
# $shutdown: node is going down; $reboot: it will come back up.
55 my ($shutdown, $reboot) = $haenv->is_node_shutdown();
57 my $dc_ha_cfg = $haenv->get_ha_settings();
58 my $shutdown_policy = $dc_ha_cfg->{shutdown_policy
} // 'conditional';
60 if ($shutdown) { # don't log this on service restart, only on node shutdown
61 $haenv->log('info', "got shutdown request with shutdown policy '$shutdown_policy'");
# Decide whether services get frozen (kept assigned to this node) or handled
# otherwise, depending on the configured policy.
66 if ($shutdown_policy eq 'conditional') {
67 $freeze_all = $reboot;
68 } elsif ($shutdown_policy eq 'freeze') {
70 } elsif ($shutdown_policy eq 'failover') {
72 } elsif ($shutdown_policy eq 'migrate') {
75 $haenv->log('err', "unknown shutdown policy '$shutdown_policy', fall back to conditional");
76 $freeze_all = $reboot;
80 # we get marked as unavailable by the manager, then all services will
81 # be migrated away, we'll still have the same "can we exit" clause as
82 # a normal shutdown -> no running service on this node
83 # FIXME: after X minutes, add shutdown command for remaining services,
84 # e.g., if they have no alternative node???
86 # *always* queue stop jobs for all services if the node shuts down,
87 # independent if it's a reboot or a poweroff, else we may corrupt
88 # services or hinder node shutdown
89 my $ss = $self->{service_status
};
91 foreach my $sid (keys %$ss) {
94 next if $sd->{node
} ne $nodename;
95 # Note: use undef uid to mark shutdown/stop jobs
96 $self->queue_resource_command($sid, undef, 'request_stop');
101 my $shutdown_type = $reboot ?
'reboot' : 'shutdown';
103 $haenv->log('info', "$shutdown_type LRM, doing maintenance, removing this node from active list");
104 $self->{mode
} = 'maintenance';
105 } elsif ($freeze_all) {
106 $haenv->log('info', "$shutdown_type LRM, stop and freeze all services");
107 $self->{mode
} = 'restart';
109 $haenv->log('info', "shutdown LRM, stop all services");
110 $self->{mode
} = 'shutdown';
# Not a node shutdown: plain LRM service restart -> freeze services.
113 $haenv->log('info', "restart LRM, freeze all services");
114 $self->{mode
} = 'restart';
# Record when the shutdown was requested (also acts as the "in shutdown" flag).
117 $self->{shutdown_request
} = $haenv->get_time();
119 eval { $self->update_lrm_status() or die "not quorate?\n"; };
# NOTE(review): '$self->log' — the rest of this file logs via '$haenv->log';
# likely meant '$haenv->log' here, verify against upstream.
121 $self->log('err', "unable to update lrm status file - $err");
# Accessor: return the current local status hash ({ state => ..., ... }).
125 sub get_local_status
{
128 return $self->{status
};
# Transition the LRM to a new local state. Validates the state name against
# $valid_states, logs the change, stamps $new->{state_change_time}, and stores
# the new status hash. No-op if the state is unchanged.
131 sub set_local_status
{
132 my ($self, $new) = @_;
# Die early on unknown states so programming errors cannot go unnoticed.
134 die "invalid state '$new->{state}'" if !$valid_states->{$new->{state}};
136 my $haenv = $self->{haenv
};
138 my $old = $self->{status
};
140 # important: only update if it really changed
141 return if $old->{state} eq $new->{state};
143 $haenv->log('info', "status change $old->{state} => $new->{state}");
# Remember when this state was entered (used e.g. by the 90s give-up check
# in the lost_agent_lock handling).
145 $new->{state_change_time
} = $haenv->get_time();
147 $self->{status
} = $new;
# Persist the LRM status (state, mode, command results, timestamp) via the HA
# environment so the CRM can read it. Returns 0 (without writing) when the
# node is not quorate; write errors are caught and logged.
150 sub update_lrm_status
{
153 my $haenv = $self->{haenv
};
# Never write status without quorum.
155 return 0 if !$haenv->quorate();
158 state => $self->{status
}->{state},
159 mode
=> $self->{mode
},
# Per-uid exit results of finished resource commands (see
# resource_command_finished).
160 results
=> $self->{results
},
161 timestamp
=> $haenv->get_time(),
164 eval { $haenv->write_lrm_status($lrm_status); };
166 $haenv->log('err', "unable to write lrm status file - $err");
# Refresh $self->{service_status} from the manager (CRM) status. Read errors
# are caught, logged and treated as non-fatal; falls back to an empty hash if
# the manager status carries no service_status.
173 sub update_service_status
{
176 my $haenv = $self->{haenv
};
178 my $ms = eval { $haenv->read_manager_status(); };
180 $haenv->log('err', "updating service status from manager failed: $err");
183 $self->{service_status
} = $ms->{service_status
} || {};
# Try to acquire (or keep) the HA agent lock, coupling it with the hardware
# watchdog: on success either update the already-open watchdog or open a new
# one. Retries in a loop bounded by both a count (5 attempts) and wall time
# (5 seconds).
188 sub get_protected_ha_agent_lock
{
191 my $haenv = $self->{haenv
};
194 my $starttime = $haenv->get_time();
198 if ($haenv->get_ha_agent_lock()) {
199 if ($self->{ha_agent_wd
}) {
# Lock held and watchdog already open -> just feed the watchdog.
200 $haenv->watchdog_update($self->{ha_agent_wd
});
# First successful acquisition -> arm the watchdog and remember the handle.
202 my $wfh = $haenv->watchdog_open();
203 $self->{ha_agent_wd
} = $wfh;
208 last if ++$count > 5; # try max 5 times
210 my $delay = $haenv->get_time() - $starttime;
211 last if $delay > 5; # for max 5 seconds
# Count services assigned to this node that are in an "active" request state,
# i.e. skipping entries without a node, on other nodes, or whose state is
# undefined, 'stopped', 'freeze' or 'error'.
219 sub active_service_count
{
222 my $haenv = $self->{haenv
};
224 my $nodename = $haenv->nodename();
226 my $ss = $self->{service_status
};
230 foreach my $sid (keys %$ss) {
231 my $sd = $ss->{$sid};
232 next if !$sd->{node
};
233 next if $sd->{node
} ne $nodename;
234 my $req_state = $sd->{state};
235 next if !defined($req_state);
236 next if $req_state eq 'stopped';
237 next if $req_state eq 'freeze';
238 # erroneous services are not managed by HA, don't count them as active
239 next if $req_state eq 'error';
# File-scope flag: set once the LRM status has been written at least once
# after startup (checked by work()).
247 my $wrote_lrm_status_at_startup = 0;
# Run a single LRM main-loop iteration: loop-start hook, refresh the cached
# cluster state, do the actual work(), then loop-end hook.
249 sub do_one_iteration
{
252 my $haenv = $self->{haenv
};
254 $haenv->loop_start_hook();
# Remember whether the cluster state update succeeded; work() skips resource
# management for this round if it did not.
256 $self->{cluster_state_update
} = $haenv->cluster_state_update();
258 my $res = $self->work();
260 $haenv->loop_end_hook();
# Body of work(): the LRM state machine. First part performs state
# transitions (wait_for_agent_lock / active / maintenance / lost_agent_lock),
# second part executes the behavior of the (possibly new) state.
# NOTE(review): the 'sub work {' header and several interior lines are
# missing from this extraction.
my $haenv = $self->{haenv
};
# Make sure the LRM status gets written once at startup; until that succeeds
# (e.g. no quorum yet) do nothing else in this iteration.
270 if (!$wrote_lrm_status_at_startup) {
271 if ($self->update_lrm_status()) {
272 $wrote_lrm_status_at_startup = 1;
276 return $self->{shutdown_request
} ?
0 : 1;
280 my $status = $self->get_local_status();
281 my $state = $status->{state};
283 $self->update_service_status();
# Count services of this node that the manager marked for fencing.
285 my $fence_request = PVE
::HA
::Tools
::count_fenced_services
($self->{service_status
}, $haenv->nodename());
287 # do state changes first
289 my $ctime = $haenv->get_time();
291 if ($state eq 'wait_for_agent_lock') {
293 my $service_count = $self->active_service_count();
# Only try to become active when there is something to manage, no fence is
# pending, and we are quorate.
295 if (!$fence_request && $service_count && $haenv->quorate()) {
296 if ($self->get_protected_ha_agent_lock()) {
297 $self->set_local_status({ state => 'active' });
301 } elsif ($state eq 'lost_agent_lock') {
# Try to regain the lock as soon as we are quorate again.
303 if (!$fence_request && $haenv->quorate()) {
304 if ($self->get_protected_ha_agent_lock()) {
305 $self->set_local_status({ state => 'active' });
309 } elsif ($state eq 'active') {
311 if ($fence_request) {
312 $haenv->log('err', "node need to be fenced - releasing agent_lock\n");
313 $self->set_local_status({ state => 'lost_agent_lock'});
314 } elsif (!$self->get_protected_ha_agent_lock()) {
315 $self->set_local_status({ state => 'lost_agent_lock'});
316 } elsif ($self->{mode
} eq 'maintenance') {
317 $self->set_local_status({ state => 'maintenance'});
319 } elsif ($state eq 'maintenance') {
321 if ($fence_request) {
322 $haenv->log('err', "node need to be fenced during maintenance mode - releasing agent_lock\n");
323 $self->set_local_status({ state => 'lost_agent_lock'});
324 } elsif (!$self->get_protected_ha_agent_lock()) {
325 $self->set_local_status({ state => 'lost_agent_lock'});
# Re-read the status: the transitions above may have changed it.
329 $status = $self->get_local_status();
330 $state = $status->{state};
# --- second part: act according to the (new) state ---
334 if ($state eq 'wait_for_agent_lock') {
336 return 0 if $self->{shutdown_request
};
338 $self->update_lrm_status();
342 } elsif ($state eq 'active') {
344 my $startime = $haenv->get_time();
350 # do work (max_time seconds)
352 # fixme: set alert timer
354 # if we could not get the current service status there's no point
355 # in doing anything, try again next round.
356 return if !$self->update_service_status();
358 if ($self->{shutdown_request
}) {
360 if ($self->{mode
} eq 'restart') {
# Restart mode: wait until no active services remain, then drop watchdog
# and agent lock so the LRM can exit.
362 my $service_count = $self->active_service_count();
364 if ($service_count == 0) {
366 if ($self->run_workers() == 0) {
367 if ($self->{ha_agent_wd
}) {
368 $haenv->watchdog_close($self->{ha_agent_wd
});
369 delete $self->{ha_agent_wd
};
374 # restart with no or frozen services, release the lock
375 $haenv->release_ha_agent_lock();
# Shutdown mode: wait until all workers finished without errors before
# closing the watchdog and releasing the lock.
380 if ($self->run_workers() == 0) {
381 if ($self->{shutdown_errors
} == 0) {
382 if ($self->{ha_agent_wd
}) {
383 $haenv->watchdog_close($self->{ha_agent_wd
});
384 delete $self->{ha_agent_wd
};
387 # shutdown with all services stopped thus release the lock
388 $haenv->release_ha_agent_lock();
395 if (!$self->{cluster_state_update
}) {
396 # update failed but we could still renew our lock (cfs restart?),
397 # safely skip manage and expect to update just fine next round
398 $haenv->log('notice', "temporary inconsistent cluster state " .
399 "(cfs restart?), skip round");
403 $self->manage_resources();
408 $haenv->log('err', "got unexpected error - $err");
411 $self->update_lrm_status();
413 return 0 if $shutdown;
# Keep a fixed iteration cadence.
415 $haenv->sleep_until($startime + $max_time);
417 } elsif ($state eq 'lost_agent_lock') {
419 # Note: watchdog is active and will trigger soon!
421 # so we hope to get the lock back soon!
423 if ($self->{shutdown_request
}) {
425 my $service_count = $self->active_service_count();
427 if ($service_count > 0) {
428 $haenv->log('err', "get shutdown request in state 'lost_agent_lock' - " .
429 "detected $service_count running services");
431 if ($self->{mode
} eq 'restart') {
432 my $state_mt = $self->{status
}->{state_change_time
};
434 # watchdog should have already triggered, so either it's set
435 # to noboot or it failed. As we are in restart mode, and
436 # have infinity stoptimeout -> exit now - we don't touch services
437 # or change state, so this is safe, relatively speaking
438 if (($haenv->get_time() - $state_mt) > 90) {
439 $haenv->log('err', "lost agent lock and restart request for over 90 seconds - giving up!");
445 # all services are stopped, so we can close the watchdog
447 if ($self->{ha_agent_wd
}) {
448 $haenv->watchdog_close($self->{ha_agent_wd
});
449 delete $self->{ha_agent_wd
};
458 } elsif ($state eq 'maintenance') {
460 my $startime = $haenv->get_time();
461 return if !$self->update_service_status();
463 # wait until all active services moved away
464 my $service_count = $self->active_service_count();
468 if ($self->{shutdown_request
}) {
469 if ($service_count == 0 && $self->run_workers() == 0) {
470 if ($self->{ha_agent_wd
}) {
471 $haenv->watchdog_close($self->{ha_agent_wd
});
472 delete $self->{ha_agent_wd
};
477 # restart with no or frozen services, release the lock
478 $haenv->release_ha_agent_lock();
482 $self->manage_resources() if !$exit_lrm;
484 $self->update_lrm_status();
486 return 0 if $exit_lrm;
488 $haenv->sleep_until($startime + 5);
# Unknown state is a programming error -> abort loudly.
492 die "got unexpected status '$state'\n";
# Body of run_workers(): start queued resource-agent commands, either in
# forked worker processes (up to $max_workers in parallel) or by direct
# execution when $max_workers is 0 (regression tests). Loops for at most ~5
# seconds, reaping finished workers each pass; returns the number of workers
# still queued/running.
# NOTE(review): the 'sub run_workers {' header and several interior lines
# (fork call, child exit path, loop/sleep glue) are missing from this
# extraction.
my $haenv = $self->{haenv
};
504 my $starttime = $haenv->get_time();
506 # number of workers to start, if 0 we exec the command directly without forking
507 my $max_workers = $haenv->get_max_workers();
509 my $sc = $haenv->read_service_config();
511 while (($haenv->get_time() - $starttime) < 5) {
# Reap finished children and get the current active-worker count.
512 my $count = $self->check_active_workers();
# Sorted iteration for deterministic start order.
514 foreach my $sid (sort keys %{$self->{workers
}}) {
515 last if $count >= $max_workers && $max_workers > 0;
517 my $w = $self->{workers
}->{$sid};
519 # only fork if we may else call exec_resource_agent
520 # directly (e.g. for regression tests)
521 if ($max_workers > 0) {
523 if (!defined($pid)) {
524 $haenv->log('err', "fork worker failed");
525 $count = 0; last; # abort, try later
526 } elsif ($pid == 0) {
# Child process: clean up inherited environment state before executing.
527 $haenv->after_fork(); # cleanup
532 $res = $self->exec_resource_agent($sid, $sc->{$sid}, $w->{state}, $w->{params
});
535 $haenv->log('err', $err);
# Direct (non-forking) execution path.
546 $res = $self->exec_resource_agent($sid, $sc->{$sid}, $w->{state}, $w->{params
});
# Shift the agent exit code into wait()-status format so both paths report
# results the same way.
547 $res = $res << 8 if $res > 0;
550 $haenv->log('err', $err);
552 if (defined($w->{uid
})) {
553 $self->resource_command_finished($sid, $w->{uid
}, $res);
# undef uid marks shutdown/stop jobs (see shutdown_request).
555 $self->stop_command_finished($sid, $res);
566 return scalar(keys %{$self->{workers
}});
# Queue a resource command for every service assigned to this node (except
# undefined/'freeze' request states), prune restart-try counters of services
# that vanished, then run the worker queue. Returns run_workers()'s result.
569 sub manage_resources
{
572 my $haenv = $self->{haenv
};
574 my $nodename = $haenv->nodename();
576 my $ss = $self->{service_status
};
# Drop restart counters for services no longer in the status hash.
578 foreach my $sid (keys %{$self->{restart_tries
}}) {
579 delete $self->{restart_tries
}->{$sid} if !$ss->{$sid};
582 foreach my $sid (keys %$ss) {
583 my $sd = $ss->{$sid};
584 next if !$sd->{node
};
586 next if $sd->{node
} ne $nodename;
587 my $req_state = $sd->{state};
588 next if !defined($req_state);
# Frozen services are intentionally left untouched.
589 next if $req_state eq 'freeze';
590 $self->queue_resource_command($sid, $sd->{uid
}, $req_state, {'target' => $sd->{target
}, 'timeout' => $sd->{timeout
}});
593 return $self->run_workers();
# Enqueue a worker entry for service $sid executing command $state with the
# given $uid and optional $params. Skips commands whose result for this uid
# was already recorded (except 'stopped'); replaces a queued-but-not-started
# entry, but never one whose worker already runs.
596 sub queue_resource_command
{
597 my ($self, $sid, $uid, $state, $params) = @_;
599 # do not queue the exact same command twice as this may lead to
600 # an inconsistent HA state when the first command fails but the CRM
601 # does not process its failure right away and the LRM starts a second
602 # try, without the CRM knowing of it (race condition)
603 # The 'stopped' command is an exception as we do not process its result
604 # in the CRM and we want to execute it always (even with no active CRM)
605 return if $state ne 'stopped' && $uid && defined($self->{results
}->{$uid});
607 if (my $w = $self->{workers
}->{$sid}) {
608 return if $w->{pid
}; # already started
609 # else, delete and overwrite queue entry with new command
610 delete $self->{workers
}->{$sid};
613 $self->{workers
}->{$sid} = {
# Only attach params when given, keeping the entry minimal otherwise.
619 $self->{workers
}->{$sid}->{params
} = $params if $params;
# Reap finished worker children (non-blocking waitpid) and dispatch their
# wait status ($?) to resource_command_finished (uid set) or
# stop_command_finished (undef uid = shutdown/stop job).
622 sub check_active_workers
{
625 # finish/count workers
627 foreach my $sid (keys %{$self->{workers
}}) {
628 my $w = $self->{workers
}->{$sid};
629 if (my $pid = $w->{pid
}) {
# Non-blocking check whether this worker already exited.
631 my $waitpid = waitpid($pid, WNOHANG
);
632 if (defined($waitpid) && ($waitpid == $pid)) {
633 if (defined($w->{uid
})) {
634 $self->resource_command_finished($sid, $w->{uid
}, $?);
636 $self->stop_command_finished($sid, $?);
# Handle completion of a shutdown/stop job (undef-uid worker): remove the
# worker entry, decode the wait status (exec failure / signal / exit code)
# and count non-zero exits in $self->{shutdown_errors} so the shutdown path
# in work() can refuse to release the lock on failures.
647 sub stop_command_finished
{
648 my ($self, $sid, $status) = @_;
650 my $haenv = $self->{haenv
};
652 my $w = delete $self->{workers
}->{$sid};
653 return if !$w; # should not happen
658 $haenv->log('err', "resource agent $sid finished - failed to execute");
# Low 7 bits of the wait status carry the terminating signal, if any.
659 } elsif (my $sig = ($status & 127)) {
660 $haenv->log('err', "resource agent $sid finished - got signal $sig");
# Normal exit: the actual exit code is in the high byte.
662 $exit_code = ($status >> 8);
665 if ($exit_code != 0) {
666 $self->{shutdown_errors
}++;
# Handle completion of a regular resource command: decode the wait status,
# let handle_service_exitcode apply the restart policy (ETRY_AGAIN means
# silently retry), record the result under its uid for the CRM, and prune
# results whose uid no longer appears in the service status.
670 sub resource_command_finished
{
671 my ($self, $sid, $uid, $status) = @_;
673 my $haenv = $self->{haenv
};
675 my $w = delete $self->{workers
}->{$sid};
676 return if !$w; # should not happen
681 $haenv->log('err', "resource agent $sid finished - failed to execute");
# Low 7 bits: terminating signal, if any.
682 } elsif (my $sig = ($status & 127)) {
683 $haenv->log('err', "resource agent $sid finished - got signal $sig");
# Normal exit: exit code lives in the high byte.
685 $exit_code = ($status >> 8);
688 $exit_code = $self->handle_service_exitcode($sid, $w->{state}, $exit_code);
690 return if $exit_code == ETRY_AGAIN
; # tell nobody, simply retry
# Publish the result (read back by the CRM via the LRM status file).
692 $self->{results
}->{$uid} = {
694 state => $w->{state},
695 exit_code
=> $exit_code,
698 my $ss = $self->{service_status
};
700 # compute hash of valid/existing uids
702 foreach my $sid (keys %$ss) {
703 my $sd = $ss->{$sid};
705 $valid_uids->{$sd->{uid
}} = 1;
# Keep only results whose uid is still referenced by some service.
709 foreach my $id (keys %{$self->{results
}}) {
710 next if !$valid_uids->{$id};
711 $results->{$id} = $self->{results
}->{$id};
713 $self->{results
} = $results;
716 # processes the exit code from a finished resource agent, so that the CRM knows
717 # if the LRM wants to retry an action based on the current recovery policies for
718 # the failed service, or the CRM itself must try to recover from the failure.
# NOTE(review): several interior lines (the 'return' paths, the $max_restart
# default, and the retry-counter increment) are missing from this extraction.
719 sub handle_service_exitcode
{
720 my ($self, $sid, $cmd, $exit_code) = @_;
722 my $haenv = $self->{haenv
};
# Per-service restart attempt counters, pruned in manage_resources.
723 my $tries = $self->{restart_tries
};
725 my $sc = $haenv->read_service_config();
# Use the service's configured max_restart when present.
729 if (my $cd = $sc->{$sid}) {
730 $max_restart = $cd->{max_restart
};
# Restart policy only applies to the 'started' command.
733 if ($cmd eq 'started') {
735 if ($exit_code == SUCCESS
) {
741 } elsif ($exit_code == ERROR
) {
743 $tries->{$sid} = 0 if !defined($tries->{$sid});
# Give up once the retry budget is exhausted; the CRM must recover.
745 if ($tries->{$sid} >= $max_restart) {
746 $haenv->log('err', "unable to start service $sid on local node".
747 " after $tries->{$sid} retries");
754 $haenv->log('warning', "restart policy: retry number $tries->{$sid}" .
755 " for service '$sid'");
756 # tell CRM that we retry the start
# Execute one resource-agent command ('started', 'request_stop'/'stopped',
# 'migrate'/'relocate', 'error') for service $sid via its resource plugin.
# Returns an exit code from PVE::HA::Tools (:exit_codes), e.g. SUCCESS,
# EUNKNOWN_SERVICE_TYPE, EUNKNOWN_SERVICE, EINVALID_PARAMETER,
# EUNKNOWN_COMMAND. Runs in the forked worker (or directly in test mode).
# NOTE(review): interior lines (several return statements and closing braces)
# are missing from this extraction.
765 sub exec_resource_agent
{
766 my ($self, $sid, $service_config, $cmd, $params) = @_;
768 # setup execution environment
# Fixed, trusted PATH for anything the plugins spawn.
770 $ENV{'PATH'} = '/sbin:/bin:/usr/sbin:/usr/bin';
772 my $haenv = $self->{haenv
};
774 my $nodename = $haenv->nodename();
776 my (undef, $service_type, $service_name) = $haenv->parse_sid($sid);
# Look up the resource plugin (e.g. VM/CT) for this service type.
778 my $plugin = PVE
::HA
::Resources-
>lookup($service_type);
780 $haenv->log('err', "service type '$service_type' not implemented");
781 return EUNKNOWN_SERVICE_TYPE
;
784 if (!$service_config) {
785 $haenv->log('err', "missing resource configuration for '$sid'");
786 return EUNKNOWN_SERVICE
;
789 # process error state early
790 if ($cmd eq 'error') {
792 $haenv->log('err', "service $sid is in an error state and needs manual " .
793 "intervention. Look up 'ERROR RECOVERY' in the documentation.");
795 return SUCCESS
; # error always succeeds
# Sanity check: the service must be assigned to this node.
798 if ($service_config->{node
} ne $nodename) {
799 $haenv->log('err', "service '$sid' not on this node");
803 my $id = $service_name;
805 my $running = $plugin->check_running($haenv, $id);
807 if ($cmd eq 'started') {
# Idempotent: already running counts as success.
809 return SUCCESS
if $running;
811 $haenv->log("info", "starting service $sid");
813 $plugin->start($haenv, $id);
# Re-check instead of trusting start()'s outcome.
815 $running = $plugin->check_running($haenv, $id);
818 $haenv->log("info", "service status $sid started");
821 $haenv->log("warning", "unable to start service $sid");
825 } elsif ($cmd eq 'request_stop' || $cmd eq 'stopped') {
# Idempotent: already stopped counts as success.
827 return SUCCESS
if !$running;
829 if (defined($params->{timeout
})) {
830 $haenv->log("info", "stopping service $sid (timeout=$params->{timeout})");
832 $haenv->log("info", "stopping service $sid");
835 $plugin->shutdown($haenv, $id, $params->{timeout
});
837 $running = $plugin->check_running($haenv, $id);
840 $haenv->log("info", "service status $sid stopped");
# NOTE(review): runtime string contains a duplicated word ("stop stop");
# cannot be changed in a documentation-only pass — flag for a code fix.
843 $haenv->log("info", "unable to stop stop service $sid (still running)");
847 } elsif ($cmd eq 'migrate' || $cmd eq 'relocate') {
849 my $target = $params->{target
};
850 if (!defined($target)) {
# NOTE(review): '!defined($target)' is re-checked inside a branch already
# guarded by the same condition — the 'die' always fires and the following
# return is unreachable; verify intent against upstream.
851 die "$cmd '$sid' failed - missing target\n" if !defined($target);
852 return EINVALID_PARAMETER
;
855 if ($service_config->{node
} eq $target) {
# Online migration for 'migrate', offline for 'relocate'.
860 my $online = ($cmd eq 'migrate') ?
1 : 0;
862 my $res = $plugin->migrate($haenv, $id, $target, $online);
864 # something went wrong if service is still on this node
866 $haenv->log("err", "service $sid not moved (migration error)");
# Fallthrough: command name not recognized.
874 $haenv->log("err", "implement me (cmd '$cmd')");
875 return EUNKNOWN_COMMAND
;