3 # Local Resource Manager
7 use POSIX
qw(:sys_wait_h);
11 use PVE
::HA
::Tools
':exit_codes';
12 use PVE
::HA
::Resources
;
# Server can have several states:
#   wait_for_agent_lock - not yet holding the per-node agent lock
#   active              - holding the agent lock, managing services
#   lost_agent_lock     - lock lost while active (watchdog will fire)
my $valid_states = {
    wait_for_agent_lock => "waiting for agent lock",
    active => "got agent_lock",
    lost_agent_lock => "lost agent_lock",
};
# Constructor: build an LRM instance bound to the given HA environment and
# enter the initial 'wait_for_agent_lock' state.
sub new {
    my ($this, $haenv) = @_;

    my $class = ref($this) || $this;

    my $self = bless {
        haenv => $haenv,
        status => { state => 'startup' },
        workers => {},          # queued/running resource agent jobs, by $sid
        results => {},          # finished command results, by $uid
        restart_tries => {},    # per-service start retry counters
        shutdown_request => 0,
        shutdown_errors => 0,
        # mode can be: active, reboot, shutdown, restart
        mode => 'active',
        cluster_state_update => 0,
    }, $class;

    $self->set_local_status({ state => 'wait_for_agent_lock' });

    return $self;
}
# Handle an external shutdown/restart request for the LRM daemon.
# Decides - based on the cluster-wide 'shutdown_policy' setting and whether
# the node itself is going down (reboot vs. poweroff) - if services get
# stopped or frozen, queues the stop jobs, and records the shutdown mode.
sub shutdown_request {
    my ($self) = @_;

    return if $self->{shutdown_request}; # already in shutdown mode

    my $haenv = $self->{haenv};

    my $nodename = $haenv->nodename();

    my ($shutdown, $reboot) = $haenv->is_node_shutdown();

    my $dc_ha_cfg = $haenv->get_ha_settings();
    my $shutdown_policy = $dc_ha_cfg->{shutdown_policy} // 'conditional';

    if ($shutdown) { # don't log this on service restart, only on node shutdown
        $haenv->log('info', "got shutdown request with shutdown policy '$shutdown_policy'");
    }

    # freeze_all: keep services assigned to this node (frozen) instead of
    # letting the CRM migrate/recover them while we are gone
    my $freeze_all;
    if ($shutdown_policy eq 'conditional') {
        $freeze_all = $reboot;
    } elsif ($shutdown_policy eq 'freeze') {
        $freeze_all = 1;
    } elsif ($shutdown_policy eq 'failover') {
        $freeze_all = 0;
    } else {
        $haenv->log('err', "unknown shutdown policy '$shutdown_policy', fall back to conditional");
        $freeze_all = $reboot;
    }

    if ($shutdown) {
        # *always* queue stop jobs for all services if the node shuts down,
        # independent if it's a reboot or a poweroff, else we may corrupt
        # services or hinder node shutdown
        my $ss = $self->{service_status};

        foreach my $sid (keys %$ss) {
            my $sd = $ss->{$sid};
            next if !$sd->{node};
            next if $sd->{node} ne $nodename;
            # Note: use undef uid to mark shutdown/stop jobs
            $self->queue_resource_command($sid, undef, 'request_stop');
        }
    }

    if ($shutdown) {
        my $shutdown_type = $reboot ? 'reboot' : 'shutdown';
        if ($freeze_all) {
            $haenv->log('info', "$shutdown_type LRM, stop and freeze all services");
            $self->{mode} = 'restart';
        } else {
            $haenv->log('info', "shutdown LRM, stop all services");
            $self->{mode} = 'shutdown';
        }
    } else {
        $haenv->log('info', "restart LRM, freeze all services");
        $self->{mode} = 'restart';
    }

    $self->{shutdown_request} = 1;

    eval { $self->update_lrm_status() or die "not quorate?\n"; };
    if (my $err = $@) {
        # FIX: was '$self->log(...)' - LRM has no log method, logging goes
        # through the HA environment like everywhere else in this module
        $haenv->log('err', "unable to update lrm status file - $err");
    }
}
# Accessor: return the local LRM status hash ({ state, state_change_time, ... }).
sub get_local_status {
    my ($self) = @_;

    return $self->{status};
}
# Transition the local LRM state machine to $new->{state}.
# Dies on states not listed in $valid_states; no-op if the state is
# unchanged, otherwise logs the transition and stamps state_change_time.
sub set_local_status {
    my ($self, $new) = @_;

    die "invalid state '$new->{state}'" if !$valid_states->{$new->{state}};

    my $haenv = $self->{haenv};

    my $old = $self->{status};

    # important: only update if it really changed
    return if $old->{state} eq $new->{state};

    $haenv->log('info', "status change $old->{state} => $new->{state}");

    $new->{state_change_time} = $haenv->get_time();

    $self->{status} = $new;
}
# Persist the current LRM state (state, mode, command results, timestamp)
# through the HA environment. Returns 1 on success, 0 when the node is not
# quorate or the write failed (failure is logged, not fatal).
sub update_lrm_status {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    # without quorum the status file must not be touched
    return 0 if !$haenv->quorate();

    my $lrm_status = {
        state => $self->{status}->{state},
        mode => $self->{mode},
        results => $self->{results},
        timestamp => $haenv->get_time(),
    };

    eval { $haenv->write_lrm_status($lrm_status); };
    if (my $err = $@) {
        $haenv->log('err', "unable to write lrm status file - $err");
        return 0;
    }

    return 1;
}
# Refresh $self->{service_status} from the CRM manager status.
# Returns 1 on success, undef if the manager status could not be read
# (error is logged; old service status is kept in that case).
sub update_service_status {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    my $ms = eval { $haenv->read_manager_status(); };
    if (my $err = $@) {
        $haenv->log('err', "updating service status from manager failed: $err");
        return undef;
    }

    $self->{service_status} = $ms->{service_status} || {};

    return 1;
}
# Try to acquire (or confirm we still hold) the per-node HA agent lock and
# keep the hardware watchdog fed while we hold it. Retries for at most
# 5 attempts / 5 seconds. Returns 1 when the lock is held, 0 otherwise.
sub get_protected_ha_agent_lock {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    my $count = 0;
    my $starttime = $haenv->get_time();

    for (;;) {

        if ($haenv->get_ha_agent_lock()) {
            # holding the lock: feed the watchdog, opening it on first success
            if ($self->{ha_agent_wd}) {
                $haenv->watchdog_update($self->{ha_agent_wd});
            } else {
                my $wfh = $haenv->watchdog_open();
                $self->{ha_agent_wd} = $wfh;
            }
            return 1;
        }

        last if ++$count > 5; # try max 5 time

        my $delay = $haenv->get_time() - $starttime;
        last if $delay > 5; # for max 5 seconds

        $haenv->sleep(1);
    }

    return 0;
}
# Count the services assigned to this node that are in an active request
# state - i.e. not 'stopped', not 'freeze' and not 'error'.
sub active_service_count {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    my $nodename = $haenv->nodename();

    my $ss = $self->{service_status};

    my $count = 0;
    foreach my $sid (keys %$ss) {
        my $sd = $ss->{$sid};
        next if !$sd->{node};
        next if $sd->{node} ne $nodename;
        my $req_state = $sd->{state};
        next if !defined($req_state);
        next if $req_state eq 'stopped';
        next if $req_state eq 'freeze';
        # erroneous services are not managed by HA, don't count them as active
        next if $req_state eq 'error';

        $count++;
    }

    return $count;
}
# Flag: set once the initial LRM status file was written after daemon start
# (checked/updated in work()).
my $wrote_lrm_status_at_startup = 0;

# Run a single LRM main-loop iteration: refresh the cached cluster state,
# execute work(), and wrap both in the environment's loop hooks.
# Returns work()'s result (0 signals the caller to exit the loop).
sub do_one_iteration {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    $haenv->loop_start_hook();

    $self->{cluster_state_update} = $haenv->cluster_state_update();

    my $res = $self->work();

    $haenv->loop_end_hook();

    return $res;
}
# One pass of the LRM state machine. First handles state *transitions*
# (acquiring/losing the agent lock, fence requests), then does the per-state
# *work* (sleeping, running workers, handling shutdown). Returns 0 to tell
# do_one_iteration()'s caller to terminate, 1 to continue looping.
sub work {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    if (!$wrote_lrm_status_at_startup) {
        if ($self->update_lrm_status()) {
            $wrote_lrm_status_at_startup = 1;
        } else {
            # do nothing - no quorum yet, retry next round
            $haenv->sleep(5);
            return $self->{shutdown_request} ? 0 : 1;
        }
    }

    my $status = $self->get_local_status();
    my $state = $status->{state};

    $self->update_service_status();

    my $fence_request =
        PVE::HA::Tools::count_fenced_services($self->{service_status}, $haenv->nodename());

    # do state changes first

    my $ctime = $haenv->get_time();

    if ($state eq 'wait_for_agent_lock') {

        my $service_count = $self->active_service_count();

        # only grab the lock (and arm the watchdog) if there is work to do
        if (!$fence_request && $service_count && $haenv->quorate()) {
            if ($self->get_protected_ha_agent_lock()) {
                $self->set_local_status({ state => 'active' });
            }
        }

    } elsif ($state eq 'lost_agent_lock') {

        if (!$fence_request && $haenv->quorate()) {
            if ($self->get_protected_ha_agent_lock()) {
                $self->set_local_status({ state => 'active' });
            }
        }

    } elsif ($state eq 'active') {

        if ($fence_request) {
            $haenv->log('err', "node need to be fenced - releasing agent_lock\n");
            $self->set_local_status({ state => 'lost_agent_lock'});
        } elsif (!$self->get_protected_ha_agent_lock()) {
            $self->set_local_status({ state => 'lost_agent_lock'});
        }
    }

    $status = $self->get_local_status();
    $state = $status->{state};

    # do work

    if ($state eq 'wait_for_agent_lock') {

        return 0 if $self->{shutdown_request};

        $self->update_lrm_status();

        $haenv->sleep(5);

    } elsif ($state eq 'active') {

        my $startime = $haenv->get_time();

        my $max_time = 10;

        my $shutdown = 0;

        # do work (max_time seconds)
        eval {
            # fixme: set alert timer

            # if we could not get the current service status there's no point
            # in doing anything, try again next round.
            return if !$self->update_service_status();

            if ($self->{shutdown_request}) {

                if ($self->{mode} eq 'restart') {

                    my $service_count = $self->active_service_count();

                    if ($service_count == 0) {
                        if ($self->run_workers() == 0) {
                            if ($self->{ha_agent_wd}) {
                                $haenv->watchdog_close($self->{ha_agent_wd});
                                delete $self->{ha_agent_wd};
                            }

                            $shutdown = 1;

                            # restart with no or freezed services, release the lock
                            $haenv->release_ha_agent_lock();
                        }
                    }
                } else {

                    if ($self->run_workers() == 0) {
                        if ($self->{shutdown_errors} == 0) {
                            if ($self->{ha_agent_wd}) {
                                $haenv->watchdog_close($self->{ha_agent_wd});
                                delete $self->{ha_agent_wd};
                            }

                            # shutdown with all services stopped thus release the lock
                            $haenv->release_ha_agent_lock();
                        }

                        $shutdown = 1;
                    }
                }
            } else {
                if (!$self->{cluster_state_update}) {
                    # update failed but we could still renew our lock (cfs restart?),
                    # safely skip manage and expect to update just fine next round
                    $haenv->log('notice', "temporary inconsistent cluster state " .
                        "(cfs restart?), skip round");
                    return;
                }

                $self->manage_resources();
            }
        };
        if (my $err = $@) {
            $haenv->log('err', "got unexpected error - $err");
        }

        $self->update_lrm_status();

        return 0 if $shutdown;

        $haenv->sleep_until($startime + $max_time);

    } elsif ($state eq 'lost_agent_lock') {

        # Note: watchdog is active an will triger soon!

        # so we hope to get the lock back soon!

        if ($self->{shutdown_request}) {

            my $service_count = $self->active_service_count();

            if ($service_count > 0) {
                $haenv->log('err', "get shutdown request in state 'lost_agent_lock' - " .
                    "detected $service_count running services");

                if ($self->{mode} eq 'restart') {
                    my $state_mt = $self->{status}->{state_change_time};

                    # watchdog should have already triggered, so either it's set
                    # set to noboot or it failed. As we are in restart mode, and
                    # have infinity stoptimeout -> exit now - we don't touch services
                    # or change state, so this is save, relatively speaking
                    if (($haenv->get_time() - $state_mt) > 90) {
                        $haenv->log('err', "lost agent lock and restart request for over 90 seconds - giving up!");
                        return 0;
                    }
                }
            } else {

                # all services are stopped, so we can close the watchdog

                if ($self->{ha_agent_wd}) {
                    $haenv->watchdog_close($self->{ha_agent_wd});
                    delete $self->{ha_agent_wd};
                }

                return 0;
            }
        }

        $haenv->sleep(5);

    } else {

        die "got unexpected status '$state'\n";
    }

    return 1;
}
# Start queued resource agent commands, forking up to $max_workers child
# processes (or executing inline when max_workers is 0, e.g. in regression
# tests). Loops for at most ~5 seconds reaping/starting workers.
# Returns the number of workers still queued or running.
sub run_workers {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    my $starttime = $haenv->get_time();

    # number of workers to start, if 0 we exec the command directly witouth forking
    my $max_workers = $haenv->get_max_workers();

    my $sc = $haenv->read_service_config();

    while (($haenv->get_time() - $starttime) < 5) {
        my $count = $self->check_active_workers();

        foreach my $sid (sort keys %{$self->{workers}}) {
            last if $count >= $max_workers && $max_workers > 0;

            my $w = $self->{workers}->{$sid};
            if (!$w->{pid}) {
                # only fork if we may else call exec_resource_agent
                # directly (e.g. for regression tests)
                if ($max_workers > 0) {
                    my $pid = fork();
                    if (!defined($pid)) {
                        $haenv->log('err', "fork worker failed");
                        $count = 0; last; # abort, try later
                    } elsif ($pid == 0) {
                        $haenv->after_fork(); # cleanup

                        # do work
                        my $res = -1;
                        eval {
                            $res = $self->exec_resource_agent($sid, $sc->{$sid}, $w->{state}, $w->{params});
                        };
                        if (my $err = $@) {
                            $haenv->log('err', $err);
                            POSIX::_exit(-1);
                        }
                        POSIX::_exit($res);
                    } else {
                        $count++;
                        $w->{pid} = $pid;
                    }
                } else {
                    my $res = -1;
                    eval {
                        $res = $self->exec_resource_agent($sid, $sc->{$sid}, $w->{state}, $w->{params});
                        # shift like a real exit status so the *_finished
                        # handlers can treat both paths the same
                        $res = $res << 8 if $res > 0;
                    };
                    if (my $err = $@) {
                        $haenv->log('err', $err);
                    }

                    if (defined($w->{uid})) {
                        $self->resource_command_finished($sid, $w->{uid}, $res);
                    } else {
                        $self->stop_command_finished($sid, $res);
                    }
                }
            }
        }

        last if !$count;

        $haenv->sleep(1);
    }

    return scalar(keys %{$self->{workers}});
}
# Queue commands for all services assigned to this node (skipping frozen
# ones), prune stale restart counters, and run the worker queue.
# Returns run_workers()'s count of still-pending workers.
sub manage_resources {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    my $nodename = $haenv->nodename();

    my $ss = $self->{service_status};

    # drop retry counters of services no longer in the manager status
    foreach my $sid (keys %{$self->{restart_tries}}) {
        delete $self->{restart_tries}->{$sid} if !$ss->{$sid};
    }

    foreach my $sid (keys %$ss) {
        my $sd = $ss->{$sid};
        next if !$sd->{node};
        next if !$sd->{uid};
        next if $sd->{node} ne $nodename;
        my $req_state = $sd->{state};
        next if !defined($req_state);
        next if $req_state eq 'freeze';

        $self->queue_resource_command($sid, $sd->{uid}, $req_state, {'target' => $sd->{target}});
    }

    return $self->run_workers();
}
# Queue a resource agent command for $sid. A queued-but-not-started entry is
# overwritten by a newer command; a started one (has a pid) is left alone.
sub queue_resource_command {
    my ($self, $sid, $uid, $state, $params) = @_;

    # do not queue the exactly same command twice as this may lead to
    # an inconsistent HA state when the first command fails but the CRM
    # does not process its failure right away and the LRM starts a second
    # try, without the CRM knowing of it (race condition)
    # The 'stopped' command is an exception as we do not process its result
    # in the CRM and we want to execute it always (even with no active CRM)
    return if $state ne 'stopped' && $uid && defined($self->{results}->{$uid});

    if (my $w = $self->{workers}->{$sid}) {
        return if $w->{pid}; # already started
        # else, delete and overwrite queue entry with new command
        delete $self->{workers}->{$sid};
    }

    $self->{workers}->{$sid} = {
        sid => $sid,
        uid => $uid,
        state => $state,
    };

    $self->{workers}->{$sid}->{params} = $params if $params;
}
# Reap finished worker children (non-blocking waitpid) and dispatch their
# exit status to the matching *_finished handler. Returns the number of
# workers still running.
sub check_active_workers {
    my ($self) = @_;

    # finish/count workers
    my $count = 0;
    foreach my $sid (keys %{$self->{workers}}) {
        my $w = $self->{workers}->{$sid};
        if (my $pid = $w->{pid}) {
            # check status
            my $waitpid = waitpid($pid, WNOHANG);
            if (defined($waitpid) && ($waitpid == $pid)) {
                # child exited - $? holds its wait status
                if (defined($w->{uid})) {
                    $self->resource_command_finished($sid, $w->{uid}, $?);
                } else {
                    $self->stop_command_finished($sid, $?);
                }
            } else {
                $count++; # still running
            }
        }
    }

    return $count;
}
# Handle completion of a shutdown/stop job (queued with undef uid).
# $status is a wait()-style status word; any non-zero agent exit code
# increments the shutdown error counter checked during LRM shutdown.
sub stop_command_finished {
    my ($self, $sid, $status) = @_;

    my $haenv = $self->{haenv};

    my $w = delete $self->{workers}->{$sid};
    return if !$w; # should not happen

    my $exit_code = -1;

    if ($status == -1) {
        $haenv->log('err', "resource agent $sid finished - failed to execute");
    } elsif (my $sig = ($status & 127)) {
        $haenv->log('err', "resource agent $sid finished - got signal $sig");
    } else {
        $exit_code = ($status >> 8);
    }

    if ($exit_code != 0) {
        $self->{shutdown_errors}++;
    }
}
# Handle completion of a regular (uid-tagged) resource agent command:
# decode the wait status, run it through the restart policy, record the
# result for the CRM and prune results whose uid is no longer referenced
# by the manager's service status.
sub resource_command_finished {
    my ($self, $sid, $uid, $status) = @_;

    my $haenv = $self->{haenv};

    my $w = delete $self->{workers}->{$sid};
    return if !$w; # should not happen

    my $exit_code = -1;

    if ($status == -1) {
        $haenv->log('err', "resource agent $sid finished - failed to execute");
    } elsif (my $sig = ($status & 127)) {
        $haenv->log('err', "resource agent $sid finished - got signal $sig");
    } else {
        $exit_code = ($status >> 8);
    }

    $exit_code = $self->handle_service_exitcode($sid, $w->{state}, $exit_code);

    return if $exit_code == ETRY_AGAIN; # tell nobody, simply retry

    $self->{results}->{$uid} = {
        sid => $w->{sid},
        state => $w->{state},
        exit_code => $exit_code,
    };

    my $ss = $self->{service_status};

    # compute hash of valid/existing uids
    my $valid_uids = {};
    foreach my $sid (keys %$ss) {
        my $sd = $ss->{$sid};
        next if !$sd->{uid};
        $valid_uids->{$sd->{uid}} = 1;
    }

    # garbage collect results for uids no longer present
    my $results = {};
    foreach my $id (keys %{$self->{results}}) {
        next if !$valid_uids->{$id};
        $results->{$id} = $self->{results}->{$id};
    }
    $self->{results} = $results;
}
# processes the exit code from a finished resource agent, so that the CRM knows
# if the LRM wants to retry an action based on the current recovery policies for
# the failed service, or the CRM itself must try to recover from the failure.
sub handle_service_exitcode {
    my ($self, $sid, $cmd, $exit_code) = @_;

    my $haenv = $self->{haenv};
    my $tries = $self->{restart_tries};

    my $sc = $haenv->read_service_config();

    my $max_restart = 0;

    if (my $cd = $sc->{$sid}) {
        $max_restart = $cd->{max_restart};
    }

    # the restart policy only applies to 'started' commands
    if ($cmd eq 'started') {

        if ($exit_code == SUCCESS) {

            $tries->{$sid} = 0;

            return $exit_code;

        } elsif ($exit_code == ERROR) {

            $tries->{$sid} = 0 if !defined($tries->{$sid});

            if ($tries->{$sid} >= $max_restart) {
                $haenv->log('err', "unable to start service $sid on local node".
                    " after $tries->{$sid} retries");
                $tries->{$sid} = 0;
                return ERROR;
            }

            $tries->{$sid}++;

            $haenv->log('warning', "restart policy: retry number $tries->{$sid}" .
                " for service '$sid'");
            # tell CRM that we retry the start
            return ETRY_AGAIN;
        }
    }

    return $exit_code;
}
710 sub exec_resource_agent
{
711 my ($self, $sid, $service_config, $cmd, $params) = @_;
713 # setup execution environment
715 $ENV{'PATH'} = '/sbin:/bin:/usr/sbin:/usr/bin';
717 my $haenv = $self->{haenv
};
719 my $nodename = $haenv->nodename();
721 my (undef, $service_type, $service_name) = $haenv->parse_sid($sid);
723 my $plugin = PVE
::HA
::Resources-
>lookup($service_type);
725 $haenv->log('err', "service type '$service_type' not implemented");
726 return EUNKNOWN_SERVICE_TYPE
;
729 if (!$service_config) {
730 $haenv->log('err', "missing resource configuration for '$sid'");
731 return EUNKNOWN_SERVICE
;
734 # process error state early
735 if ($cmd eq 'error') {
737 $haenv->log('err', "service $sid is in an error state and needs manual " .
738 "intervention. Look up 'ERROR RECOVERY' in the documentation.");
740 return SUCCESS
; # error always succeeds
743 if ($service_config->{node
} ne $nodename) {
744 $haenv->log('err', "service '$sid' not on this node");
748 my $id = $service_name;
750 my $running = $plugin->check_running($haenv, $id);
752 if ($cmd eq 'started') {
754 return SUCCESS
if $running;
756 $haenv->log("info", "starting service $sid");
758 $plugin->start($haenv, $id);
760 $running = $plugin->check_running($haenv, $id);
763 $haenv->log("info", "service status $sid started");
766 $haenv->log("warning", "unable to start service $sid");
770 } elsif ($cmd eq 'request_stop' || $cmd eq 'stopped') {
772 return SUCCESS
if !$running;
774 $haenv->log("info", "stopping service $sid");
776 $plugin->shutdown($haenv, $id);
778 $running = $plugin->check_running($haenv, $id);
781 $haenv->log("info", "service status $sid stopped");
784 $haenv->log("info", "unable to stop stop service $sid (still running)");
788 } elsif ($cmd eq 'migrate' || $cmd eq 'relocate') {
790 my $target = $params->{target
};
791 if (!defined($target)) {
792 die "$cmd '$sid' failed - missing target\n" if !defined($target);
793 return EINVALID_PARAMETER
;
796 if ($service_config->{node
} eq $target) {
801 my $online = ($cmd eq 'migrate') ?
1 : 0;
803 my $res = $plugin->migrate($haenv, $id, $target, $online);
805 # something went wrong if service is still on this node
807 $haenv->log("err", "service $sid not moved (migration error)");
815 $haenv->log("err", "implement me (cmd '$cmd')");
816 return EUNKNOWN_COMMAND
;