3 # Local Resource Manager
7 use POSIX
qw(:sys_wait_h);
11 use PVE
::HA
::Tools
':exit_codes';
12 use PVE
::HA
::Resources
;
# NOTE(review): this extract is line-sampled and mangled — the surrounding
# `my $valid_states = { ... };` declaration lines appear to be missing from
# view. The entries map each valid LRM state name to a human-readable
# description; set_local_status() validates against this table.
14 # Server can have several states:
17 wait_for_agent_lock
=> "waiting for agent lock",
18 active
=> "got agent_lock",
19 lost_agent_lock
=> "lost agent_lock",
# Constructor fragment (the `sub new {` header and blessing code are missing
# from this extract). Initializes the LRM object: status starts at 'startup',
# no shutdown requested, and the per-round cluster-state-update flag cleared;
# then transitions to 'wait_for_agent_lock'.
23 my ($this, $haenv) = @_;
25 my $class = ref($this) || $this;
# initial local status; set_local_status() below immediately replaces it
29 status
=> { state => 'startup' },
33 shutdown_request
=> 0,
35 # mode can be: active, reboot, shutdown, restart
37 cluster_state_update
=> 0,
40 $self->set_local_status({ state => 'wait_for_agent_lock' });
# Handle a request to shut down or restart the LRM daemon.
#
# Distinguishes a real node shutdown/reboot (via $haenv->is_node_shutdown)
# from a plain daemon restart, applies the datacenter-wide HA
# 'shutdown_policy' (conditional | freeze | failover) and sets
# $self->{mode} accordingly:
#   - 'restart'  -> services get frozen (daemon restart, or policy freezes)
#   - 'shutdown' -> services get stopped (node poweroff / failover policy)
# Finally flags $self->{shutdown_request} and persists the LRM status.
sub shutdown_request {
    my ($self) = @_;

    return if $self->{shutdown_request}; # already in shutdown mode

    my $haenv = $self->{haenv};

    my $nodename = $haenv->nodename();

    my ($shutdown, $reboot) = $haenv->is_node_shutdown();

    my $dc_ha_cfg = $haenv->get_ha_settings();
    my $shutdown_policy = $dc_ha_cfg->{shutdown_policy} // 'conditional';

    if ($shutdown) { # don't log this on service restart, only on node shutdown
        $haenv->log('info', "got shutdown request with shutdown policy '$shutdown_policy'");
    }

    my $freeze_all = $reboot;
    if ($shutdown_policy eq 'conditional') {
        $freeze_all = $reboot;
    } elsif ($shutdown_policy eq 'freeze') {
        $freeze_all = 1;
    } elsif ($shutdown_policy eq 'failover') {
        $freeze_all = 0;
    } else {
        # FIX: log message said 'unkown'
        $haenv->log('err', "unknown shutdown policy '$shutdown_policy', fall back to conditional");
        $freeze_all = $reboot; # behave like 'conditional'
    }

    if ($shutdown) {
        # *always* queue stop jobs for all services if the node shuts down,
        # independent if it's a reboot or a poweroff, else we may corrupt
        # services or hinder node shutdown
        my $ss = $self->{service_status};

        foreach my $sid (keys %$ss) {
            my $sd = $ss->{$sid};
            next if $sd->{node} ne $nodename;
            # Note: use undef uid to mark shutdown/stop jobs
            $self->queue_resource_command($sid, undef, 'request_stop');
        }
    }

    if ($shutdown) {
        if ($freeze_all) {
            if ($shutdown_policy eq 'conditional') {
                $haenv->log('info', "reboot LRM, stop and freeze all services");
            } else {
                $haenv->log('info', "shutdown LRM, stop and freeze all services");
            }
            $self->{mode} = 'restart';
        } else {
            $haenv->log('info', "shutdown LRM, stop all services");
            $self->{mode} = 'shutdown';
        }
    } else {
        $haenv->log('info', "restart LRM, freeze all services");
        $self->{mode} = 'restart';
    }

    $self->{shutdown_request} = 1;

    eval { $self->update_lrm_status(); };
    if (my $err = $@) {
        # FIX: was `$self->log(...)` — the LRM object has no log() method;
        # logging goes through the HA environment like everywhere else.
        $haenv->log('err', "unable to update lrm status file - $err");
    }
}
# Accessor: return the LRM's local status hash
# (contains at least 'state' and, after a change, 'state_change_time').
sub get_local_status {
    my $self = shift;
    return $self->{status};
}
# Transition the LRM to a new local state.
#
# Dies on a state name not present in $valid_states, ignores no-op
# transitions, logs the change, stamps the new status hash with the
# current time and installs it as $self->{status}.
sub set_local_status {
    my ($self, $new) = @_;

    die "invalid state '$new->{state}'" if !$valid_states->{$new->{state}};

    my $haenv = $self->{haenv};
    my $old = $self->{status};

    # important: only update if it really changed
    return if $old->{state} eq $new->{state};

    $haenv->log('info', "status change $old->{state} => $new->{state}");

    $new->{state_change_time} = $haenv->get_time();

    $self->{status} = $new;
}
# Persist the LRM's current status (state, mode, worker results, timestamp)
# through the HA environment.
#
# Returns 1 on success; 0 when the node has no quorum (stale data must not
# be written) or when writing the status file fails.
sub update_lrm_status {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    return 0 if !$haenv->quorate();

    my $lrm_status = {
        state => $self->{status}->{state},
        mode => $self->{mode},
        results => $self->{results},
        timestamp => $haenv->get_time(),
    };

    eval { $haenv->write_lrm_status($lrm_status); };
    if (my $err = $@) {
        $haenv->log('err', "unable to write lrm status file - $err");
        return 0;
    }

    return 1;
}
# Refresh $self->{service_status} from the CRM manager status.
# NOTE(review): this extract is line-sampled — the argument unpacking, the
# `if (my $err = $@)` error check and the return statements are missing
# from view; fragments preserved as-is, comments only.
162 sub update_service_status
{
165 my $haenv = $self->{haenv
};
# errors from reading the manager status are trapped by the eval
167 my $ms = eval { $haenv->read_manager_status(); };
# $err presumably bound by a missing `if (my $err = $@)` line — TODO confirm
169 $haenv->log('err', "updating service status from manager failed: $err");
# fall back to an empty hash when the manager reports no service status
172 $self->{service_status
} = $ms->{service_status
} || {};
# Try to acquire the per-node HA agent lock and keep the watchdog fed while
# holding it. Retries for a bounded number of attempts / seconds.
# NOTE(review): extract is line-sampled — the retry loop construct, the
# else-branch braces and the return statements are missing from view.
177 sub get_protected_ha_agent_lock
{
180 my $haenv = $self->{haenv
};
183 my $starttime = $haenv->get_time();
# got the cluster lock: refresh an already-open watchdog, else open one
187 if ($haenv->get_ha_agent_lock()) {
188 if ($self->{ha_agent_wd
}) {
189 $haenv->watchdog_update($self->{ha_agent_wd
});
191 my $wfh = $haenv->watchdog_open();
192 $self->{ha_agent_wd
} = $wfh;
# bound retries by both attempt count and wall-clock time
197 last if ++$count > 5; # try max 5 times
199 my $delay = $haenv->get_time() - $starttime;
200 last if $delay > 5; # for max 5 seconds
# Count the HA services assigned to this node whose requested state is
# "active" work, i.e. anything other than 'stopped', 'freeze' or 'error'.
sub active_service_count {
    my ($self) = @_;

    my $haenv = $self->{haenv};
    my $nodename = $haenv->nodename();

    my $ss = $self->{service_status};

    my $count = 0;
    foreach my $sid (keys %$ss) {
        my $sd = $ss->{$sid};
        next if !$sd->{node};
        next if $sd->{node} ne $nodename;
        my $req_state = $sd->{state};
        next if !defined($req_state);
        next if $req_state eq 'stopped';
        next if $req_state eq 'freeze';
        # erroneous services are not managed by HA, don't count them as active
        next if $req_state eq 'error';

        $count++;
    }

    return $count;
}
# File-scoped flag: ensures the LRM status file is written once at startup
# (checked inside work(), see below).
236 my $wrote_lrm_status_at_startup = 0;
# One scheduler round: run work() bracketed by the environment's loop hooks.
# NOTE(review): extract is line-sampled — the `{`/`}` around the body and
# the return handling of $res are missing from view.
238 sub do_one_iteration
{
241 my $haenv = $self->{haenv
};
243 $haenv->loop_start_hook();
# remember whether the cluster state could be refreshed this round; work()
# skips manage_resources() when it could not (see below)
245 $self->{cluster_state_update
} = $haenv->cluster_state_update();
247 my $res = $self->work();
249 $haenv->loop_end_hook();
# Body of the LRM state machine (`sub work`); the sub header line itself is
# missing from this line-sampled extract, as are many braces/else-branches.
# Fragments preserved byte-for-byte, comments only. The machine first
# computes state transitions (wait_for_agent_lock / active /
# lost_agent_lock), then acts on the resulting state.
257 my $haenv = $self->{haenv
};
# write the LRM status once at startup, as soon as it succeeds
259 if (!$wrote_lrm_status_at_startup) {
260 if ($self->update_lrm_status()) {
261 $wrote_lrm_status_at_startup = 1;
# could not write yet: retry next round; signal shutdown if requested
265 return $self->{shutdown_request
} ?
0 : 1;
269 my $status = $self->get_local_status();
270 my $state = $status->{state};
272 $self->update_service_status();
# how many of our services the manager marked for fencing
274 my $fence_request = PVE
::HA
::Tools
::count_fenced_services
($self->{service_status
}, $haenv->nodename());
276 # do state changes first
278 my $ctime = $haenv->get_time();
# only try to become active when there is something to do, nothing wants us
# fenced, and we have quorum
280 if ($state eq 'wait_for_agent_lock') {
282 my $service_count = $self->active_service_count();
284 if (!$fence_request && $service_count && $haenv->quorate()) {
285 if ($self->get_protected_ha_agent_lock()) {
286 $self->set_local_status({ state => 'active' });
# try to regain the lock after having lost it
290 } elsif ($state eq 'lost_agent_lock') {
292 if (!$fence_request && $haenv->quorate()) {
293 if ($self->get_protected_ha_agent_lock()) {
294 $self->set_local_status({ state => 'active' });
# while active: drop the lock on a fence request or when renewing it fails
298 } elsif ($state eq 'active') {
300 if ($fence_request) {
301 $haenv->log('err', "node need to be fenced - releasing agent_lock\n");
302 $self->set_local_status({ state => 'lost_agent_lock'});
303 } elsif (!$self->get_protected_ha_agent_lock()) {
304 $self->set_local_status({ state => 'lost_agent_lock'});
# re-read the (possibly changed) state before acting on it
308 $status = $self->get_local_status();
309 $state = $status->{state};
313 if ($state eq 'wait_for_agent_lock') {
315 return 0 if $self->{shutdown_request
};
317 $self->update_lrm_status();
321 } elsif ($state eq 'active') {
323 my $startime = $haenv->get_time();
329 # do work (max_time seconds)
331 # fixme: set alert timer
333 # if we could not get the current service status there's no point
334 # in doing anything, try again next round.
335 return if !$self->update_service_status();
337 if ($self->{shutdown_request
}) {
# restart mode: wait until no active services remain, then close the
# watchdog and release the agent lock
339 if ($self->{mode
} eq 'restart') {
341 my $service_count = $self->active_service_count();
343 if ($service_count == 0) {
345 if ($self->run_workers() == 0) {
346 if ($self->{ha_agent_wd
}) {
347 $haenv->watchdog_close($self->{ha_agent_wd
});
348 delete $self->{ha_agent_wd
};
353 # restart with no or frozen services, release the lock
354 $haenv->release_ha_agent_lock();
# shutdown mode: wait for all workers to finish without errors first
359 if ($self->run_workers() == 0) {
360 if ($self->{shutdown_errors
} == 0) {
361 if ($self->{ha_agent_wd
}) {
362 $haenv->watchdog_close($self->{ha_agent_wd
});
363 delete $self->{ha_agent_wd
};
366 # shutdown with all services stopped thus release the lock
367 $haenv->release_ha_agent_lock();
# normal operation: only manage resources on a consistent cluster view
374 if (!$self->{cluster_state_update
}) {
375 # update failed but we could still renew our lock (cfs restart?),
376 # safely skip manage and expect to update just fine next round
377 $haenv->log('notice', "temporary inconsistent cluster state " .
378 "(cfs restart?), skip round");
382 $self->manage_resources();
387 $haenv->log('err', "got unexpected error - $err");
390 $self->update_lrm_status();
392 return 0 if $shutdown;
# sleep away the remainder of the round
394 $haenv->sleep_until($startime + $max_time);
396 } elsif ($state eq 'lost_agent_lock') {
398 # Note: watchdog is active and will trigger soon!
400 # so we hope to get the lock back soon!
402 if ($self->{shutdown_request
}) {
404 my $service_count = $self->active_service_count();
406 if ($service_count > 0) {
407 $haenv->log('err', "get shutdown request in state 'lost_agent_lock' - " .
408 "detected $service_count running services");
412 # all services are stopped, so we can close the watchdog
414 if ($self->{ha_agent_wd
}) {
415 $haenv->watchdog_close($self->{ha_agent_wd
});
416 delete $self->{ha_agent_wd
};
427 die "got unexpected status '$state'\n";
# Body of `sub run_workers` (header line missing from this line-sampled
# extract). Starts queued resource-agent commands, forking up to
# $max_workers children, for at most ~5 seconds per call; returns the
# number of workers still queued/active. Fragments preserved as-is.
437 my $haenv = $self->{haenv
};
439 my $starttime = $haenv->get_time();
441 # number of workers to start, if 0 we exec the command directly without forking
442 my $max_workers = $haenv->get_max_workers();
444 my $sc = $haenv->read_service_config();
# bounded by wall-clock time so one call cannot stall the LRM round
446 while (($haenv->get_time() - $starttime) < 5) {
447 my $count = $self->check_active_workers();
449 foreach my $sid (sort keys %{$self->{workers
}}) {
450 last if $count >= $max_workers && $max_workers > 0;
452 my $w = $self->{workers
}->{$sid};
454 # only fork if we may else call exec_resource_agent
455 # directly (e.g. for regression tests)
456 if ($max_workers > 0) {
# $pid presumably set by a missing fork() line just above — TODO confirm
458 if (!defined($pid)) {
459 $haenv->log('err', "fork worker failed");
460 $count = 0; last; # abort, try later
461 } elsif ($pid == 0) {
462 $haenv->after_fork(); # cleanup
# child: run the agent and exit with its result (exit path not visible)
467 $res = $self->exec_resource_agent($sid, $sc->{$sid}, $w->{state}, $w->{target
});
470 $haenv->log('err', $err);
# non-forking path: run inline and shift into wait()-style status format
481 $res = $self->exec_resource_agent($sid, $sc->{$sid}, $w->{state}, $w->{target
});
482 $res = $res << 8 if $res > 0;
485 $haenv->log('err', $err);
# undef uid marks shutdown/stop jobs (see queue_resource_command)
487 if (defined($w->{uid
})) {
488 $self->resource_command_finished($sid, $w->{uid
}, $res);
490 $self->stop_command_finished($sid, $res);
501 return scalar(keys %{$self->{workers
}});
# Queue a worker command for every service assigned to this node (except
# frozen ones), prune stale restart counters, then run the workers.
# NOTE(review): extract is line-sampled — at least one guard line in the
# service loop (original line 520) is missing from view.
504 sub manage_resources
{
507 my $haenv = $self->{haenv
};
509 my $nodename = $haenv->nodename();
511 my $ss = $self->{service_status
};
# drop restart counters for services the manager no longer reports
513 foreach my $sid (keys %{$self->{restart_tries
}}) {
514 delete $self->{restart_tries
}->{$sid} if !$ss->{$sid};
517 foreach my $sid (keys %$ss) {
518 my $sd = $ss->{$sid};
519 next if !$sd->{node
};
521 next if $sd->{node
} ne $nodename;
522 my $req_state = $sd->{state};
523 next if !defined($req_state);
524 next if $req_state eq 'freeze';
525 $self->queue_resource_command($sid, $sd->{uid
}, $req_state, $sd->{target
});
# returns the number of workers still queued/active
528 return $self->run_workers();
# Enqueue a resource-agent command for $sid unless an identical one (same
# uid) already produced a result, or a worker for $sid is already running.
# NOTE(review): extract is line-sampled — the fields of the queued worker
# hash (original lines 549-553) are missing from view.
531 sub queue_resource_command
{
532 my ($self, $sid, $uid, $state, $target) = @_;
534 # do not queue the exactly same command twice as this may lead to
535 # an inconsistent HA state when the first command fails but the CRM
536 # does not process its failure right away and the LRM starts a second
537 # try, without the CRM knowing of it (race condition)
538 # The 'stopped' command is an exception as we do not process its result
539 # in the CRM and we want to execute it always (even with no active CRM)
540 return if $state ne 'stopped' && $uid && defined($self->{results
}->{$uid});
542 if (my $w = $self->{workers
}->{$sid}) {
543 return if $w->{pid
}; # already started
544 # else, delete and overwrite queue entry with new command
545 delete $self->{workers
}->{$sid};
548 $self->{workers
}->{$sid} = {
# migrate/relocate commands carry an optional target node
554 $self->{workers
}->{$sid}->{target
} = $target if $target;
# Reap finished worker children (non-blocking waitpid) and dispatch their
# exit status; presumably returns the number still running — the counter
# and return lines are missing from this line-sampled extract.
557 sub check_active_workers
{
560 # finish/count workers
562 foreach my $sid (keys %{$self->{workers
}}) {
563 my $w = $self->{workers
}->{$sid};
# only entries with a pid have actually been forked
564 if (my $pid = $w->{pid
}) {
# WNOHANG: poll without blocking the LRM loop
566 my $waitpid = waitpid($pid, WNOHANG
);
567 if (defined($waitpid) && ($waitpid == $pid)) {
# undef uid marks shutdown/stop jobs; $? holds the wait status
568 if (defined($w->{uid
})) {
569 $self->resource_command_finished($sid, $w->{uid
}, $?);
571 $self->stop_command_finished($sid, $?);
# Handle completion of a shutdown/stop job (queued with undef uid): decode
# the wait status and count failures in $self->{shutdown_errors}, which
# gates releasing the agent lock during shutdown (see work()).
# NOTE(review): the $exit_code initialization and the `if ($status == -1)`
# branch head are missing from this line-sampled extract.
582 sub stop_command_finished
{
583 my ($self, $sid, $status) = @_;
585 my $haenv = $self->{haenv
};
587 my $w = delete $self->{workers
}->{$sid};
588 return if !$w; # should not happen
593 $haenv->log('err', "resource agent $sid finished - failed to execute");
# low 7 bits of a wait status carry the terminating signal, if any
594 } elsif (my $sig = ($status & 127)) {
595 $haenv->log('err', "resource agent $sid finished - got signal $sig");
# otherwise the high byte is the agent's exit code
597 $exit_code = ($status >> 8);
600 if ($exit_code != 0) {
601 $self->{shutdown_errors
}++;
# Handle completion of a regular resource-agent command: decode the wait
# status, let handle_service_exitcode() apply the restart policy, record
# the result under its uid for the CRM, and prune results whose uid is no
# longer referenced by any service. Fragments preserved as-is; several
# interior lines are missing from this line-sampled extract.
605 sub resource_command_finished
{
606 my ($self, $sid, $uid, $status) = @_;
608 my $haenv = $self->{haenv
};
610 my $w = delete $self->{workers
}->{$sid};
611 return if !$w; # should not happen
616 $haenv->log('err', "resource agent $sid finished - failed to execute");
# low 7 bits of a wait status carry the terminating signal, if any
617 } elsif (my $sig = ($status & 127)) {
618 $haenv->log('err', "resource agent $sid finished - got signal $sig");
620 $exit_code = ($status >> 8);
# may translate ERROR into ETRY_AGAIN per the restart policy
623 $exit_code = $self->handle_service_exitcode($sid, $w->{state}, $exit_code);
625 return if $exit_code == ETRY_AGAIN
; # tell nobody, simply retry
# record the result so the CRM can pick it up
627 $self->{results
}->{$uid} = {
629 state => $w->{state},
630 exit_code
=> $exit_code,
633 my $ss = $self->{service_status
};
635 # compute hash of valid/existing uids
637 foreach my $sid (keys %$ss) {
638 my $sd = $ss->{$sid};
640 $valid_uids->{$sd->{uid
}} = 1;
# keep only results whose uid is still referenced by some service
644 foreach my $id (keys %{$self->{results
}}) {
645 next if !$valid_uids->{$id};
646 $results->{$id} = $self->{results
}->{$id};
648 $self->{results
} = $results;
651 # processes the exit code from a finished resource agent, so that the CRM knows
652 # if the LRM wants to retry an action based on the current recovery policies for
653 # the failed service, or the CRM itself must try to recover from the failure.
# NOTE(review): extract is line-sampled — the $max_restart default, the
# SUCCESS branch body, the retry-increment and the final return are missing
# from view; fragments preserved as-is, comments only.
654 sub handle_service_exitcode
{
655 my ($self, $sid, $cmd, $exit_code) = @_;
657 my $haenv = $self->{haenv
};
658 my $tries = $self->{restart_tries
};
660 my $sc = $haenv->read_service_config();
# per-service override of the restart limit, when configured
664 if (my $cd = $sc->{$sid}) {
665 $max_restart = $cd->{max_restart
};
# restart policy only applies to 'started' commands
668 if ($cmd eq 'started') {
670 if ($exit_code == SUCCESS
) {
676 } elsif ($exit_code == ERROR
) {
678 $tries->{$sid} = 0 if !defined($tries->{$sid});
# give up once the configured retry budget is exhausted
680 if ($tries->{$sid} >= $max_restart) {
681 $haenv->log('err', "unable to start service $sid on local node".
682 " after $tries->{$sid} retries");
689 $haenv->log('warning', "restart policy: retry number $tries->{$sid}" .
690 " for service '$sid'");
691 # tell CRM that we retry the start
# Execute one resource-agent command (started / stopped / request_stop /
# migrate / relocate / error) for service $sid via its resource plugin and
# return an exit-code constant. NOTE(review): this line-sampled extract is
# also truncated — the sub's tail (past original line 806) is not visible;
# fragments preserved as-is, comments only.
700 sub exec_resource_agent
{
701 my ($self, $sid, $service_config, $cmd, @params) = @_;
703 # setup execution environment
705 $ENV{'PATH'} = '/sbin:/bin:/usr/sbin:/usr/bin';
707 my $haenv = $self->{haenv
};
709 my $nodename = $haenv->nodename();
711 my (undef, $service_type, $service_name) = $haenv->parse_sid($sid);
713 my $plugin = PVE
::HA
::Resources-
>lookup($service_type);
715 $haenv->log('err', "service type '$service_type' not implemented");
716 return EUNKNOWN_SERVICE_TYPE
;
719 if (!$service_config) {
720 $haenv->log('err', "missing resource configuration for '$sid'");
721 return EUNKNOWN_SERVICE
;
724 # process error state early
725 if ($cmd eq 'error') {
727 $haenv->log('err', "service $sid is in an error state and needs manual " .
728 "intervention. Look up 'ERROR RECOVERY' in the documentation.");
730 return SUCCESS
; # error always succeeds
# sanity check: we must own the service to act on it
733 if ($service_config->{node
} ne $nodename) {
734 $haenv->log('err', "service '$sid' not on this node");
738 my $id = $service_name;
740 my $running = $plugin->check_running($haenv, $id);
# 'started': idempotent — succeed immediately when already running
742 if ($cmd eq 'started') {
744 return SUCCESS
if $running;
746 $haenv->log("info", "starting service $sid");
748 $plugin->start($haenv, $id);
# verify the start actually took effect
750 $running = $plugin->check_running($haenv, $id);
753 $haenv->log("info", "service status $sid started");
756 $haenv->log("warning", "unable to start service $sid");
# 'request_stop'/'stopped': idempotent — succeed when already stopped
760 } elsif ($cmd eq 'request_stop' || $cmd eq 'stopped') {
762 return SUCCESS
if !$running;
764 $haenv->log("info", "stopping service $sid");
766 $plugin->shutdown($haenv, $id);
768 $running = $plugin->check_running($haenv, $id);
771 $haenv->log("info", "service status $sid stopped");
# NOTE(review): runtime string below has a doubled word ("stop stop") —
# left unchanged here since it is program output, not a comment
774 $haenv->log("info", "unable to stop stop service $sid (still running)");
778 } elsif ($cmd eq 'migrate' || $cmd eq 'relocate') {
780 my $target = $params[0];
781 if (!defined($target)) {
782 die "$cmd '$sid' failed - missing target\n" if !defined($target);
783 return EINVALID_PARAMETER
;
# nothing to do when the service is already on the target node
786 if ($service_config->{node
} eq $target) {
791 my $online = ($cmd eq 'migrate') ?
1 : 0;
793 my $res = $plugin->migrate($haenv, $id, $target, $online);
795 # something went wrong if service is still on this node
797 $haenv->log("err", "service $sid not moved (migration error)");
805 $haenv->log("err", "implement me (cmd '$cmd')");
806 return EUNKNOWN_COMMAND
;