3 # Local Resource Manager
7 use POSIX
qw(:sys_wait_h);
11 use PVE
::HA
::Tools
':exit_codes';
12 use PVE
::HA
::Resources
;
14 # Server can have several states:
# NOTE(review): map of valid LRM state names => human-readable descriptions.
# Looked up by set_local_status() to reject unknown states. The enclosing
# declaration line (presumably "my $valid_states = {") is not visible in
# this chunk — confirm against the full file.
17 wait_for_agent_lock
=> "waiting for agent lock",
18 active
=> "got agent_lock",
19 lost_agent_lock
=> "lost agent_lock",
# Constructor body fragment (the "sub new {" line and the blessing of $self
# are not visible in this chunk). Initializes the LRM object state and moves
# it into the initial 'wait_for_agent_lock' state.
23 my ($this, $haenv) = @_;
25 my $class = ref($this) || $this;
# Fields initialized below (others, e.g. haenv/workers/results, are on
# lines missing from this chunk — confirm against full file):
29 status
=> { state => 'startup' },
33 shutdown_request
=> 0,
35 # mode can be: active, reboot, shutdown, restart
37 cluster_state_update
=> 0,
# enter the initial state; logs the transition via set_local_status()
40 $self->set_local_status({ state => 'wait_for_agent_lock' });
# Handle an external shutdown/restart request for this LRM: decide (based on
# the datacenter 'shutdown_policy' and whether the node is rebooting or
# powering off) whether to freeze services or queue stop jobs, set
# $self->{mode} accordingly, and mark shutdown_request so the work() loop
# winds down. Idempotent: returns immediately if already shutting down.
45 sub shutdown_request
{
48 return if $self->{shutdown_request
}; # already in shutdown mode
50 my $haenv = $self->{haenv
};
52 my $nodename = $haenv->nodename();
# is_node_shutdown() distinguishes a plain LRM restart ($shutdown false)
# from a node shutdown, and a reboot from a poweroff
54 my ($shutdown, $reboot) = $haenv->is_node_shutdown();
56 my $dc_ha_cfg = $haenv->get_ha_settings();
57 my $shutdown_policy = $dc_ha_cfg->{shutdown_policy
} // 'conditional';
59 if ($shutdown) { # don't log this on service restart, only on node shutdown
60 $haenv->log('info', "got shutdown request with shutdown policy '$shutdown_policy'");
# Policy selection. NOTE(review): the declaration of $freeze_all and the
# bodies of the 'freeze'/'failover' branches are on lines missing from this
# chunk — presumably $freeze_all = 1 resp. 0; confirm against full file.
64 if ($shutdown_policy eq 'conditional') {
# conditional: freeze on reboot, migrate/stop on poweroff
65 $freeze_all = $reboot;
66 } elsif ($shutdown_policy eq 'freeze') {
68 } elsif ($shutdown_policy eq 'failover') {
71 $haenv->log('err', "unknown shutdown policy '$shutdown_policy', fall back to conditional");
72 $freeze_all = $reboot;
76 # *always* queue stop jobs for all services if the node shuts down,
77 # independent if it's a reboot or a poweroff, else we may corrupt
78 # services or hinder node shutdown
79 my $ss = $self->{service_status
};
81 foreach my $sid (keys %$ss) {
# only stop services placed on this node
84 next if $sd->{node
} ne $nodename;
85 # Note: use undef uid to mark shutdown/stop jobs
86 $self->queue_resource_command($sid, undef, 'request_stop');
# Log/mode selection per shutdown kind (the surrounding if/else structure
# is on lines missing from this chunk):
93 $haenv->log('info', "reboot LRM, stop and freeze all services");
95 $haenv->log('info', "shutdown LRM, stop and freeze all services");
97 $self->{mode
} = 'restart';
99 $haenv->log('info', "shutdown LRM, stop all services");
100 $self->{mode
} = 'shutdown';
103 $haenv->log('info', "restart LRM, freeze all services");
104 $self->{mode
} = 'restart';
107 $self->{shutdown_request
} = 1;
# best-effort: persist new mode right away, but don't die on failure
109 eval { $self->update_lrm_status(); };
# NOTE(review): every other error path in this file logs via $haenv->log;
# "$self->log" here looks like a bug (no sub log visible on $self) — verify
# and likely change to $haenv->log.
111 $self->log('err', "unable to update lrm status file - $err");
# Accessor: return the LRM's local status hash ({ state => ...,
# state_change_time => ... }), as maintained by set_local_status().
115 sub get_local_status
{
118 return $self->{status
};
# Transition the LRM to a new local state. Dies on a state name not listed
# in $valid_states; no-op when the state is unchanged; otherwise logs the
# transition and records the change time on the new status hash.
121 sub set_local_status
{
122 my ($self, $new) = @_;
# reject unknown states early (programming error, hence die)
124 die "invalid state '$new->{state}'" if !$valid_states->{$new->{state}};
126 my $haenv = $self->{haenv
};
128 my $old = $self->{status
};
130 # important: only update if it really changed
131 return if $old->{state} eq $new->{state};
133 $haenv->log('info', "status change $old->{state} => $new->{state}");
# stamp when this state was entered; read e.g. by the
# lost_agent_lock/restart timeout check in work()
135 $new->{state_change_time
} = $haenv->get_time();
137 $self->{status
} = $new;
# Persist the LRM's current status (state, mode, command results, timestamp)
# via the HA environment so the CRM can read it. Skips (returns 0) when the
# node is not quorate; write errors are logged but not fatal.
140 sub update_lrm_status
{
143 my $haenv = $self->{haenv
};
# without quorum the status file must not be touched
145 return 0 if !$haenv->quorate();
# assemble the status payload (the "my $lrm_status = {" line is on a line
# missing from this chunk)
148 state => $self->{status
}->{state},
149 mode
=> $self->{mode
},
150 results
=> $self->{results
},
151 timestamp
=> $haenv->get_time(),
# best-effort write; failure is logged, caller decides how to proceed
154 eval { $haenv->write_lrm_status($lrm_status); };
156 $haenv->log('err', "unable to write lrm status file - $err");
# Refresh $self->{service_status} from the CRM's manager status. On read
# failure the error is logged (and, per the visible code, service_status is
# not updated that round — return value lines are missing from this chunk).
163 sub update_service_status
{
166 my $haenv = $self->{haenv
};
# read_manager_status() may die (e.g. cfs unavailable) — trap it
168 my $ms = eval { $haenv->read_manager_status(); };
170 $haenv->log('err', "updating service status from manager failed: $err");
# fall back to an empty hash so callers can always iterate it
173 $self->{service_status
} = $ms->{service_status
} || {};
# Try to acquire (or confirm) the per-node HA agent lock, and couple it with
# the watchdog: on success, either update the already-open watchdog or open
# a new one. Retries in a short loop, bounded both by attempt count and by
# wall-clock time (the loop header/sleep lines are missing from this chunk).
178 sub get_protected_ha_agent_lock
{
181 my $haenv = $self->{haenv
};
184 my $starttime = $haenv->get_time();
188 if ($haenv->get_ha_agent_lock()) {
189 if ($self->{ha_agent_wd
}) {
# lock still held — just pet the watchdog
190 $haenv->watchdog_update($self->{ha_agent_wd
});
# first acquisition: arm the watchdog and remember its handle
192 my $wfh = $haenv->watchdog_open();
193 $self->{ha_agent_wd
} = $wfh;
198 last if ++$count > 5; # try max 5 times
200 my $delay = $haenv->get_time() - $starttime;
201 last if $delay > 5; # for max 5 seconds
# Count the services on this node that are in an "active" request state,
# i.e. skipping stopped, frozen and erroneous ones. Used to decide whether
# the agent lock is needed and whether shutdown may proceed. (The counter
# increment and return lines are missing from this chunk.)
209 sub active_service_count
{
212 my $haenv = $self->{haenv
};
214 my $nodename = $haenv->nodename();
216 my $ss = $self->{service_status
};
220 foreach my $sid (keys %$ss) {
221 my $sd = $ss->{$sid};
222 next if !$sd->{node
};
# only services placed on this node count
223 next if $sd->{node
} ne $nodename;
224 my $req_state = $sd->{state};
225 next if !defined($req_state);
226 next if $req_state eq 'stopped';
227 next if $req_state eq 'freeze';
228 # erroneous services are not managed by HA, don't count them as active
229 next if $req_state eq 'error';
# One-shot flag: work() writes the LRM status file once at startup before
# entering the normal state machine (file-scoped, shared across iterations).
237 my $wrote_lrm_status_at_startup = 0;
# Run one LRM main-loop iteration: bracket work() with the environment's
# loop start/end hooks and refresh the cached cluster-state-update flag
# (read later by work() to skip manage_resources() on inconsistent state).
239 sub do_one_iteration
{
242 my $haenv = $self->{haenv
};
244 $haenv->loop_start_hook();
246 $self->{cluster_state_update
} = $haenv->cluster_state_update();
248 my $res = $self->work();
250 $haenv->loop_end_hook();
# Main state-machine step of the LRM (the "sub work {" line itself is on a
# line missing from this chunk). Handles the three states declared in
# $valid_states: wait_for_agent_lock, active, lost_agent_lock. First does
# state transitions (lock acquisition/loss, fence requests), then does the
# per-state work. Returns 0 to signal the caller that the LRM should exit.
258 my $haenv = $self->{haenv
};
# write the LRM status once at startup so the CRM sees us early
260 if (!$wrote_lrm_status_at_startup) {
261 if ($self->update_lrm_status()) {
262 $wrote_lrm_status_at_startup = 1;
# could not write (e.g. no quorum) — keep running unless shutdown requested
266 return $self->{shutdown_request
} ?
0 : 1;
270 my $status = $self->get_local_status();
271 my $state = $status->{state};
273 $self->update_service_status();
# number of services the CRM has marked for fencing on this node
275 my $fence_request = PVE
::HA
::Tools
::count_fenced_services
($self->{service_status
}, $haenv->nodename());
277 # do state changes first
279 my $ctime = $haenv->get_time();
281 if ($state eq 'wait_for_agent_lock') {
283 my $service_count = $self->active_service_count();
# only grab the agent lock when there is something to manage, no fence
# request is pending, and we are quorate
285 if (!$fence_request && $service_count && $haenv->quorate()) {
286 if ($self->get_protected_ha_agent_lock()) {
287 $self->set_local_status({ state => 'active' });
291 } elsif ($state eq 'lost_agent_lock') {
# try to re-acquire the lock (e.g. after a quorum blip)
293 if (!$fence_request && $haenv->quorate()) {
294 if ($self->get_protected_ha_agent_lock()) {
295 $self->set_local_status({ state => 'active' });
299 } elsif ($state eq 'active') {
301 if ($fence_request) {
302 $haenv->log('err', "node need to be fenced - releasing agent_lock\n");
303 $self->set_local_status({ state => 'lost_agent_lock'});
304 } elsif (!$self->get_protected_ha_agent_lock()) {
305 $self->set_local_status({ state => 'lost_agent_lock'});
# re-read status: the transitions above may have changed it
309 $status = $self->get_local_status();
310 $state = $status->{state};
# now do the actual per-state work
314 if ($state eq 'wait_for_agent_lock') {
316 return 0 if $self->{shutdown_request
};
318 $self->update_lrm_status();
322 } elsif ($state eq 'active') {
324 my $startime = $haenv->get_time();
330 # do work (max_time seconds)
332 # fixme: set alert timer
334 # if we could not get the current service status there's no point
335 # in doing anything, try again next round.
336 return if !$self->update_service_status();
338 if ($self->{shutdown_request
}) {
340 if ($self->{mode
} eq 'restart') {
342 my $service_count = $self->active_service_count();
344 if ($service_count == 0) {
# no workers pending — safe to drop watchdog and lock
346 if ($self->run_workers() == 0) {
347 if ($self->{ha_agent_wd
}) {
348 $haenv->watchdog_close($self->{ha_agent_wd
});
349 delete $self->{ha_agent_wd
};
354 # restart with no or frozen services, release the lock
355 $haenv->release_ha_agent_lock();
# shutdown mode branch (the else/condition lines are missing from this
# chunk): only release watchdog/lock once all stop jobs finished cleanly
360 if ($self->run_workers() == 0) {
361 if ($self->{shutdown_errors
} == 0) {
362 if ($self->{ha_agent_wd
}) {
363 $haenv->watchdog_close($self->{ha_agent_wd
});
364 delete $self->{ha_agent_wd
};
367 # shutdown with all services stopped thus release the lock
368 $haenv->release_ha_agent_lock();
375 if (!$self->{cluster_state_update
}) {
376 # update failed but we could still renew our lock (cfs restart?),
377 # safely skip manage and expect to update just fine next round
378 $haenv->log('notice', "temporary inconsistent cluster state " .
379 "(cfs restart?), skip round");
383 $self->manage_resources();
388 $haenv->log('err', "got unexpected error - $err");
391 $self->update_lrm_status();
393 return 0 if $shutdown;
# keep a steady loop cadence
395 $haenv->sleep_until($startime + $max_time);
397 } elsif ($state eq 'lost_agent_lock') {
399 # Note: watchdog is active and will trigger soon!
401 # so we hope to get the lock back soon!
403 if ($self->{shutdown_request
}) {
405 my $service_count = $self->active_service_count();
407 if ($service_count > 0) {
408 $haenv->log('err', "get shutdown request in state 'lost_agent_lock' - " .
409 "detected $service_count running services");
411 if ($self->{mode
} eq 'restart') {
412 my $state_mt = $self->{status
}->{state_change_time
};
414 # watchdog should have already triggered, so either it's
415 # set to noboot or it failed. As we are in restart mode, and
416 # have infinity stoptimeout -> exit now - we don't touch services
417 # or change state, so this is safe, relatively speaking
418 if (($haenv->get_time() - $state_mt) > 90) {
419 $haenv->log('err', "lost agent lock and restart request for over 90 seconds - giving up!");
425 # all services are stopped, so we can close the watchdog
427 if ($self->{ha_agent_wd
}) {
428 $haenv->watchdog_close($self->{ha_agent_wd
});
429 delete $self->{ha_agent_wd
};
# unknown state — programming error, die loudly
440 die "got unexpected status '$state'\n";
# Body of run_workers (the "sub run_workers {" line is on a line missing
# from this chunk): start queued per-service worker processes, up to
# $max_workers concurrently, for at most ~5 seconds per call. With
# max_workers == 0 commands run inline via exec_resource_agent (used by
# regression tests). Returns the number of workers still pending/active.
450 my $haenv = $self->{haenv
};
452 my $starttime = $haenv->get_time();
454 # number of workers to start, if 0 we exec the command directly without forking
455 my $max_workers = $haenv->get_max_workers();
457 my $sc = $haenv->read_service_config();
# bounded polling loop: reap finished workers, start new ones
459 while (($haenv->get_time() - $starttime) < 5) {
460 my $count = $self->check_active_workers();
# sorted for deterministic start order
462 foreach my $sid (sort keys %{$self->{workers
}}) {
463 last if $count >= $max_workers && $max_workers > 0;
465 my $w = $self->{workers
}->{$sid};
467 # only fork if we may else call exec_resource_agent
468 # directly (e.g. for regression tests)
469 if ($max_workers > 0) {
# the fork() call itself is on a line missing from this chunk
471 if (!defined($pid)) {
472 $haenv->log('err', "fork worker failed");
473 $count = 0; last; # abort, try later
474 } elsif ($pid == 0) {
# child process: run the resource agent command and exit with its code
475 $haenv->after_fork(); # cleanup
480 $res = $self->exec_resource_agent($sid, $sc->{$sid}, $w->{state}, $w->{target
});
483 $haenv->log('err', $err);
# inline (non-forking) path: shift exit code into wait()-style status so
# the *_finished handlers can decode it uniformly
494 $res = $self->exec_resource_agent($sid, $sc->{$sid}, $w->{state}, $w->{target
});
495 $res = $res << 8 if $res > 0;
498 $haenv->log('err', $err);
# undef uid marks shutdown/stop jobs (see shutdown_request)
500 if (defined($w->{uid
})) {
501 $self->resource_command_finished($sid, $w->{uid
}, $res);
503 $self->stop_command_finished($sid, $res);
# still-queued/running worker count — 0 means all work is done
514 return scalar(keys %{$self->{workers
}});
# Queue a resource command for every (non-frozen) service assigned to this
# node according to the CRM's requested state, prune restart counters for
# services that vanished, then run the workers. Returns the pending worker
# count from run_workers().
517 sub manage_resources
{
520 my $haenv = $self->{haenv
};
522 my $nodename = $haenv->nodename();
524 my $ss = $self->{service_status
};
# drop restart-retry counters of services no longer in the status
526 foreach my $sid (keys %{$self->{restart_tries
}}) {
527 delete $self->{restart_tries
}->{$sid} if !$ss->{$sid};
530 foreach my $sid (keys %$ss) {
531 my $sd = $ss->{$sid};
532 next if !$sd->{node
};
# only act on services placed on this node
534 next if $sd->{node
} ne $nodename;
535 my $req_state = $sd->{state};
536 next if !defined($req_state);
# frozen services are intentionally left untouched
537 next if $req_state eq 'freeze';
538 $self->queue_resource_command($sid, $sd->{uid
}, $req_state, $sd->{target
});
541 return $self->run_workers();
# Enqueue a worker entry for service $sid to execute $state (the resource
# agent command) with optional $target node. Deduplicates by uid so a
# command whose result the CRM has not yet consumed is not re-queued; an
# already-running worker for the sid is left alone, a merely-queued one is
# replaced by the new command.
544 sub queue_resource_command
{
545 my ($self, $sid, $uid, $state, $target) = @_;
547 # do not queue the exactly same command twice as this may lead to
548 # an inconsistent HA state when the first command fails but the CRM
549 # does not process its failure right away and the LRM starts a second
550 # try, without the CRM knowing of it (race condition)
551 # The 'stopped' command is an exception as we do not process its result
552 # in the CRM and we want to execute it always (even with no active CRM)
553 return if $state ne 'stopped' && $uid && defined($self->{results
}->{$uid});
555 if (my $w = $self->{workers
}->{$sid}) {
556 return if $w->{pid
}; # already started
557 # else, delete and overwrite queue entry with new command
558 delete $self->{workers
}->{$sid};
# new queue entry (its sid/uid/state fields are on lines missing from
# this chunk)
561 $self->{workers
}->{$sid} = {
# only stored when present — migrate/relocate carry a target node
567 $self->{workers
}->{$sid}->{target
} = $target if $target;
# Reap finished worker processes without blocking (waitpid WNOHANG) and
# dispatch their wait status ($?) to resource_command_finished or, for
# undef-uid stop jobs, stop_command_finished. Returns the number of workers
# still active (count/return lines are missing from this chunk).
570 sub check_active_workers
{
573 # finish/count workers
575 foreach my $sid (keys %{$self->{workers
}}) {
576 my $w = $self->{workers
}->{$sid};
577 if (my $pid = $w->{pid
}) {
# non-blocking reap; equal pid means the child has exited
579 my $waitpid = waitpid($pid, WNOHANG
);
580 if (defined($waitpid) && ($waitpid == $pid)) {
581 if (defined($w->{uid
})) {
582 $self->resource_command_finished($sid, $w->{uid
}, $?);
# undef uid marks shutdown/stop jobs (see shutdown_request)
584 $self->stop_command_finished($sid, $?);
# Handle completion of an undef-uid stop job: decode the wait() status
# (exec failure, signal, or exit code) and count any failure in
# $self->{shutdown_errors} so shutdown only releases the lock when clean.
595 sub stop_command_finished
{
596 my ($self, $sid, $status) = @_;
598 my $haenv = $self->{haenv
};
600 my $w = delete $self->{workers
}->{$sid};
601 return if !$w; # should not happen
# status == -1 means the agent could not be executed at all (the
# comparison line is missing from this chunk)
606 $haenv->log('err', "resource agent $sid finished - failed to execute");
607 } elsif (my $sig = ($status & 127)) {
608 $haenv->log('err', "resource agent $sid finished - got signal $sig");
# normal exit: high byte of the wait status is the exit code
610 $exit_code = ($status >> 8);
613 if ($exit_code != 0) {
614 $self->{shutdown_errors
}++;
# Handle completion of a regular (uid-tagged) resource command: decode the
# wait() status, apply the restart policy via handle_service_exitcode,
# store the result under its uid for the CRM to pick up, and garbage-collect
# results whose uid is no longer referenced by any service.
618 sub resource_command_finished
{
619 my ($self, $sid, $uid, $status) = @_;
621 my $haenv = $self->{haenv
};
623 my $w = delete $self->{workers
}->{$sid};
624 return if !$w; # should not happen
# decode wait() status (the -1 comparison line is missing from this chunk)
629 $haenv->log('err', "resource agent $sid finished - failed to execute");
630 } elsif (my $sig = ($status & 127)) {
631 $haenv->log('err', "resource agent $sid finished - got signal $sig");
633 $exit_code = ($status >> 8);
# may rewrite the exit code according to the service restart policy
636 $exit_code = $self->handle_service_exitcode($sid, $w->{state}, $exit_code);
638 return if $exit_code == ETRY_AGAIN
; # tell nobody, simply retry
# publish the result for the CRM, keyed by command uid
640 $self->{results
}->{$uid} = {
642 state => $w->{state},
643 exit_code
=> $exit_code,
646 my $ss = $self->{service_status
};
648 # compute hash of valid/existing uids
650 foreach my $sid (keys %$ss) {
651 my $sd = $ss->{$sid};
653 $valid_uids->{$sd->{uid
}} = 1;
# keep only results whose uid is still referenced by some service
657 foreach my $id (keys %{$self->{results
}}) {
658 next if !$valid_uids->{$id};
659 $results->{$id} = $self->{results
}->{$id};
661 $self->{results
} = $results;
664 # processes the exit code from a finished resource agent, so that the CRM knows
665 # if the LRM wants to retry an action based on the current recovery policies for
666 # the failed service, or the CRM itself must try to recover from the failure.
667 sub handle_service_exitcode
{
668 my ($self, $sid, $cmd, $exit_code) = @_;
670 my $haenv = $self->{haenv
};
# per-service restart attempt counters, pruned in manage_resources()
671 my $tries = $self->{restart_tries
};
673 my $sc = $haenv->read_service_config();
# per-service max_restart overrides the default (default initialization
# is on a line missing from this chunk)
677 if (my $cd = $sc->{$sid}) {
678 $max_restart = $cd->{max_restart
};
# restart policy only applies to 'started' commands
681 if ($cmd eq 'started') {
683 if ($exit_code == SUCCESS
) {
689 } elsif ($exit_code == ERROR
) {
691 $tries->{$sid} = 0 if !defined($tries->{$sid});
# retry budget exhausted: report the error to the CRM as-is
693 if ($tries->{$sid} >= $max_restart) {
694 $haenv->log('err', "unable to start service $sid on local node".
695 " after $tries->{$sid} retries");
702 $haenv->log('warning', "restart policy: retry number $tries->{$sid}" .
703 " for service '$sid'");
704 # tell CRM that we retry the start
# Execute one resource-agent command ($cmd) for service $sid via the
# service-type plugin: started, request_stop/stopped, migrate/relocate, or
# the special 'error' pseudo-command. Returns one of the PVE::HA::Tools
# exit codes (SUCCESS, EUNKNOWN_SERVICE_TYPE, ...). Runs either in a forked
# worker or inline (see run_workers). NOTE: the closing lines of this sub
# are beyond the end of this chunk.
713 sub exec_resource_agent
{
714 my ($self, $sid, $service_config, $cmd, @params) = @_;
716 # setup execution environment
# fixed PATH for predictable tool resolution in the agent
718 $ENV{'PATH'} = '/sbin:/bin:/usr/sbin:/usr/bin';
720 my $haenv = $self->{haenv
};
722 my $nodename = $haenv->nodename();
724 my (undef, $service_type, $service_name) = $haenv->parse_sid($sid);
# resolve the plugin handling this service type (vm/ct/...)
726 my $plugin = PVE
::HA
::Resources-
>lookup($service_type);
728 $haenv->log('err', "service type '$service_type' not implemented");
729 return EUNKNOWN_SERVICE_TYPE
;
732 if (!$service_config) {
733 $haenv->log('err', "missing resource configuration for '$sid'");
734 return EUNKNOWN_SERVICE
;
737 # process error state early
738 if ($cmd eq 'error') {
740 $haenv->log('err', "service $sid is in an error state and needs manual " .
741 "intervention. Look up 'ERROR RECOVERY' in the documentation.");
743 return SUCCESS
; # error always succeeds
# guard against stale placement: only act on services assigned to us
746 if ($service_config->{node
} ne $nodename) {
747 $haenv->log('err', "service '$sid' not on this node");
751 my $id = $service_name;
753 my $running = $plugin->check_running($haenv, $id);
755 if ($cmd eq 'started') {
# idempotent: already running counts as success
757 return SUCCESS
if $running;
759 $haenv->log("info", "starting service $sid");
761 $plugin->start($haenv, $id);
# verify the start actually took effect
763 $running = $plugin->check_running($haenv, $id);
766 $haenv->log("info", "service status $sid started");
769 $haenv->log("warning", "unable to start service $sid");
773 } elsif ($cmd eq 'request_stop' || $cmd eq 'stopped') {
# idempotent: already stopped counts as success
775 return SUCCESS
if !$running;
777 $haenv->log("info", "stopping service $sid");
779 $plugin->shutdown($haenv, $id);
781 $running = $plugin->check_running($haenv, $id);
784 $haenv->log("info", "service status $sid stopped");
# NOTE(review): runtime log string has a duplicated word ("stop stop") —
# candidate for a separate message fix (not changed here: doc-only edit)
787 $haenv->log("info", "unable to stop stop service $sid (still running)");
791 } elsif ($cmd eq 'migrate' || $cmd eq 'relocate') {
793 my $target = $params[0];
794 if (!defined($target)) {
# NOTE(review): this die is guarded by the same !defined($target) check
# as the surrounding if, so the trailing condition is redundant — and the
# return below it is unreachable when the die fires; worth simplifying.
795 die "$cmd '$sid' failed - missing target\n" if !defined($target);
796 return EINVALID_PARAMETER
;
# already on the target node — nothing to do
799 if ($service_config->{node
} eq $target) {
# 'migrate' is online migration, 'relocate' stops first
804 my $online = ($cmd eq 'migrate') ?
1 : 0;
806 my $res = $plugin->migrate($haenv, $id, $target, $online);
808 # something went wrong if service is still on this node
810 $haenv->log("err", "service $sid not moved (migration error)");
# fallthrough for commands no branch handled
818 $haenv->log("err", "implement me (cmd '$cmd')");
819 return EUNKNOWN_COMMAND
;