3 # Local Resource Manager
7 use POSIX
qw(:sys_wait_h);
11 use PVE
::HA
::Tools
':exit_codes';
12 use PVE
::HA
::Resources
;
# State table for the LRM state machine: each key is a valid value for
# $self->{status}->{state}, mapped to a human-readable description (used
# for validation in set_local_status()).
# NOTE(review): the `my $valid_states = {` opener and the closing `};` are
# not visible in this chunk - confirm against the full file.
14 # Server can have several states:
17 wait_for_agent_lock
=> "waiting for agent lock",
18 active
=> "got agent_lock",
19 maintenance
=> "going into maintenance",
20 lost_agent_lock
=> "lost agent_lock",
23 # we sleep ~10s per 'active' round, so if no service is available for >= 10 min we'd go in wait
24 # state, giving up the watchdog and the LRM lock voluntarily, ensuring the WD can do no harm
25 my $max_active_idle_rounds = 60;
# Constructor body fragment: builds the LRM object around the given HA
# environment and seeds the initial local state machine.
# NOTE(review): the `sub new {` header, the `my $self = bless { ... }`
# opener and the constructor tail (return) are not visible in this chunk -
# confirm against the full file.
28 my ($this, $haenv) = @_;
30 my $class = ref($this) || $this;
# status: the local state-machine record; starts as 'startup' until the
# first set_local_status() call below.
34 status
=> { state => 'startup' },
38 shutdown_request
=> 0,
40 # mode can be: active, reboot, shutdown, restart
42 cluster_state_update
=> 0,
43 active_idle_rounds
=> 0,
# Enter the normal idle state right away; 'startup' is only a placeholder.
46 $self->set_local_status({ state => 'wait_for_agent_lock' });
# Handle an external shutdown/restart request for the LRM daemon: decide
# via the datacenter 'shutdown_policy' whether services get frozen,
# stopped or migrated, queue stop jobs on node poweroff, set $self->{mode}
# accordingly and record the request time in $self->{shutdown_request}.
# NOTE(review): several original lines (argument unpacking, some branch
# bodies, closing braces) are not visible in this chunk - confirm against
# the full file.
51 sub shutdown_request
{
54 return if $self->{shutdown_request
}; # already in shutdown mode
56 my $haenv = $self->{haenv
};
58 my $nodename = $haenv->nodename();
# Distinguish a real node shutdown/reboot from a mere service restart.
60 my ($shutdown, $reboot) = $haenv->is_node_shutdown();
62 my $dc_cfg = $haenv->get_datacenter_settings();
63 my $shutdown_policy = $dc_cfg->{ha
}->{shutdown_policy
} // 'conditional';
65 if ($shutdown) { # don't log this on service restart, only on node shutdown
66 $haenv->log('info', "got shutdown request with shutdown policy '$shutdown_policy'");
# Map the configured policy to freeze/maintenance behavior; 'conditional'
# freezes only on reboot, unknown policies fall back to that behavior.
71 if ($shutdown_policy eq 'conditional') {
72 $freeze_all = $reboot;
73 } elsif ($shutdown_policy eq 'freeze') {
75 } elsif ($shutdown_policy eq 'failover') {
77 } elsif ($shutdown_policy eq 'migrate') {
80 $haenv->log('err', "unknown shutdown policy '$shutdown_policy', fall back to conditional");
81 $freeze_all = $reboot;
85 # we get marked as unavailable by the manager, then all services will
86 # be migrated away, we'll still have the same "can we exit" clause as
87 # a normal shutdown -> no running service on this node
88 # FIXME: after X minutes, add shutdown command for remaining services,
89 # e.g., if they have no alternative node???
91 # *always* queue stop jobs for all services if the node shuts down,
92 # independent if it's a reboot or a poweroff, else we may corrupt
93 # services or hinder node shutdown
94 my $ss = $self->{service_status
};
96 foreach my $sid (keys %$ss) {
99 next if $sd->{node
} ne $nodename;
100 # Note: use undef uid to mark shutdown/stop jobs
101 $self->queue_resource_command($sid, undef, 'request_stop');
106 my $shutdown_type = $reboot ?
'reboot' : 'shutdown';
108 $haenv->log('info', "$shutdown_type LRM, doing maintenance, removing this node from active list")
;
109 $self->{mode
} = 'maintenance';
110 } elsif ($freeze_all) {
111 $haenv->log('info', "$shutdown_type LRM, stop and freeze all services");
112 $self->{mode
} = 'restart';
114 $haenv->log('info', "shutdown LRM, stop all services");
115 $self->{mode
} = 'shutdown';
118 $haenv->log('info', "restart LRM, freeze all services");
119 $self->{mode
} = 'restart';
# Record when the shutdown request came in; doubles as the boolean
# "shutdown requested" flag checked above and in work().
122 $self->{shutdown_request
} = $haenv->get_time();
124 eval { $self->update_lrm_status() or die "not quorate?\n"; };
126 $haenv->log('err', "unable to update lrm status file - $err");
# Accessor for the cached local LRM status record (state, state_change_time, ...).
sub get_local_status {
    my ($self) = @_;

    return $self->{status};
}
# Switch the local state machine to $new->{state}, stamping the change
# time. Dies on a state name not listed in $valid_states; does nothing
# when the state does not actually change.
sub set_local_status {
    my ($self, $new) = @_;

    die "invalid state '$new->{state}'" if !$valid_states->{$new->{state}};

    my $haenv = $self->{haenv};
    my $old = $self->{status};

    # important: only update if it really changed
    return if $old->{state} eq $new->{state};

    $haenv->log('info', "status change $old->{state} => $new->{state}");

    $new->{state_change_time} = $haenv->get_time();

    $self->{status} = $new;
}
# Persist the current LRM state (state, mode, command results, timestamp)
# via the HA environment. Returns 1 on success, 0 when not quorate or when
# writing the status file fails (the error is logged, not re-thrown).
sub update_lrm_status {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    # without quorum the cluster filesystem is read-only, don't even try
    return 0 if !$haenv->quorate();

    my $lrm_status = {
        state => $self->{status}->{state},
        mode => $self->{mode},
        results => $self->{results},
        timestamp => $haenv->get_time(),
    };

    eval { $haenv->write_lrm_status($lrm_status); };
    if (my $err = $@) {
        $haenv->log('err', "unable to write lrm status file - $err");
        return 0;
    }

    return 1;
}
# Refresh the cached view of the cluster manager status: copies the
# per-service status hash and this node's status into $self. Returns 1 on
# success, undef when reading the manager status failed (error is logged).
sub update_service_status {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    my $ms = eval { $haenv->read_manager_status(); };
    if (my $err = $@) {
        $haenv->log('err', "updating service status from manager failed: $err");
        return undef;
    }

    $self->{service_status} = $ms->{service_status} || {};
    my $nodename = $haenv->nodename();
    $self->{node_status} = $ms->{node_status}->{$nodename} || 'unknown';

    return 1;
}
# Try to acquire (or keep) the HA agent lock and couple it to the
# watchdog: on success the watchdog is opened (first time) or refreshed
# and 1 is returned. Retries for at most 5 attempts / ~5 seconds before
# giving up and returning 0.
sub get_protected_ha_agent_lock {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    my $count = 0;
    my $starttime = $haenv->get_time();

    for (;;) {
        if ($haenv->get_ha_agent_lock()) {
            # got (or kept) the lock - keep the watchdog fed
            if ($self->{ha_agent_wd}) {
                $haenv->watchdog_update($self->{ha_agent_wd});
            } else {
                my $wfh = $haenv->watchdog_open();
                $self->{ha_agent_wd} = $wfh;
            }
            return 1;
        }

        last if ++$count > 5; # try max 5 time

        my $delay = $haenv->get_time() - $starttime;
        last if $delay > 5; # for max 5 seconds

        $haenv->sleep(1);
    }

    return 0;
}
# only cares if any service has the local node as their node, independent of which req.state it is
sub has_configured_service_on_local_node {
    my ($self) = @_;

    my $haenv = $self->{haenv};
    my $nodename = $haenv->nodename();

    my $ss = $self->{service_status};
    for my $sid (keys %$ss) {
        my $sd = $ss->{$sid};
        next if !$sd->{node} || $sd->{node} ne $nodename;

        return 1; # found at least one service assigned to us
    }

    return 0;
}
# True when this node should be fenced: either some of our services are
# already marked for fencing, or the manager set our node status to 'fence'.
sub is_fence_requested {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    my $nodename = $haenv->nodename();
    my $ss = $self->{service_status};

    my $fenced_services = PVE::HA::Tools::count_fenced_services($ss, $nodename);

    return $fenced_services || $self->{node_status} eq 'fence';
}
# Count the services on the local node whose requested state is "active",
# i.e. anything except stopped/freeze/error. Used to decide whether the
# LRM may give up its lock and watchdog protection.
sub active_service_count {
    my ($self) = @_;

    my $haenv = $self->{haenv};
    my $nodename = $haenv->nodename();

    my $ss = $self->{service_status};

    my $count = 0;
    for my $sid (keys %$ss) {
        my $sd = $ss->{$sid};
        next if !$sd->{node};
        next if $sd->{node} ne $nodename;
        my $req_state = $sd->{state};
        next if !defined($req_state);
        next if $req_state eq 'stopped';
        # NOTE: 'ignored' ones are already dropped by the manager from service_status
        next if $req_state eq 'freeze';
        # erroneous services are not managed by HA, don't count them as active
        next if $req_state eq 'error';

        $count++;
    }

    return $count;
}
# One-shot flag: set after the first successful write of the LRM status
# file following daemon start (see the work() state machine); lives at
# file scope so it survives across work() invocations.
283 my $wrote_lrm_status_at_startup = 0;
# Run one main-loop iteration: refresh the cached cluster state, delegate
# the actual work to work() between the environment's loop hooks, and hand
# work()'s result back to the daemon main loop.
sub do_one_iteration {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    $haenv->loop_start_hook();

    $self->{cluster_state_update} = $haenv->cluster_state_update();

    my $res = $self->work();

    $haenv->loop_end_hook();

    return $res;
}
# NOTE: this is disabling the self-fence mechanism, so it must NOT be called with active services
# It's normally *only* OK on graceful shutdown (with no services, or all services frozen)
my sub give_up_watchdog_protection {
    my ($self) = @_;

    if ($self->{ha_agent_wd}) {
        $self->{haenv}->watchdog_close($self->{ha_agent_wd});
        delete $self->{ha_agent_wd}; # only delete after close!
    }
}
# Main per-iteration state machine of the LRM (called from do_one_iteration).
# First section computes state transitions; second section ("do work")
# executes the behavior of the (possibly new) state. Returns 0 to tell the
# daemon to exit, a true value to keep looping.
# NOTE(review): this chunk is missing the `sub work {` header, several
# closing braces, the eval wrapper around manage_resources and assorted
# lines - verify structure against the complete file.
315 my $haenv = $self->{haenv
};
# Make sure our status file exists as early as possible after startup;
# stay in the loop and retry if it could not be written yet.
317 if (!$wrote_lrm_status_at_startup) {
318 if ($self->update_lrm_status()) {
319 $wrote_lrm_status_at_startup = 1;
323 return $self->{shutdown_request
} ?
0 : 1;
327 my $status = $self->get_local_status();
328 my $state = $status->{state};
330 $self->update_service_status();
332 my $fence_request = $self->is_fence_requested();
334 # do state changes first
336 my $ctime = $haenv->get_time();
338 if ($state eq 'wait_for_agent_lock') {
# Idle: only become active when there is something to do, nobody requests
# our fencing and we are quorate.
340 my $service_count = $self->active_service_count();
342 if (!$fence_request && $service_count && $haenv->quorate()) {
343 if ($self->get_protected_ha_agent_lock()) {
344 $self->set_local_status({ state => 'active' });
348 } elsif ($state eq 'lost_agent_lock') {
# Try to regain the agent lock before the watchdog fences us.
350 if (!$fence_request && $haenv->quorate()) {
351 if ($self->get_protected_ha_agent_lock()) {
352 $self->set_local_status({ state => 'active' });
356 } elsif ($state eq 'active') {
358 if ($fence_request) {
359 $haenv->log('err', "node need to be fenced - releasing agent_lock\n");
360 $self->set_local_status({ state => 'lost_agent_lock'});
361 } elsif (!$self->get_protected_ha_agent_lock()) {
362 $self->set_local_status({ state => 'lost_agent_lock'});
363 } elsif ($self->{mode
} eq 'maintenance') {
364 $self->set_local_status({ state => 'maintenance'});
# Idle tracking: with nothing configured here and no running worker, count
# rounds and eventually drop back to wait_for_agent_lock.
366 if (!$self->has_configured_service_on_local_node() && !$self->run_workers()) {
367 # no active service configured for this node and all (old) workers are done
368 $self->{active_idle_rounds
}++;
369 if ($self->{active_idle_rounds
} > $max_active_idle_rounds) {
370 $haenv->log('info', "node had no service configured for $max_active_idle_rounds rounds, going idle.\n");
371 # safety: no active service & no running worker for quite some time -> OK
372 $haenv->release_ha_agent_lock();
373 give_up_watchdog_protection
($self);
374 $self->set_local_status({ state => 'wait_for_agent_lock'});
375 $self->{active_idle_rounds
} = 0;
377 } elsif ($self->{active_idle_rounds
}) {
378 $self->{active_idle_rounds
} = 0;
381 } elsif ($state eq 'maintenance') {
383 if ($fence_request) {
384 $haenv->log('err', "node need to be fenced during maintenance mode - releasing agent_lock\n");
385 $self->set_local_status({ state => 'lost_agent_lock'});
386 } elsif (!$self->get_protected_ha_agent_lock()) {
387 $self->set_local_status({ state => 'lost_agent_lock'});
# Re-read the (possibly changed) state before executing its behavior.
391 $status = $self->get_local_status();
392 $state = $status->{state};
396 if ($state eq 'wait_for_agent_lock') {
398 return 0 if $self->{shutdown_request
};
400 $self->update_lrm_status();
404 } elsif ($state eq 'active') {
406 my $startime = $haenv->get_time();
412 # do work (max_time seconds)
414 # fixme: set alert timer
416 # if we could not get the current service status there's no point
417 # in doing anything, try again next round.
418 return if !$self->update_service_status();
420 if ($self->{shutdown_request
}) {
422 if ($self->{mode
} eq 'restart') {
423 # catch exited workers to update service state
424 my $workers = $self->run_workers();
425 my $service_count = $self->active_service_count();
427 if ($service_count == 0 && $workers == 0) {
428 # safety: no active services or workers -> OK
429 give_up_watchdog_protection
($self);
432 # restart with no or frozen services, release the lock
433 $haenv->release_ha_agent_lock();
437 if ($self->run_workers() == 0) {
438 if ($self->{shutdown_errors
} == 0) {
439 # safety: no active services and LRM shutdown -> OK
440 give_up_watchdog_protection
($self);
442 # shutdown with all services stopped thus release the lock
443 $haenv->release_ha_agent_lock();
450 if (!$self->{cluster_state_update
}) {
451 # update failed but we could still renew our lock (cfs restart?),
452 # safely skip manage and expect to update just fine next round
453 $haenv->log('notice', "temporary inconsistent cluster state " .
454 "(cfs restart?), skip round");
458 $self->manage_resources();
463 $haenv->log('err', "got unexpected error - $err");
466 $self->update_lrm_status();
468 return 0 if $shutdown;
470 $haenv->sleep_until($startime + $max_time);
472 } elsif ($state eq 'lost_agent_lock') {
474 # NOTE: watchdog is active and will trigger soon!
475 # so we hope to get the lock back soon!
476 if ($self->{shutdown_request
}) {
478 my $service_count = $self->active_service_count();
480 if ($service_count > 0) {
481 $haenv->log('err', "get shutdown request in state 'lost_agent_lock' - " .
482 "detected $service_count running services");
484 if ($self->{mode
} eq 'restart') {
485 my $state_mt = $self->{status
}->{state_change_time
};
487 # watchdog should have already triggered, so either it's set
488 # to noboot or it failed. As we are in restart mode, and
489 # have infinity stoptimeout -> exit now - we don't touch services
490 # or change state, so this is safe, relatively speaking
491 if (($haenv->get_time() - $state_mt) > 90) {
492 $haenv->log('err', "lost agent lock and restart request for over 90 seconds - giving up!");
497 # safety: all services are stopped, so we can close the watchdog
498 give_up_watchdog_protection
($self);
506 } elsif ($state eq 'maintenance') {
508 my $startime = $haenv->get_time();
509 return if !$self->update_service_status();
511 # wait until all active services moved away
512 my $service_count = $self->active_service_count();
516 if ($self->{shutdown_request
}) {
517 if ($service_count == 0 && $self->run_workers() == 0) {
518 # safety: going into maintenance and all active services got moved -> OK
519 give_up_watchdog_protection
($self);
523 # restart with no or frozen services, release the lock
524 $haenv->release_ha_agent_lock();
528 $self->manage_resources() if !$exit_lrm;
530 $self->update_lrm_status();
532 return 0 if $exit_lrm;
534 $haenv->sleep_until($startime + 5);
538 die "got unexpected status '$state'\n";
# Start queued worker commands (forking up to $max_workers children, or
# executing inline when max_workers is 0, e.g. in the test harness), reap
# finished ones, and return the number of workers still known.
# NOTE(review): this chunk is missing the `sub run_workers {` header, the
# fork() call line, the eval/exit scaffolding of the child branch and
# several closing braces - verify against the complete file.
548 my $haenv = $self->{haenv
};
550 my $starttime = $haenv->get_time();
552 # number of workers to start, if 0 we exec the command directly without forking
553 my $max_workers = $haenv->get_max_workers();
554 my $sc = $haenv->read_service_config();
556 my $worker = $self->{workers
};
557 # we only got limited time but want to ensure that every queued worker is scheduled
558 # eventually, so sort by the count a worker was seen here in this loop
559 my $fair_sorter = sub {
560 $worker->{$b}->{start_tries
} <=> $worker->{$a}->{start_tries
} || $a cmp $b
# Bounded scheduling window: spend at most ~8 seconds starting workers.
563 while (($haenv->get_time() - $starttime) <= 8) {
564 my $count = $self->check_active_workers();
# Workers without a pid are still queued (not yet started).
566 for my $sid (sort $fair_sorter grep { !$worker->{$_}->{pid
} } keys %$worker) {
567 my $w = $worker->{$sid};
568 # higher try-count means higher priority especially compared to newly queued jobs, so
569 # count every try to avoid starvation
571 next if $count >= $max_workers && $max_workers > 0;
573 # only fork if we may, else call exec_resource_agent directly (e.g. for tests)
574 if ($max_workers > 0) {
576 if (!defined($pid)) {
577 $haenv->log('err', "forking worker failed - $!");
578 $count = 0; last; # abort, try later
579 } elsif ($pid == 0) {
580 $haenv->after_fork(); # cleanup
# Child process: run the resource agent and report via the exit code.
585 $res = $self->exec_resource_agent($sid, $sc->{$sid}, $w->{state}, $w->{params
});
588 $haenv->log('err', $err);
# Inline (non-forking) path: shift the result into wait-status format
# (<< 8) so the *_command_finished handlers can decode it uniformly.
599 $res = $self->exec_resource_agent($sid, $sc->{$sid}, $w->{state}, $w->{params
});
600 $res = $res << 8 if $res > 0;
603 $haenv->log('err', $err);
605 if (defined($w->{uid
})) {
606 $self->resource_command_finished($sid, $w->{uid
}, $res);
608 $self->stop_command_finished($sid, $res);
618 return scalar(keys %{$self->{workers
}});
# Drive the per-service worker queue: drop stale restart counters, queue
# the currently requested command for every service assigned to this node
# (skipping 'recovery' and 'freeze' states), then run the workers.
# Returns run_workers()' result (number of workers still known).
sub manage_resources {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    my $nodename = $haenv->nodename();

    my $ss = $self->{service_status};

    # forget restart counters of services that vanished from the status
    for my $sid (keys %{ $self->{restart_tries} }) {
        delete $self->{restart_tries}->{$sid} if !$ss->{$sid};
    }

    for my $sid (keys %$ss) {
        my $sd = $ss->{$sid};
        next if !$sd->{node} || !$sd->{uid};
        next if $sd->{node} ne $nodename;
        my $request_state = $sd->{state};
        next if !defined($request_state);
        # can only happen for restricted groups where the failed node itself needs to be the
        # recovery target. Always let the master first do so, it will then be marked as 'stopped'
        # and we can just continue normally. But we must NOT do anything with it while still in
        # recovery
        next if $request_state eq 'recovery';
        next if $request_state eq 'freeze';

        $self->queue_resource_command($sid, $sd->{uid}, $request_state, {
            'target' => $sd->{target},
            'timeout' => $sd->{timeout},
        });
    }

    return $self->run_workers();
}
# Queue (or re-queue) a worker command for service $sid. $uid identifies
# the CRM-issued command (undef marks local shutdown/stop jobs), $state is
# the requested action, $params optional extra arguments (target/timeout).
sub queue_resource_command {
    my ($self, $sid, $uid, $state, $params) = @_;

    # do not queue the exact same command twice as this may lead to an inconsistent HA state when
    # the first command fails but the CRM does not process its failure right away and the LRM starts
    # a second try, without the CRM knowing of it (race condition) The 'stopped' command is an
    # exception as we do not process its result in the CRM and we want to execute it always (even
    # with no active CRM)
    return if $state ne 'stopped' && $uid && defined($self->{results}->{$uid});

    if (my $w = $self->{workers}->{$sid}) {
        return if $w->{pid}; # already started
        # else, delete and overwrite queue entry with new command
        delete $self->{workers}->{$sid};
    }

    $self->{workers}->{$sid} = {
        sid => $sid,
        uid => $uid,
        state => $state,
        start_tries => 0,
    };

    $self->{workers}->{$sid}->{params} = $params if $params;
}
# Reap finished worker children (non-blocking waitpid) and dispatch their
# wait status to the matching *_command_finished handler; returns the
# number of workers still running.
sub check_active_workers {
    my ($self) = @_;

    # finish/count workers
    my $count = 0;
    for my $sid (keys %{ $self->{workers} }) {
        my $w = $self->{workers}->{$sid};
        my $pid = $w->{pid} || next; # queued but not yet started

        my $waitpid = waitpid($pid, WNOHANG); # check status
        if (defined($waitpid) && ($waitpid == $pid)) {
            # child exited - $? carries its wait status
            if (defined($w->{uid})) {
                $self->resource_command_finished($sid, $w->{uid}, $?);
            } else {
                $self->stop_command_finished($sid, $?);
            }
        } else {
            $count++; # still active
        }
    }

    return $count;
}
# Handle completion of a queued stop job (one with an undef uid): remove
# the worker entry, decode the wait status, and count every non-clean exit
# in $self->{shutdown_errors} so the shutdown path knows something failed.
sub stop_command_finished {
    my ($self, $sid, $status) = @_;

    my $haenv = $self->{haenv};

    my $w = delete $self->{workers}->{$sid};
    return if !$w; # should not happen

    my $exit_code = -1;

    if ($status == -1) {
        $haenv->log('err', "resource agent $sid finished - failed to execute");
    } elsif (my $sig = ($status & 127)) {
        $haenv->log('err', "resource agent $sid finished - got signal $sig");
    } else {
        $exit_code = ($status >> 8);
    }

    if ($exit_code != 0) {
        $self->{shutdown_errors}++;
    }
}
# Handle completion of a CRM-issued command: decode the wait status into
# an exit code, run it through the restart policy (handle_service_exitcode),
# record the result under its $uid for the CRM to pick up, and prune cached
# results whose uid is no longer referenced by any service.
sub resource_command_finished {
    my ($self, $sid, $uid, $status) = @_;

    my $haenv = $self->{haenv};

    my $w = delete $self->{workers}->{$sid};
    return if !$w; # should not happen

    my $exit_code = -1;

    if ($status == -1) {
        $haenv->log('err', "resource agent $sid finished - failed to execute");
    } elsif (my $sig = ($status & 127)) {
        $haenv->log('err', "resource agent $sid finished - got signal $sig");
    } else {
        $exit_code = ($status >> 8);
    }

    $exit_code = $self->handle_service_exitcode($sid, $w->{state}, $exit_code);

    return if $exit_code == ETRY_AGAIN; # tell nobody, simply retry

    $self->{results}->{$uid} = {
        sid => $sid,
        state => $w->{state},
        exit_code => $exit_code,
    };

    my $ss = $self->{service_status};

    # compute hash of valid/existing uids
    my $valid_uids = {};
    for my $service (keys %$ss) {
        my $sd = $ss->{$service};
        next if !$sd->{uid};
        $valid_uids->{ $sd->{uid} } = 1;
    }

    # keep only results some current service command still refers to
    my $results = {};
    for my $id (keys %{ $self->{results} }) {
        next if !$valid_uids->{$id};
        $results->{$id} = $self->{results}->{$id};
    }
    $self->{results} = $results;
}
774 # processes the exit code from a finished resource agent, so that the CRM knows
775 # if the LRM wants to retry an action based on the current recovery policies for
776 # the failed service, or the CRM itself must try to recover from the failure.
# NOTE(review): the bodies of the SUCCESS branch, the retry-counter
# increment and the final return statements are missing from this chunk -
# verify against the complete file.
777 sub handle_service_exitcode
{
778 my ($self, $sid, $cmd, $exit_code) = @_;
780 my $haenv = $self->{haenv
};
781 my $tries = $self->{restart_tries
};
783 my $sc = $haenv->read_service_config();
# Per-service restart limit from the resource configuration.
787 if (my $cd = $sc->{$sid}) {
788 $max_restart = $cd->{max_restart
};
# Only 'started' commands take part in the restart-retry policy below.
791 if ($cmd eq 'started') {
793 if ($exit_code == SUCCESS
) {
799 } elsif ($exit_code == ERROR
) {
# initialize the retry counter on the first failure
801 $tries->{$sid} = 0 if !defined($tries->{$sid});
# give up once the configured max_restart limit is reached
803 if ($tries->{$sid} >= $max_restart) {
804 $haenv->log('err', "unable to start service $sid on local node".
805 " after $tries->{$sid} retries");
812 $haenv->log('warning', "restart policy: retry number $tries->{$sid}" .
813 " for service '$sid'");
814 # tell CRM that we retry the start
# Execute one resource-agent command ($cmd) for service $sid via its
# resource plugin and return an exit code from PVE::HA::Tools
# ':exit_codes'. Runs inside a forked worker (or inline in tests).
# NOTE(review): several lines (if-guards, success/error return statements
# of the start/stop/migrate branches, closing braces) are missing from
# this chunk - verify against the complete file.
823 sub exec_resource_agent
{
824 my ($self, $sid, $service_config, $cmd, $params) = @_;
826 # setup execution environment
828 $ENV{'PATH'} = '/sbin:/bin:/usr/sbin:/usr/bin';
830 my $haenv = $self->{haenv
};
832 my $nodename = $haenv->nodename();
834 my (undef, $service_type, $service_name) = $haenv->parse_sid($sid);
836 my $plugin = PVE
::HA
::Resources-
>lookup($service_type);
838 $haenv->log('err', "service type '$service_type' not implemented");
839 return EUNKNOWN_SERVICE_TYPE
;
842 if (!$service_config) {
843 $haenv->log('err', "missing resource configuration for '$sid'");
844 return EUNKNOWN_SERVICE
;
847 # process error state early
848 if ($cmd eq 'error') {
849 $haenv->log('err', "service $sid is in an error state and needs manual " .
850 "intervention. Look up 'ERROR RECOVERY' in the documentation.");
852 return SUCCESS
; # error always succeeds
# Refuse to act on services the manager places on another node.
855 if ($service_config->{node
} ne $nodename) {
856 $haenv->log('err', "service '$sid' not on this node");
860 my $id = $service_name;
862 my $running = $plugin->check_running($haenv, $id);
864 if ($cmd eq 'started') {
866 return SUCCESS
if $running;
868 $haenv->log("info", "starting service $sid");
870 $plugin->start($haenv, $id);
# re-check instead of trusting start(): the plugin may fail silently
872 $running = $plugin->check_running($haenv, $id);
875 $haenv->log("info", "service status $sid started");
878 $haenv->log("warning", "unable to start service $sid");
882 } elsif ($cmd eq 'request_stop' || $cmd eq 'stopped') {
884 return SUCCESS
if !$running;
886 if (defined($params->{timeout
})) {
887 $haenv->log("info", "stopping service $sid (timeout=$params->{timeout})");
889 $haenv->log("info", "stopping service $sid");
892 $plugin->shutdown($haenv, $id, $params->{timeout
});
894 $running = $plugin->check_running($haenv, $id);
897 $haenv->log("info", "service status $sid stopped");
# NOTE(review): duplicated word in the user-visible log message below
# ("stop stop") - looks like a typo; candidate for a follow-up fix.
900 $haenv->log("info", "unable to stop stop service $sid (still running)");
904 } elsif ($cmd eq 'migrate' || $cmd eq 'relocate') {
906 my $target = $params->{target
};
907 if (!defined($target)) {
# NOTE(review): the postfix `if !defined($target)` below is redundant
# inside this branch, and the `return` after an unconditional die is
# dead code - candidates for cleanup.
908 die "$cmd '$sid' failed - missing target\n" if !defined($target);
909 return EINVALID_PARAMETER
;
912 if ($service_config->{node
} eq $target) {
917 my $online = ($cmd eq 'migrate') ?
1 : 0;
919 my $res = $plugin->migrate($haenv, $id, $target, $online);
921 # something went wrong if service is still on this node
923 $haenv->log("err", "service $sid not moved (migration error)");
# fallback for commands this agent does not implement
931 $haenv->log("err", "implement me (cmd '$cmd')");
932 return EUNKNOWN_COMMAND
;