3 # Local Resource Manager
7 use POSIX
qw(:sys_wait_h);
11 use PVE
::HA
::Tools
':exit_codes';
12 use PVE
::HA
::Resources
;
14 # Server can have several states:
# Map of internal LRM agent states to human-readable descriptions; used by
# set_local_status() below to validate state transitions.
# NOTE(review): the opening `my $valid_states = {` line is not visible in this
# extract — confirm against the full file.
17 wait_for_agent_lock
=> "waiting for agent lock",
18 active
=> "got agent_lock",
19 lost_agent_lock
=> "lost agent_lock",
# Constructor fragment: builds the LRM object around the given HA environment
# ($haenv) and initializes it in the 'startup' state, then immediately moves
# to 'wait_for_agent_lock'.
# NOTE(review): the `sub new {` line and the bless call are missing from this
# extract; only the initialization of the object fields is visible.
23 my ($this, $haenv) = @_;
25 my $class = ref($this) || $this;
# Initial local status; replaced below via set_local_status().
29 status
=> { state => 'startup' },
# No shutdown has been requested yet.
33 shutdown_request
=> 0,
35 # mode can be: active, reboot, shutdown, restart
# Enter the initial operational state: wait until the per-node agent lock
# can be acquired.
39 $self->set_local_status({ state => 'wait_for_agent_lock' });
# Handle a node shutdown/reboot/restart request: queue 'request_stop' jobs for
# every service on this node, pick the LRM mode ('restart' for reboot or LRM
# restart, 'shutdown' for poweroff), set the shutdown_request flag and persist
# the LRM status. Idempotent — returns early if already shutting down.
# NOTE(review): several original lines (the if/elsif headers selecting between
# the reboot/shutdown/restart branches, loop bodies, closing braces) are
# missing from this extract; code below is left byte-identical.
44 sub shutdown_request
{
47 return if $self->{shutdown_request
}; # already in shutdown mode
49 my $haenv = $self->{haenv
};
51 my $nodename = $haenv->nodename();
# Ask the environment whether the node itself is going down, and whether it
# is a reboot (as opposed to a poweroff).
53 my ($shutdown, $reboot) = $haenv->is_node_shutdown();
56 # *always* queue stop jobs for all services if the node shuts down,
57 # independent if it's a reboot or a poweroff, else we may corrupt
58 # services or hinder node shutdown
59 my $ss = $self->{service_status
};
61 foreach my $sid (keys %$ss) {
64 next if $sd->{node
} ne $nodename;
65 # Note: use undef uid to mark shutdown/stop jobs
66 $self->queue_resource_command($sid, undef, 'request_stop');
# Branch taken on node reboot: stop and freeze services, restart mode.
72 $haenv->log('info', "reboot LRM, stop and freeze all services");
73 $self->{mode
} = 'restart';
# Branch taken on node poweroff: stop everything.
75 $haenv->log('info', "shutdown LRM, stop all services");
76 $self->{mode
} = 'shutdown';
# Branch taken on a plain LRM service restart: freeze services in place.
79 $haenv->log('info', "restart LRM, freeze all services");
80 $self->{mode
} = 'restart';
83 $self->{shutdown_request
} = 1;
# Best effort: persist the new mode/flag; only log on failure.
85 eval { $self->update_lrm_status(); };
# NOTE(review): `$self->log(...)` — no `log` method on the LRM is visible in
# this file; every other block uses `$haenv->log`. Likely a bug; confirm.
87 $self->log('err', "unable to update lrm status file - $err");
# Accessor: return the current local status hash ({ state => ...,
# state_change_time => ... }).
91 sub get_local_status
{
94 return $self->{status
};
# Transition the local agent state. Dies on a state name not present in
# $valid_states; no-ops if the state is unchanged; otherwise logs the change,
# stamps $new->{state_change_time} and installs $new as the current status.
97 sub set_local_status
{
98 my ($self, $new) = @_;
# Guard against typos / invalid transitions — $valid_states is the map of
# allowed agent states declared near the top of the file.
100 die "invalid state '$new->{state}'" if !$valid_states->{$new->{state}};
102 my $haenv = $self->{haenv
};
104 my $old = $self->{status
};
106 # important: only update if it really changed
107 return if $old->{state} eq $new->{state};
109 $haenv->log('info', "status change $old->{state} => $new->{state}");
# Record when we entered the new state (used e.g. for lock-wait timing).
111 $new->{state_change_time
} = $haenv->get_time();
113 $self->{status
} = $new;
# Persist the LRM's current view (state, mode, command results, timestamp) via
# the environment. Returns 0 without writing when the node has no quorum;
# write failures are logged, not fatal.
# NOTE(review): the assembly of the $lrm_status hashref (its opening line) and
# the return-value lines are missing from this extract.
116 sub update_lrm_status
{
119 my $haenv = $self->{haenv
};
# Never write status without quorum — the write would not be authoritative.
121 return 0 if !$haenv->quorate();
124 state => $self->{status
}->{state},
125 mode
=> $self->{mode
},
126 results
=> $self->{results
},
127 timestamp
=> $haenv->get_time(),
# Best effort write; failure is logged below via $err.
130 eval { $haenv->write_lrm_status($lrm_status); };
132 $haenv->log('err', "unable to write lrm status file - $err");
# Refresh $self->{service_status} from the CRM manager status. On read
# failure the error is logged (and, per the missing lines, presumably an
# early/false return happens — confirm against the full file). Falls back to
# an empty hash when the manager reports no service status.
139 sub update_service_status
{
142 my $haenv = $self->{haenv
};
144 my $ms = eval { $haenv->read_manager_status(); };
146 $haenv->log('err', "updating service status from manager failed: $err");
149 $self->{service_status
} = $ms->{service_status
} || {};
# Try to acquire (or confirm we still hold) the per-node HA agent lock, and
# keep the watchdog fed while we hold it: update the watchdog if already open,
# otherwise open a new one. Retries in a loop bounded both by attempt count
# (5) and by wall-clock time (5 seconds).
# NOTE(review): the surrounding retry loop header, the success return after
# updating/opening the watchdog, and the sleep between attempts are missing
# from this extract.
154 sub get_protected_ha_agent_lock
{
157 my $haenv = $self->{haenv
};
160 my $starttime = $haenv->get_time();
164 if ($haenv->get_ha_agent_lock()) {
# Lock held: keep the existing watchdog alive, or arm a fresh one.
165 if ($self->{ha_agent_wd
}) {
166 $haenv->watchdog_update($self->{ha_agent_wd
});
168 my $wfh = $haenv->watchdog_open();
169 $self->{ha_agent_wd
} = $wfh;
174 last if ++$count > 5; # try at most 5 times
176 my $delay = $haenv->get_time() - $starttime;
177 last if $delay > 5; # for max 5 seconds
# Count the services on this node that are actually "active", i.e. require
# the LRM to keep working: skips services on other nodes and those whose
# requested state is stopped, freeze or error (erroneous services are no
# longer HA-managed). The counter increment and return line are among the
# lines missing from this extract.
185 sub active_service_count
{
188 my $haenv = $self->{haenv
};
190 my $nodename = $haenv->nodename();
192 my $ss = $self->{service_status
};
196 foreach my $sid (keys %$ss) {
197 my $sd = $ss->{$sid};
198 next if !$sd->{node
};
199 next if $sd->{node
} ne $nodename;
200 my $req_state = $sd->{state};
201 next if !defined($req_state);
202 next if $req_state eq 'stopped';
203 next if $req_state eq 'freeze';
204 # erroneous services are not managed by HA, don't count them as active
205 next if $req_state eq 'error';
# File-scoped flag: set once the LRM status was successfully written after
# startup (checked inside work() below).
213 my $wrote_lrm_status_at_startup = 0;
# One main-loop iteration: bracket the actual work() call with the
# environment's loop start/end hooks (used e.g. by the simulator/regression
# environment). The return-$res line is missing from this extract.
215 sub do_one_iteration
{
218 my $haenv = $self->{haenv
};
220 $haenv->loop_start_hook();
222 my $res = $self->work();
224 $haenv->loop_end_hook();
# Main LRM state machine (body of sub work; the `sub work {` header line is
# missing from this extract, as are many branch headers, closing braces and
# eval wrappers — code below is left byte-identical). Overall flow:
#   1. ensure the LRM status file was written once after startup,
#   2. perform state transitions (wait_for_agent_lock / active /
#      lost_agent_lock) based on fence requests, quorum and lock acquisition,
#   3. act according to the (possibly new) state: manage resources, honor
#      shutdown requests, close the watchdog once it is safe to do so.
# Returns 0 to signal the caller to exit the main loop, non-zero to continue.
232 my $haenv = $self->{haenv
};
234 if (!$wrote_lrm_status_at_startup) {
235 if ($self->update_lrm_status()) {
236 $wrote_lrm_status_at_startup = 1;
# Could not write the status file yet (e.g. no quorum): only keep running
# if no shutdown was requested.
240 return $self->{shutdown_request
} ?
0 : 1;
244 my $status = $self->get_local_status();
245 my $state = $status->{state};
247 $self->update_service_status();
# Number of services on this node that the manager marked for fencing.
249 my $fence_request = PVE
::HA
::Tools
::count_fenced_services
($self->{service_status
}, $haenv->nodename());
251 # do state changes first
253 my $ctime = $haenv->get_time();
255 if ($state eq 'wait_for_agent_lock') {
# Only try to become active when there is something to do, we are not
# being fenced, and we have quorum.
257 my $service_count = $self->active_service_count();
259 if (!$fence_request && $service_count && $haenv->quorate()) {
260 if ($self->get_protected_ha_agent_lock()) {
261 $self->set_local_status({ state => 'active' });
265 } elsif ($state eq 'lost_agent_lock') {
# Try to re-acquire the lock as soon as quorum is back and no fence is
# pending.
267 if (!$fence_request && $haenv->quorate()) {
268 if ($self->get_protected_ha_agent_lock()) {
269 $self->set_local_status({ state => 'active' });
273 } elsif ($state eq 'active') {
275 if ($fence_request) {
276 $haenv->log('err', "node need to be fenced - releasing agent_lock\n");
277 $self->set_local_status({ state => 'lost_agent_lock'});
278 } elsif (!$self->get_protected_ha_agent_lock()) {
279 $self->set_local_status({ state => 'lost_agent_lock'});
# Re-read status: the transitions above may have changed it.
283 $status = $self->get_local_status();
284 $state = $status->{state};
# --- act on the current state ---
288 if ($state eq 'wait_for_agent_lock') {
290 return 0 if $self->{shutdown_request
};
292 $self->update_lrm_status();
296 } elsif ($state eq 'active') {
298 my $startime = $haenv->get_time();
304 # do work (max_time seconds)
306 # fixme: set alert timer
308 # if we could not get the current service status there's no point
309 # in doing anything, try again next round.
310 return if !$self->update_service_status();
312 if ($self->{shutdown_request
}) {
314 if ($self->{mode
} eq 'restart') {
# LRM restart: wait until no active services remain, then close the
# watchdog and release the lock.
316 my $service_count = $self->active_service_count();
318 if ($service_count == 0) {
320 if ($self->run_workers() == 0) {
321 if ($self->{ha_agent_wd
}) {
322 $haenv->watchdog_close($self->{ha_agent_wd
});
323 delete $self->{ha_agent_wd
};
328 # restart with no or frozen services, release the lock
329 $haenv->release_ha_agent_lock();
# Shutdown mode: drain workers; only disarm the watchdog when all stop
# commands succeeded (shutdown_errors == 0).
334 if ($self->run_workers() == 0) {
335 if ($self->{shutdown_errors
} == 0) {
336 if ($self->{ha_agent_wd
}) {
337 $haenv->watchdog_close($self->{ha_agent_wd
});
338 delete $self->{ha_agent_wd
};
341 # shutdown with all services stopped thus release the lock
342 $haenv->release_ha_agent_lock();
# Normal active operation: start/stop/migrate services as requested.
350 $self->manage_resources();
355 $haenv->log('err', "got unexpected error - $err");
358 $self->update_lrm_status();
360 return 0 if $shutdown;
# Pace the main loop to one pass per $max_time window.
362 $haenv->sleep_until($startime + $max_time);
364 } elsif ($state eq 'lost_agent_lock') {
366 # Note: watchdog is active and will trigger soon!
368 # so we hope to get the lock back soon!
370 if ($self->{shutdown_request
}) {
372 my $service_count = $self->active_service_count();
374 if ($service_count > 0) {
375 $haenv->log('err', "get shutdown request in state 'lost_agent_lock' - " .
376 "detected $service_count running services");
380 # all services are stopped, so we can close the watchdog
382 if ($self->{ha_agent_wd
}) {
383 $haenv->watchdog_close($self->{ha_agent_wd
});
384 delete $self->{ha_agent_wd
};
# Defensive: any state not handled above is a programming error.
395 die "got unexpected status '$state'\n";
# Body of run_workers (the `sub run_workers {` header line is missing from
# this extract). Starts queued per-service worker commands for up to ~5
# seconds: forks up to $max_workers children that exec_resource_agent() the
# command, or — when $max_workers == 0, e.g. in regression tests — executes
# the command inline and synthesizes a wait()-style status ($res << 8).
# Returns the number of still-queued/running workers.
# NOTE(review): the fork() call itself, the child-side POSIX::_exit, eval
# wrappers and several closing braces are among the missing lines.
405 my $haenv = $self->{haenv
};
407 my $starttime = $haenv->get_time();
409 # number of workers to start; if 0 we exec the command directly without forking
410 my $max_workers = $haenv->get_max_workers();
412 my $sc = $haenv->read_service_config();
414 while (($haenv->get_time() - $starttime) < 5) {
# Reap finished children first so $count reflects live workers.
415 my $count = $self->check_active_workers();
417 foreach my $sid (sort keys %{$self->{workers
}}) {
418 last if $count >= $max_workers && $max_workers > 0;
420 my $w = $self->{workers
}->{$sid};
422 # only fork if we may; else call exec_resource_agent
423 # directly (e.g. for regression tests)
424 if ($max_workers > 0) {
426 if (!defined($pid)) {
427 $haenv->log('err', "fork worker failed");
428 $count = 0; last; # abort, try later
429 } elsif ($pid == 0) {
430 $haenv->after_fork(); # cleanup
# Child process: run the resource agent command for this service.
435 $res = $self->exec_resource_agent($sid, $sc->{$sid}, $w->{state}, $w->{target
});
438 $haenv->log('err', $err);
# Inline (non-forking) path: run the agent directly and shift the exit
# code into wait()-status format so both paths are handled uniformly.
449 $res = $self->exec_resource_agent($sid, $sc->{$sid}, $w->{state}, $w->{target
});
450 $res = $res << 8 if $res > 0;
453 $haenv->log('err', $err);
# Workers with a uid are CRM-tracked commands; undef uid marks the
# shutdown/stop jobs queued by shutdown_request().
455 if (defined($w->{uid
})) {
456 $self->resource_command_finished($sid, $w->{uid
}, $res);
458 $self->stop_command_finished($sid, $res);
469 return scalar(keys %{$self->{workers
}});
# Queue the manager-requested command for every service assigned to this
# node (skipping frozen ones and those without a requested state), after
# pruning restart counters for services that vanished from the status.
# Returns run_workers()'s count of outstanding workers.
472 sub manage_resources
{
475 my $haenv = $self->{haenv
};
477 my $nodename = $haenv->nodename();
479 my $ss = $self->{service_status
};
# Drop restart-retry bookkeeping for services no longer known.
481 foreach my $sid (keys %{$self->{restart_tries
}}) {
482 delete $self->{restart_tries
}->{$sid} if !$ss->{$sid};
485 foreach my $sid (keys %$ss) {
486 my $sd = $ss->{$sid};
487 next if !$sd->{node
};
489 next if $sd->{node
} ne $nodename;
490 my $req_state = $sd->{state};
491 next if !defined($req_state);
# Frozen services are intentionally left alone (e.g. during LRM restart).
492 next if $req_state eq 'freeze';
493 $self->queue_resource_command($sid, $sd->{uid
}, $req_state, $sd->{target
});
496 return $self->run_workers();
# Queue one command ($state, with optional migration $target) for service
# $sid under CRM command id $uid. Deduplicates already-processed commands,
# never preempts a worker that has already forked, and replaces any
# not-yet-started queue entry. The queued-entry field list (sid/uid/state)
# is among the lines missing from this extract.
499 sub queue_resource_command
{
500 my ($self, $sid, $uid, $state, $target) = @_;
502 # do not queue the exact same command twice as this may lead to
503 # an inconsistent HA state when the first command fails but the CRM
504 # does not process its failure right away and the LRM starts a second
505 # try, without the CRM knowing of it (race condition)
506 # The 'stopped' command is an exception as we do not process its result
507 # in the CRM and we want to execute it always (even with no active CRM)
508 return if $state ne 'stopped' && $uid && defined($self->{results
}->{$uid});
510 if (my $w = $self->{workers
}->{$sid}) {
511 return if $w->{pid
}; # already started
512 # else, delete and overwrite queue entry with new command
513 delete $self->{workers
}->{$sid};
516 $self->{workers
}->{$sid} = {
# Only record a target when one was given (migrate/relocate commands).
522 $self->{workers
}->{$sid}->{target
} = $target if $target;
# Reap finished worker children without blocking (waitpid WNOHANG) and route
# each child's wait status ($?) to resource_command_finished (CRM-tracked,
# has a uid) or stop_command_finished (shutdown/stop job, undef uid).
# The live-worker counter increment and return are among the lines missing
# from this extract; per run_workers()'s usage it returns the count of
# still-running workers — confirm against the full file.
525 sub check_active_workers
{
528 # finish/count workers
530 foreach my $sid (keys %{$self->{workers
}}) {
531 my $w = $self->{workers
}->{$sid};
532 if (my $pid = $w->{pid
}) {
# Non-blocking probe: returns the pid only once the child has exited.
534 my $waitpid = waitpid($pid, WNOHANG
);
535 if (defined($waitpid) && ($waitpid == $pid)) {
536 if (defined($w->{uid
})) {
537 $self->resource_command_finished($sid, $w->{uid
}, $?);
539 $self->stop_command_finished($sid, $?);
# Finalize a shutdown/stop job (one queued with undef uid): remove its worker
# entry, decode the wait() status — exec failure, death-by-signal, or normal
# exit code — and count any failure in shutdown_errors so work() knows not to
# disarm the watchdog on shutdown.
550 sub stop_command_finished
{
551 my ($self, $sid, $status) = @_;
553 my $haenv = $self->{haenv
};
555 my $w = delete $self->{workers
}->{$sid};
556 return if !$w; # should not happen
# $status is a raw wait() status: -1/exec-failure case is handled in the
# (missing) branch above; low 7 bits = terminating signal; high byte =
# normal exit code.
561 $haenv->log('err', "resource agent $sid finished - failed to execute");
562 } elsif (my $sig = ($status & 127)) {
563 $haenv->log('err', "resource agent $sid finished - got signal $sig");
565 $exit_code = ($status >> 8);
568 if ($exit_code != 0) {
569 $self->{shutdown_errors
}++;
# Finalize a CRM-tracked command: remove the worker entry, decode the wait()
# status, let handle_service_exitcode() apply the restart policy (ETRY_AGAIN
# means "retry silently, tell the CRM nothing"), record the result under its
# $uid for the CRM to pick up, and garbage-collect results whose uid is no
# longer referenced by any service status entry.
573 sub resource_command_finished
{
574 my ($self, $sid, $uid, $status) = @_;
576 my $haenv = $self->{haenv
};
578 my $w = delete $self->{workers
}->{$sid};
579 return if !$w; # should not happen
# Same wait()-status decoding as stop_command_finished: signal in the low
# 7 bits, normal exit code in the high byte.
584 $haenv->log('err', "resource agent $sid finished - failed to execute");
585 } elsif (my $sig = ($status & 127)) {
586 $haenv->log('err', "resource agent $sid finished - got signal $sig");
588 $exit_code = ($status >> 8);
591 $exit_code = $self->handle_service_exitcode($sid, $w->{state}, $exit_code);
593 return if $exit_code == ETRY_AGAIN
; # tell nobody, simply retry
# Publish the final result for the CRM, keyed by command uid.
595 $self->{results
}->{$uid} = {
597 state => $w->{state},
598 exit_code
=> $exit_code,
601 my $ss = $self->{service_status
};
603 # compute hash of valid/existing uids
605 foreach my $sid (keys %$ss) {
606 my $sd = $ss->{$sid};
608 $valid_uids->{$sd->{uid
}} = 1;
# Keep only results the manager still references; drop stale entries.
612 foreach my $id (keys %{$self->{results
}}) {
613 next if !$valid_uids->{$id};
614 $results->{$id} = $self->{results
}->{$id};
616 $self->{results
} = $results;
619 # processes the exit code from a finished resource agent, so that the CRM knows
620 # if the LRM wants to retry an action based on the current recovery policies for
621 # the failed service, or the CRM itself must try to recover from the failure.
# Applies the per-service max_restart policy to 'started' commands: on ERROR,
# retries locally (returning, per the missing tail lines, presumably
# ETRY_AGAIN — confirm) until $max_restart attempts are exhausted, then lets
# the original exit code propagate so the CRM takes over recovery.
# NOTE(review): the default $max_restart value, the retry-counter reset on
# SUCCESS, the increment on retry, and the return statements are among the
# lines missing from this extract.
622 sub handle_service_exitcode
{
623 my ($self, $sid, $cmd, $exit_code) = @_;
625 my $haenv = $self->{haenv
};
626 my $tries = $self->{restart_tries
};
628 my $sc = $haenv->read_service_config();
# Per-service override of the restart limit from the service config.
632 if (my $cd = $sc->{$sid}) {
633 $max_restart = $cd->{max_restart
};
636 if ($cmd eq 'started') {
638 if ($exit_code == SUCCESS
) {
644 } elsif ($exit_code == ERROR
) {
646 $tries->{$sid} = 0 if !defined($tries->{$sid});
648 if ($tries->{$sid} >= $max_restart) {
649 $haenv->log('err', "unable to start service $sid on local node".
650 " after $tries->{$sid} retries");
657 $haenv->log('warning', "restart policy: retry number $tries->{$sid}" .
658 " for service '$sid'");
659 # tell CRM that we retry the start
# Execute one resource-agent command ($cmd) for service $sid via the resource
# plugin matching its type. Validates plugin and config, refuses commands for
# services in 'error' state or assigned to another node, then dispatches:
# 'started' (start if not running), 'request_stop'/'stopped' (shutdown if
# running), 'migrate'/'relocate' (move to $params[0], online only for
# migrate). Returns the PVE::HA::Tools exit-code constants. The definition
# continues past the end of this extract (final return / closing brace not
# visible), and several branch headers and return statements in between are
# missing; code below is left byte-identical.
668 sub exec_resource_agent
{
669 my ($self, $sid, $service_config, $cmd, @params) = @_;
671 # setup execution environment
# Sanitized PATH for any child processes the plugin spawns.
673 $ENV{'PATH'} = '/sbin:/bin:/usr/sbin:/usr/bin';
675 my $haenv = $self->{haenv
};
677 my $nodename = $haenv->nodename();
# Split "type:name" service id into its components.
679 my (undef, $service_type, $service_name) = PVE
::HA
::Tools
::parse_sid
($sid);
681 my $plugin = PVE
::HA
::Resources-
>lookup($service_type);
683 $haenv->log('err', "service type '$service_type' not implemented");
684 return EUNKNOWN_SERVICE_TYPE
;
687 if (!$service_config) {
688 $haenv->log('err', "missing resource configuration for '$sid'");
689 return EUNKNOWN_SERVICE
;
692 # process error state early
693 if ($cmd eq 'error') {
695 $haenv->log('err', "service $sid is in an error state and needs manual " .
696 "intervention. Look up 'ERROR RECOVERY' in the documentation.");
698 return SUCCESS
; # error always succeeds
# Refuse to act on services the manager assigned to a different node.
701 if ($service_config->{node
} ne $nodename) {
702 $haenv->log('err', "service '$sid' not on this node");
706 my $id = $service_name;
708 my $running = $plugin->check_running($haenv, $id);
710 if ($cmd eq 'started') {
# Idempotent: starting an already-running service succeeds immediately.
712 return SUCCESS
if $running;
714 $haenv->log("info", "starting service $sid");
716 $plugin->start($haenv, $id);
# Verify the start actually took effect before reporting success.
718 $running = $plugin->check_running($haenv, $id);
721 $haenv->log("info", "service status $sid started");
724 $haenv->log("warning", "unable to start service $sid");
728 } elsif ($cmd eq 'request_stop' || $cmd eq 'stopped') {
730 return SUCCESS
if !$running;
732 $haenv->log("info", "stopping service $sid");
734 $plugin->shutdown($haenv, $id);
736 $running = $plugin->check_running($haenv, $id);
739 $haenv->log("info", "service status $sid stopped");
# NOTE(review): runtime string below says "stop stop" — duplicated word in a
# log message; candidate fix, but runtime strings may not change in a
# documentation-only edit.
742 $haenv->log("info", "unable to stop stop service $sid (still running)");
746 } elsif ($cmd eq 'migrate' || $cmd eq 'relocate') {
748 my $target = $params[0];
749 if (!defined($target)) {
# NOTE(review): the `if !defined($target)` postfix condition is redundant
# inside this branch (always true here) and makes the following return
# unreachable — confirm intent against the full file.
750 die "$cmd '$sid' failed - missing target\n" if !defined($target);
751 return EINVALID_PARAMETER
;
# Already on the target node: the (missing) branch body presumably returns
# success — confirm.
754 if ($service_config->{node
} eq $target) {
# Live migration only for 'migrate'; 'relocate' moves the service offline.
759 my $online = ($cmd eq 'migrate') ?
1 : 0;
761 my $res = $plugin->migrate($haenv, $id, $target, $online);
763 # something went wrong if service is still on this node
765 $haenv->log("err", "service $sid not moved (migration error)");
# Fallback for unimplemented commands.
773 $haenv->log("err", "implement me (cmd '$cmd')");
774 return EUNKNOWN_COMMAND
;