# Local Resource Manager
use strict;
use warnings;
use POSIX qw(:sys_wait_h);

use PVE::HA::Tools ':exit_codes';
# Server can have several states:
my $valid_states = {
    wait_for_agent_lock => "waiting for agent lock",
    active => "got agent_lock",
    lost_agent_lock => "lost agent_lock",
};
# Constructor: binds the LRM to its HA environment abstraction ($haenv)
# and starts out waiting for the agent lock.
sub new {
    my ($this, $haenv) = @_;

    my $class = ref($this) || $this;

    my $self = bless {
        haenv => $haenv,
        status => { state => 'startup' },
        workers => {},           # queued/running resource agent commands, by $sid
        results => {},           # finished command results, by $uid
        restart_tries => {},     # per-service start retry counters
        shutdown_request => 0,
        # mode can be: active, reboot, shutdown, restart
        mode => 'active',
    }, $class;

    $self->set_local_status({ state => 'wait_for_agent_lock' });

    return $self;
}
# Handle an external shutdown request: decide between full 'shutdown'
# (node going down - stop all services) and 'restart' (LRM restart only -
# freeze services), then persist the new mode in the LRM status file.
sub shutdown_request {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    my $shutdown = $haenv->is_node_shutdown();

    if ($shutdown) {
        $haenv->log('info', "shutdown LRM, stop all services");
        $self->{mode} = 'shutdown';
    } else {
        $haenv->log('info', "restart LRM, freeze all services");
        $self->{mode} = 'restart';
    }

    $self->{shutdown_request} = 1;

    eval { $self->update_lrm_status(); };
    if (my $err = $@) {
        # fix: was "$self->log(...)" - the LRM has no log method, logging
        # goes through the HA environment everywhere else in this module
        $haenv->log('err', "unable to update lrm status file - $err");
    }
}
# Return the current local status hash ({ state => ..., state_change_time => ... }).
sub get_local_status {
    my ($self) = @_;

    return $self->{status};
}
# Switch the LRM to a new local state. Dies on states not listed in
# $valid_states; no-op if the state did not actually change.
sub set_local_status {
    my ($self, $new) = @_;

    die "invalid state '$new->{state}'" if !$valid_states->{$new->{state}};

    my $haenv = $self->{haenv};

    my $old = $self->{status};

    # important: only update if it really changed
    return if $old->{state} eq $new->{state};

    $haenv->log('info', "status change $old->{state} => $new->{state}");

    # record when we entered the new state
    $new->{state_change_time} = $haenv->get_time();

    $self->{status} = $new;
}
# Write the node's LRM status (mode, command results, timestamp) so the
# CRM can observe it. Returns 1 on success, 0 if not quorate or the
# write failed (write errors are logged, not fatal).
sub update_lrm_status {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    # writing is pointless without quorum - the cluster FS is read-only
    return 0 if !$haenv->quorate();

    my $lrm_status = {
        mode => $self->{mode},
        results => $self->{results},
        timestamp => $haenv->get_time(),
    };

    eval { $haenv->write_lrm_status($lrm_status); };
    if (my $err = $@) {
        $haenv->log('err', "unable to write lrm status file - $err");
        return 0;
    }

    return 1;
}
# Try to acquire the HA agent lock, retrying for a bounded time, and keep
# the watchdog fed while we hold it. Opens the watchdog on first
# acquisition, updates it on subsequent ones. Returns 1 on success, 0 if
# the lock could not be obtained within the retry budget.
sub get_protected_ha_agent_lock {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    my $count = 0;
    my $starttime = $haenv->get_time();

    for (;;) {

        if ($haenv->get_ha_agent_lock()) {
            if ($self->{ha_agent_wd}) {
                $haenv->watchdog_update($self->{ha_agent_wd});
            } else {
                my $wfh = $haenv->watchdog_open();
                $self->{ha_agent_wd} = $wfh;
            }
            return 1;
        }

        last if ++$count > 5; # try max 5 times

        my $delay = $haenv->get_time() - $starttime;
        last if $delay > 5; # for max 5 seconds

        $haenv->sleep(1);
    }

    return 0;
}
# Count services assigned to this node that are neither 'stopped' nor
# 'freeze' - i.e. services the LRM still has to actively manage.
sub active_service_count {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    my $nodename = $haenv->nodename();

    my $ss = $self->{service_status};

    my $count = 0;

    foreach my $sid (keys %$ss) {
        my $sd = $ss->{$sid};
        next if !$sd->{node};
        next if $sd->{node} ne $nodename;
        my $req_state = $sd->{state};
        next if !defined($req_state);
        next if $req_state eq 'stopped';
        next if $req_state eq 'freeze';

        $count++;
    }

    return $count;
}
# ensure we write our status file at least once before doing real work,
# so the CRM knows we are alive
my $wrote_lrm_status_at_startup = 0;

# One iteration of the LRM main loop: refresh manager/service status,
# perform state transitions (wait_for_agent_lock / active /
# lost_agent_lock), then do the per-state work. Returns 0 when the LRM
# should exit, 1 to keep looping.
sub do_one_iteration {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    if (!$wrote_lrm_status_at_startup) {
        if ($self->update_lrm_status()) {
            $wrote_lrm_status_at_startup = 1;
        } else {
            # do nothing - retry on the next iteration
            $haenv->sleep(5);
            return $self->{shutdown_request} ? 0 : 1;
        }
    }

    my $status = $self->get_local_status();
    my $state = $status->{state};

    my $ms = $haenv->read_manager_status();
    $self->{service_status} = $ms->{service_status} || {};

    my $fence_request = PVE::HA::Tools::count_fenced_services($self->{service_status}, $haenv->nodename());

    # do state changes first

    my $ctime = $haenv->get_time();

    if ($state eq 'wait_for_agent_lock') {

        my $service_count = $self->active_service_count();

        if (!$fence_request && $service_count && $haenv->quorate()) {
            if ($self->get_protected_ha_agent_lock()) {
                $self->set_local_status({ state => 'active' });
            }
        }

    } elsif ($state eq 'lost_agent_lock') {

        if (!$fence_request && $haenv->quorate()) {
            if ($self->get_protected_ha_agent_lock()) {
                $self->set_local_status({ state => 'active' });
            }
        }

    } elsif ($state eq 'active') {

        if ($fence_request) {
            $haenv->log('err', "node need to be fenced - releasing agent_lock\n");
            $self->set_local_status({ state => 'lost_agent_lock'});
        } elsif (!$self->get_protected_ha_agent_lock()) {
            $self->set_local_status({ state => 'lost_agent_lock'});
        }
    }

    # re-read: the transitions above may have changed our state
    $status = $self->get_local_status();
    $state = $status->{state};

    # do work

    if ($state eq 'wait_for_agent_lock') {

        return 0 if $self->{shutdown_request};

        $self->update_lrm_status();

        $haenv->sleep(5);

    } elsif ($state eq 'active') {

        my $startime = $haenv->get_time();

        my $max_time = 10;

        my $shutdown = 0;

        # do work (max_time seconds)
        eval {
            # fixme: set alert timer

            if ($self->{shutdown_request}) {

                if ($self->{mode} eq 'restart') {

                    # restart mode: wait until no services are active,
                    # then hand over the watchdog cleanly
                    my $service_count = $self->active_service_count();

                    if ($service_count == 0) {

                        if ($self->{ha_agent_wd}) {
                            $haenv->watchdog_close($self->{ha_agent_wd});
                            delete $self->{ha_agent_wd};
                        }

                        $shutdown = 1;
                    }
                } else {
                    # fixme: stop all services
                    $shutdown = 1;
                }
            } else {

                $self->manage_resources();

            }
        };
        if (my $err = $@) {
            $haenv->log('err', "got unexpected error - $err");
        }

        $self->update_lrm_status();

        return 0 if $shutdown;

        $haenv->sleep_until($startime + $max_time);

    } elsif ($state eq 'lost_agent_lock') {

        # Note: watchdog is active and will trigger soon!

        # so we hope to get the lock back soon!

        if ($self->{shutdown_request}) {

            my $service_count = $self->active_service_count();

            if ($service_count > 0) {
                $haenv->log('err', "get shutdown request in state 'lost_agent_lock' - " .
                            "detected $service_count running services");
            } else {

                # all services are stopped, so we can close the watchdog

                if ($self->{ha_agent_wd}) {
                    $haenv->watchdog_close($self->{ha_agent_wd});
                    delete $self->{ha_agent_wd};
                }

                return 0;
            }
        }

        $haenv->sleep(5);

    } else {

        die "got unexpected status '$state'\n";
    }

    return 1;
}
# Queue resource agent commands for all services assigned to this node,
# then run queued workers (forked where possible) for up to ~5 seconds,
# collecting finished ones via check_active_workers().
sub manage_resources {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    my $nodename = $haenv->nodename();

    my $ss = $self->{service_status};

    foreach my $sid (keys %$ss) {
        my $sd = $ss->{$sid};
        next if !$sd->{node};
        next if !$sd->{uid};
        next if $sd->{node} ne $nodename;
        my $req_state = $sd->{state};
        next if !defined($req_state);
        next if $req_state eq 'freeze';
        eval {
            $self->queue_resource_command($sid, $sd->{uid}, $req_state, $sd->{target});
        };
        if (my $err = $@) {
            $haenv->log('err', "unable to run resource agent for '$sid' - $err"); # fixme
        }
    }

    my $starttime = $haenv->get_time();

    # start workers
    my $max_workers = 4;

    my $sc = $haenv->read_service_config();

    while (($haenv->get_time() - $starttime) < 5) {
        my $count = $self->check_active_workers();

        foreach my $sid (keys %{$self->{workers}}) {
            last if $count >= $max_workers;
            my $w = $self->{workers}->{$sid};
            my $cd = $sc->{$sid};
            if (!$cd) {
                $haenv->log('err', "missing resource configuration for '$sid'");
                next;
            }
            if (!$w->{pid}) {
                if ($haenv->can_fork()) {
                    my $pid = fork();
                    if (!defined($pid)) {
                        $haenv->log('err', "fork worker failed");
                        $count = 0; last; # abort, try later
                    } elsif ($pid == 0) {
                        # child: execute the resource agent and exit with
                        # its result code (use _exit to skip destructors)
                        my $res = -1;
                        eval {
                            $res = $haenv->exec_resource_agent($sid, $cd, $w->{state}, $w->{target});
                        };
                        if (my $err = $@) {
                            $haenv->log('err', $err);
                            POSIX::_exit(-1);
                        }
                        POSIX::_exit($res);
                    } else {
                        $count++;
                        $w->{pid} = $pid;
                    }
                } else {
                    # no fork available (e.g. simulation) - run inline
                    my $res = -1;
                    eval {
                        $res = $haenv->exec_resource_agent($sid, $cd, $w->{state}, $w->{target});
                    };
                    if (my $err = $@) {
                        $haenv->log('err', $err);
                    }
                    $self->resource_command_finished($sid, $w->{uid}, $res);
                }
            }
        }

        last if !$count;

        $haenv->sleep(1);
    }
}
# fixme: use a queue and limit number of parallel workers?
# Queue (or re-queue) a resource agent command for service $sid. A
# command whose worker is already running is left untouched; a queued
# but not-yet-started entry is replaced by the new command.
sub queue_resource_command {
    my ($self, $sid, $uid, $state, $target) = @_;

    if (my $w = $self->{workers}->{$sid}) {
        return if $w->{pid}; # already started
        # else, delete and overwrite queue entry with new command
        delete $self->{workers}->{$sid};
    }

    $self->{workers}->{$sid} = {
        sid => $sid,
        uid => $uid,
        state => $state,
    };

    $self->{workers}->{$sid}->{target} = $target if $target;
}
# Reap finished worker processes (non-blocking waitpid) and report how
# many workers are still running. Queued-but-unstarted entries (no pid)
# are not counted.
sub check_active_workers {
    my ($self) = @_;

    # finish/count workers
    my $count = 0;
    foreach my $sid (keys %{$self->{workers}}) {
        my $w = $self->{workers}->{$sid};
        if (my $pid = $w->{pid}) {
            # check status
            my $waitpid = waitpid($pid, WNOHANG);
            if (defined($waitpid) && ($waitpid == $pid)) {
                # child exited - $? holds its wait status
                $self->resource_command_finished($sid, $w->{uid}, $?);
            } else {
                $count++;
            }
        }
    }

    return $count;
}
# Record the outcome of a finished resource agent command: decode the
# wait status, let handle_service_exitcode() apply retry policy, store
# the result under $uid, and prune results whose uid is no longer
# referenced by any service.
sub resource_command_finished {
    my ($self, $sid, $uid, $status) = @_;

    my $haenv = $self->{haenv};

    my $w = delete $self->{workers}->{$sid};
    return if !$w; # should not happen

    my $exit_code = -1;

    if ($status == -1) {
        $haenv->log('err', "resource agent $sid finished - failed to execute");
    } elsif (my $sig = ($status & 127)) {
        $haenv->log('err', "resource agent $sid finished - got signal $sig");
    } else {
        $exit_code = ($status >> 8);
    }

    $exit_code = $self->handle_service_exitcode($sid, $w->{state}, $exit_code);

    $self->{results}->{$uid} = {
        sid => $sid,
        state => $w->{state},
        exit_code => $exit_code,
    };

    my $ss = $self->{service_status};

    # compute hash of valid/existing uids
    my $valid_uids = {};
    foreach my $sid (keys %$ss) {
        my $sd = $ss->{$sid};
        next if !$sd->{uid};
        $valid_uids->{$sd->{uid}} = 1;
    }

    # drop results for commands no service refers to anymore
    my $results = {};
    foreach my $id (keys %{$self->{results}}) {
        next if !$valid_uids->{$id};
        $results->{$id} = $self->{results}->{$id};
    }
    $self->{results} = $results;
}
# processes the exit code from a finished resource agent, so that the CRM knows
# if the LRM wants to retry an action based on the current recovery policies for
# the failed service, or the CRM itself must try to recover from the failure.
495 sub handle_service_exitcode
{
496 my ($self, $sid, $cmd, $exit_code) = @_;
498 my $haenv = $self->{haenv
};
499 my $tries = $self->{restart_tries
};
501 my $sc = $haenv->read_service_config();
502 my $cd = $sc->{$sid};
504 if ($cmd eq 'started') {
506 if ($exit_code == SUCCESS
) {
512 } elsif ($exit_code == ERROR
) {
514 $tries->{$sid} = 0 if !defined($tries->{$sid});
517 if ($tries->{$sid} >= $cd->{max_restart
}) {
518 $haenv->log('err', "unable to start service $sid on local node".
519 " after $tries->{$sid} retries");
524 # tell CRM that we retry the start