3 # Local Resource Manager
8 use POSIX
qw(:sys_wait_h);
12 use PVE
::HA
::Tools
':exit_codes';
14 # Server can have several states:
17 wait_for_agent_lock
=> "waiting for agent lock",
18 active
=> "got agent_lock",
19 lost_agent_lock
=> "lost agent_lock",
23 my ($this, $haenv) = @_;
25 my $class = ref($this) || $this;
29 status
=> { state => 'startup' },
33 shutdown_request
=> 0,
34 # mode can be: active, reboot, shutdown, restart
38 $self->set_local_status({ state => 'wait_for_agent_lock' });
# Request that this LRM shut down. Decides between 'shutdown' mode (stop
# all services) and 'restart' mode (freeze services) based on
# $haenv->is_node_shutdown(), flags {shutdown_request}, and best-effort
# persists the status file.
# NOTE(review): several original lines (the if/else around the two log
# branches, the $@ error capture, closing braces) are missing from this
# extracted chunk — confirm against the full file.
43 sub shutdown_request
{
# Idempotent: a second call while already shutting down is a no-op.
46 return if $self->{shutdown_request
}; # already in shutdown mode
48 my $haenv = $self->{haenv
};
50 my $shutdown = $haenv->is_node_shutdown();
# Branch taken when the node itself is going down: stop everything.
53 $haenv->log('info', "shutdown LRM, stop all services");
54 $self->{mode
} = 'shutdown';
# Otherwise this is only an LRM restart: freeze services in place.
56 $haenv->log('info', "restart LRM, freeze all services");
57 $self->{mode
} = 'restart';
60 $self->{shutdown_request
} = 1;
# Best-effort persist; a failed status write is logged, not fatal.
62 eval { $self->update_lrm_status(); };
64 $self->log('err', "unable to update lrm status file - $err");
# Accessor: return the LRM's current local status hashref (contains at
# least {state}; see set_local_status, which also adds {state_change_time}).
68 sub get_local_status
{
71 return $self->{status
};
# Transition the LRM to a new local state. Dies on a state name not in
# $valid_states; no-ops when the state is unchanged; otherwise logs the
# transition, stamps {state_change_time}, and installs the new status hash.
74 sub set_local_status
{
75 my ($self, $new) = @_;
# Guard against typo'd / unknown state names before touching anything.
77 die "invalid state '$new->{state}'" if !$valid_states->{$new->{state}};
79 my $haenv = $self->{haenv
};
81 my $old = $self->{status
};
83 # important: only update if it really changed
84 return if $old->{state} eq $new->{state};
86 $haenv->log('info', "status change $old->{state} => $new->{state}");
# Record when this state was entered, using the HA environment clock.
88 $new->{state_change_time
} = $haenv->get_time();
90 $self->{status
} = $new;
# Persist this LRM's status (mode, worker results, timestamp) via the HA
# environment. Returns 0 early when the node is not quorate; a failed
# write is logged but not fatal.
# NOTE(review): the `my $lrm_status = {` declaration line and the $@
# error capture are elided in this extracted chunk — confirm against the
# full file.
93 sub update_lrm_status
{
96 my $haenv = $self->{haenv
};
# Without quorum we must not write cluster state.
98 return 0 if !$haenv->quorate();
101 mode
=> $self->{mode
},
102 results
=> $self->{results
},
103 timestamp
=> $haenv->get_time(),
# Best-effort write; errors are logged below rather than propagated.
106 eval { $haenv->write_lrm_status($lrm_status); };
108 $haenv->log('err', "unable to write lrm status file - $err");
# Try to acquire the HA agent lock, keeping the watchdog fed while doing
# so: on success either update the already-open watchdog handle
# ({ha_agent_wd}) or open a fresh one. Retries are bounded both by
# attempt count (5) and by elapsed time (5 seconds).
# NOTE(review): the surrounding retry loop header, the success `return`
# lines and the $count declaration are elided in this extracted chunk.
115 sub get_protected_ha_agent_lock
{
118 my $haenv = $self->{haenv
};
121 my $starttime = $haenv->get_time();
125 if ($haenv->get_ha_agent_lock()) {
# Lock acquired: keep the existing watchdog alive, or open one.
126 if ($self->{ha_agent_wd
}) {
127 $haenv->watchdog_update($self->{ha_agent_wd
});
129 my $wfh = $haenv->watchdog_open();
130 $self->{ha_agent_wd
} = $wfh;
135 last if ++$count > 5; # try max 5 times
137 my $delay = $haenv->get_time() - $starttime;
138 last if $delay > 5; # for max 5 seconds
# Count services assigned to this node whose requested state is neither
# 'stopped' nor 'freeze' — i.e. services the LRM still has to manage.
# NOTE(review): the counter initialisation, increment and `return` lines
# are elided in this extracted chunk — confirm against the full file.
146 sub active_service_count
{
149 my $haenv = $self->{haenv
};
151 my $nodename = $haenv->nodename();
153 my $ss = $self->{service_status
};
157 foreach my $sid (keys %$ss) {
158 my $sd = $ss->{$sid};
# Skip services with no node assignment or assigned elsewhere.
159 next if !$sd->{node
};
160 next if $sd->{node
} ne $nodename;
161 my $req_state = $sd->{state};
162 next if !defined($req_state);
# Stopped and frozen services do not count as active work.
163 next if $req_state eq 'stopped';
164 next if $req_state eq 'freeze';
# File-scoped flag: ensures the LRM status file is written once at
# startup before the main loop proceeds.
172 my $wrote_lrm_status_at_startup = 0;
# One pass of the LRM main loop. First performs local state transitions
# (wait_for_agent_lock / active / lost_agent_lock) based on quorum,
# fence requests and lock acquisition, then does the per-state work.
# Returns 0 to stop the outer loop (shutdown complete), non-zero to
# continue.
# NOTE(review): many original lines (loop/branch braces, $max_time and
# $shutdown declarations, eval wrappers) are elided in this extracted
# chunk — confirm against the full file.
174 sub do_one_iteration
{
177 my $haenv = $self->{haenv
};
# Until the first successful status write, do nothing else: just retry
# the write and report whether a shutdown was requested meanwhile.
179 if (!$wrote_lrm_status_at_startup) {
180 if ($self->update_lrm_status()) {
181 $wrote_lrm_status_at_startup = 1;
185 return $self->{shutdown_request
} ?
0 : 1;
189 my $status = $self->get_local_status();
190 my $state = $status->{state};
# Pull the manager's view of all services for this iteration.
192 my $ms = $haenv->read_manager_status();
193 $self->{service_status
} = $ms->{service_status
} || {};
195 my $fence_request = PVE
::HA
::Tools
::count_fenced_services
($self->{service_status
}, $haenv->nodename());
197 # do state changes first
199 my $ctime = $haenv->get_time();
201 if ($state eq 'wait_for_agent_lock') {
# Only try for the lock when there is work, no fence request, and quorum.
203 my $service_count = $self->active_service_count();
205 if (!$fence_request && $service_count && $haenv->quorate()) {
206 if ($self->get_protected_ha_agent_lock()) {
207 $self->set_local_status({ state => 'active' });
211 } elsif ($state eq 'lost_agent_lock') {
# Try to re-acquire the lock as soon as quorum allows.
213 if (!$fence_request && $haenv->quorate()) {
214 if ($self->get_protected_ha_agent_lock()) {
215 $self->set_local_status({ state => 'active' });
219 } elsif ($state eq 'active') {
# A fence request or a failed lock renewal drops us out of 'active'.
221 if ($fence_request) {
222 $haenv->log('err', "node need to be fenced - releasing agent_lock\n");
223 $self->set_local_status({ state => 'lost_agent_lock'});
224 } elsif (!$self->get_protected_ha_agent_lock()) {
225 $self->set_local_status({ state => 'lost_agent_lock'});
# Re-read the (possibly changed) state before doing the per-state work.
229 $status = $self->get_local_status();
230 $state = $status->{state};
234 if ($state eq 'wait_for_agent_lock') {
236 return 0 if $self->{shutdown_request
};
238 $self->update_lrm_status();
242 } elsif ($state eq 'active') {
244 my $startime = $haenv->get_time();
250 # do work (max_time seconds)
252 # fixme: set alert timer
254 if ($self->{shutdown_request
}) {
# Restart mode: wait for the active service count to reach zero,
# then release the watchdog handle.
256 if ($self->{mode
} eq 'restart') {
258 my $service_count = $self->active_service_count();
260 if ($service_count == 0) {
262 if ($self->{ha_agent_wd
}) {
263 $haenv->watchdog_close($self->{ha_agent_wd
});
264 delete $self->{ha_agent_wd
};
270 # fixme: stop all services
# Normal active-state work: drive resources toward requested states.
275 $self->manage_resources();
280 $haenv->log('err', "got unexpected error - $err");
283 $self->update_lrm_status();
285 return 0 if $shutdown;
# Pace the loop: sleep out the remainder of this iteration's slot.
287 $haenv->sleep_until($startime + $max_time);
289 } elsif ($state eq 'lost_agent_lock') {
291 # Note: watchdog is active and will trigger soon!
293 # so we hope to get the lock back soon!
295 if ($self->{shutdown_request
}) {
297 my $service_count = $self->active_service_count();
# Shutting down while services still run and without the lock is
# an error condition worth shouting about.
299 if ($service_count > 0) {
300 $haenv->log('err', "get shutdown request in state 'lost_agent_lock' - " .
301 "detected $service_count running services");
305 # all services are stopped, so we can close the watchdog
307 if ($self->{ha_agent_wd
}) {
308 $haenv->watchdog_close($self->{ha_agent_wd
});
309 delete $self->{ha_agent_wd
};
# Any state not handled above is a programming error.
320 die "got unexpected status '$state'\n";
# Queue a resource command for every service assigned to this node whose
# requested state is defined and not 'freeze'. Failures to queue are
# logged per service so one bad service does not block the others.
# NOTE(review): the eval wrapper around queue_resource_command and the
# $@ capture appear to be elided in this extracted chunk — confirm.
327 sub manage_resources
{
330 my $haenv = $self->{haenv
};
332 my $nodename = $haenv->nodename();
334 my $ss = $self->{service_status
};
336 foreach my $sid (keys %$ss) {
337 my $sd = $ss->{$sid};
# Skip services without a node assignment or assigned to other nodes.
338 next if !$sd->{node
};
340 next if $sd->{node
} ne $nodename;
341 my $req_state = $sd->{state};
342 next if !defined($req_state);
# Frozen services are deliberately left untouched.
343 next if $req_state eq 'freeze';
345 $self->queue_resource_command($sid, $sd->{uid
}, $req_state, $sd->{target
});
348 $haenv->log('err', "unable to run resource agent for '$sid' - $err"); # fixme
352 my $starttime = $haenv->get_time();
357 my $sc = $haenv->read_service_config();
359 while (($haenv->get_time() - $starttime) < 5) {
360 my $count = $self->check_active_workers();
362 foreach my $sid (keys %{$self->{workers
}}) {
363 last if $count >= $max_workers;
364 my $w = $self->{workers
}->{$sid};
365 my $cd = $sc->{$sid};
367 $haenv->log('err', "missing resource configuration for '$sid'");
371 if ($haenv->can_fork()) {
373 if (!defined($pid)) {
374 $haenv->log('err', "fork worker failed");
375 $count = 0; last; # abort, try later
376 } elsif ($pid == 0) {
380 $res = $haenv->exec_resource_agent($sid, $cd, $w->{state}, $w->{target
});
383 $haenv->log('err', $err);
394 $res = $haenv->exec_resource_agent($sid, $cd, $w->{state}, $w->{target
});
397 $haenv->log('err', $err);
399 $self->resource_command_finished($sid, $w->{uid
}, $res);
410 # fixme: use a queue and limit number of parallel workers?
# Register (or replace) the pending worker entry for service $sid.
# A worker that is already running ({pid} set) is left alone; a queued
# but not-yet-started entry is overwritten with the new command.
# NOTE(review): the fields of the new worker hash (besides {target}) are
# elided in this extracted chunk — confirm against the full file.
411 sub queue_resource_command
{
412 my ($self, $sid, $uid, $state, $target) = @_;
414 if (my $w = $self->{workers
}->{$sid}) {
415 return if $w->{pid
}; # already started
416 # else, delete and overwrite queue entry with new command
417 delete $self->{workers
}->{$sid};
420 $self->{workers
}->{$sid} = {
# {target} is only stored when a target node was actually given.
426 $self->{workers
}->{$sid}->{target
} = $target if $target;
# Reap finished worker processes: for each started worker, poll with a
# non-blocking waitpid (WNOHANG) and, when the child has exited, hand
# its exit status ($?) to resource_command_finished.
# NOTE(review): the still-running counter and the `return $count` lines
# are elided in this extracted chunk — confirm against the full file.
429 sub check_active_workers
{
432 # finish/count workers
434 foreach my $sid (keys %{$self->{workers
}}) {
435 my $w = $self->{workers
}->{$sid};
# Only workers that were actually forked carry a {pid}.
436 if (my $pid = $w->{pid
}) {
438 my $waitpid = waitpid($pid, WNOHANG
);
439 if (defined($waitpid) && ($waitpid == $pid)) {
440 $self->resource_command_finished($sid, $w->{uid
}, $?);
# Handle completion of a worker: decode the wait() status word (exec
# failure / signal / exit code), let handle_service_exitcode apply the
# restart policy, record the result under the command's $uid, and prune
# {results} entries whose uid no longer appears in the service status.
# NOTE(review): the branch testing for exec failure before line 461, the
# $valid_uids/$results declarations and several closing braces are
# elided in this extracted chunk — confirm against the full file.
450 sub resource_command_finished
{
451 my ($self, $sid, $uid, $status) = @_;
453 my $haenv = $self->{haenv
};
455 my $w = delete $self->{workers
}->{$sid};
456 return if !$w; # should not happen
461 $haenv->log('err', "resource agent $sid finished - failed to execute");
# Low 7 bits of the wait status: terminating signal, if any.
462 } elsif (my $sig = ($status & 127)) {
463 $haenv->log('err', "resource agent $sid finished - got signal $sig");
# Normal exit: the exit code lives in the high byte.
465 $exit_code = ($status >> 8);
468 $exit_code = $self->handle_service_exitcode($sid, $w->{state}, $exit_code);
470 $self->{results
}->{$uid} = {
472 state => $w->{state},
473 exit_code
=> $exit_code,
476 my $ss = $self->{service_status
};
478 # compute hash of valid/existing uids
480 foreach my $sid (keys %$ss) {
481 my $sd = $ss->{$sid};
483 $valid_uids->{$sd->{uid
}} = 1;
# Keep only results whose uid is still referenced by some service.
487 foreach my $id (keys %{$self->{results
}}) {
488 next if !$valid_uids->{$id};
489 $results->{$id} = $self->{results
}->{$id};
491 $self->{results
} = $results;
494 # processes the exit code from a finished resource agent, so that the CRM knows
495 # if the LRM wants to retry an action based on the current recovery policies for
496 # the failed service, or the CRM itself must try to recover from the failure.
497 sub handle_service_exitcode
{
498 my ($self, $sid, $cmd, $exit_code) = @_;
500 my $haenv = $self->{haenv
};
501 my $tries = $self->{restart_tries
};
503 my $sc = $haenv->read_service_config();
504 my $cd = $sc->{$sid};
506 if ($cmd eq 'started') {
508 if ($exit_code == SUCCESS
) {
514 } elsif ($exit_code == ERROR
) {
516 $tries->{$sid} = 0 if !defined($tries->{$sid});
519 if ($tries->{$sid} >= $cd->{max_restart
}) {
520 $haenv->log('err', "unable to start service $sid on local node".
521 " after $tries->{$sid} retries");
526 # tell CRM that we retry the start