package PVE::HA::LRM;

# Local Resource Manager

use strict;
use warnings;
use Data::Dumper;
use POSIX qw(:sys_wait_h);

use PVE::SafeSyslog;
use PVE::Tools;
use PVE::HA::Tools ':exit_codes';

# The server can be in several states:

my $valid_states = {
    wait_for_agent_lock => "waiting for agent lock",
    active => "got agent_lock",
    lost_agent_lock => "lost agent_lock",
};
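
# Observed transitions within this module: 'wait_for_agent_lock' -> 'active'
# once the agent lock is acquired, 'active' -> 'lost_agent_lock' when the lock
# is lost or the node is scheduled for fencing, and 'lost_agent_lock' ->
# 'active' if the lock can be re-acquired. The initial 'startup' state set in
# new() is replaced right away via set_local_status().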

sub new {
    my ($this, $haenv) = @_;

    my $class = ref($this) || $this;

    my $self = bless {
        haenv => $haenv,
        status => { state => 'startup' },
        workers => {},
        results => {},
        restart_tries => {},
        shutdown_request => 0,
        # mode can be: active, reboot, shutdown, restart
        mode => 'active',
    }, $class;

    $self->set_local_status({ state => 'wait_for_agent_lock' });

    return $self;
}

sub shutdown_request {
    my ($self) = @_;

    return if $self->{shutdown_request}; # already in shutdown mode

    my $haenv = $self->{haenv};

    my $shutdown = $haenv->is_node_shutdown();

    if ($shutdown) {
        $haenv->log('info', "shutdown LRM, stop all services");
        $self->{mode} = 'shutdown';
    } else {
        $haenv->log('info', "restart LRM, freeze all services");
        $self->{mode} = 'restart';
    }

    $self->{shutdown_request} = 1;

    eval { $self->update_lrm_status(); };
    if (my $err = $@) {
        $haenv->log('err', "unable to update lrm status file - $err");
    }
}
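
# A minimal usage sketch (assumed caller, not part of this module): the daemon
# wrapping the LRM would typically invoke this from its shutdown/signal hook,
# for example
#
#   local $SIG{TERM} = sub { $lrm->shutdown_request(); };
#
# Thanks to the early return above, repeated calls are harmless and keep the
# mode ('shutdown' or 'restart') that was chosen on the first call.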

sub get_local_status {
    my ($self) = @_;

    return $self->{status};
}

sub set_local_status {
    my ($self, $new) = @_;

    die "invalid state '$new->{state}'" if !$valid_states->{$new->{state}};

    my $haenv = $self->{haenv};

    my $old = $self->{status};

    # important: only update if it really changed
    return if $old->{state} eq $new->{state};

    $haenv->log('info', "status change $old->{state} => $new->{state}");

    $new->{state_change_time} = $haenv->get_time();

    $self->{status} = $new;
}

sub update_lrm_status {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    return 0 if !$haenv->quorate();

    my $lrm_status = {
        mode => $self->{mode},
        results => $self->{results},
        timestamp => $haenv->get_time(),
    };

    eval { $haenv->write_lrm_status($lrm_status); };
    if (my $err = $@) {
        $haenv->log('err', "unable to write lrm status file - $err");
        return 0;
    }

    return 1;
}
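
# The status hash written above has this shape:
#
#   {
#       mode      => 'active', 'restart' or 'shutdown' (see the mode comment in new()),
#       results   => { <uid> => { sid => ..., state => ..., exit_code => ... } },
#       timestamp => <unix time>,
#   }
#
# Note that nothing is written while the node has no quorum, so readers may
# observe a stale timestamp in that situation.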

sub get_protected_ha_agent_lock {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    my $count = 0;
    my $starttime = $haenv->get_time();

    for (;;) {

        if ($haenv->get_ha_agent_lock()) {
            if ($self->{ha_agent_wd}) {
                $haenv->watchdog_update($self->{ha_agent_wd});
            } else {
                my $wfh = $haenv->watchdog_open();
                $self->{ha_agent_wd} = $wfh;
            }
            return 1;
        }

        last if ++$count > 5; # try at most 5 times

        my $delay = $haenv->get_time() - $starttime;
        last if $delay > 5; # for at most 5 seconds

        $haenv->sleep(1);
    }

    return 0;
}
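
# Note on the watchdog handling above: the watchdog is opened the first time
# the agent lock is acquired and only updated on later acquisitions. The
# handle is closed again (see do_one_iteration) once no services remain on
# this node; while it stays open, a node that stops updating it is expected
# to self-fence - an assumption based on how this module uses the handle,
# since the watchdog implementation itself lives in the environment ($haenv).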

sub active_service_count {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    my $nodename = $haenv->nodename();

    my $ss = $self->{service_status};

    my $count = 0;

    foreach my $sid (keys %$ss) {
        my $sd = $ss->{$sid};
        next if !$sd->{node};
        next if $sd->{node} ne $nodename;
        my $req_state = $sd->{state};
        next if !defined($req_state);
        next if $req_state eq 'stopped';
        next if $req_state eq 'freeze';

        $count++;
    }

    return $count;
}

my $wrote_lrm_status_at_startup = 0;

sub do_one_iteration {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    if (!$wrote_lrm_status_at_startup) {
        if ($self->update_lrm_status()) {
            $wrote_lrm_status_at_startup = 1;
        } else {
            # do nothing
            $haenv->sleep(5);
            return $self->{shutdown_request} ? 0 : 1;
        }
    }

    my $status = $self->get_local_status();
    my $state = $status->{state};

    my $ms = $haenv->read_manager_status();
    $self->{service_status} = $ms->{service_status} || {};

    my $fence_request = PVE::HA::Tools::count_fenced_services($self->{service_status}, $haenv->nodename());

    # do state changes first

    my $ctime = $haenv->get_time();

    if ($state eq 'wait_for_agent_lock') {

        my $service_count = $self->active_service_count();

        if (!$fence_request && $service_count && $haenv->quorate()) {
            if ($self->get_protected_ha_agent_lock()) {
                $self->set_local_status({ state => 'active' });
            }
        }

    } elsif ($state eq 'lost_agent_lock') {

        if (!$fence_request && $haenv->quorate()) {
            if ($self->get_protected_ha_agent_lock()) {
                $self->set_local_status({ state => 'active' });
            }
        }

    } elsif ($state eq 'active') {

        if ($fence_request) {
            $haenv->log('err', "node needs to be fenced - releasing agent_lock\n");
            $self->set_local_status({ state => 'lost_agent_lock'});
        } elsif (!$self->get_protected_ha_agent_lock()) {
            $self->set_local_status({ state => 'lost_agent_lock'});
        }
    }

    $status = $self->get_local_status();
    $state = $status->{state};

    # do work

    if ($state eq 'wait_for_agent_lock') {

        return 0 if $self->{shutdown_request};

        $self->update_lrm_status();

        $haenv->sleep(5);

    } elsif ($state eq 'active') {

        my $starttime = $haenv->get_time();

        my $max_time = 10;

        my $shutdown = 0;

        # do work (max_time seconds)
        eval {
            # fixme: set alert timer

            if ($self->{shutdown_request}) {

                if ($self->{mode} eq 'restart') {

                    my $service_count = $self->active_service_count();

                    if ($service_count == 0) {

                        if ($self->{ha_agent_wd}) {
                            $haenv->watchdog_close($self->{ha_agent_wd});
                            delete $self->{ha_agent_wd};
                        }

                        $shutdown = 1;
                    }
                } else {
                    # fixme: stop all services
                    $shutdown = 1;
                }
            } else {

                $self->manage_resources();

            }
        };
        if (my $err = $@) {
            $haenv->log('err', "got unexpected error - $err");
        }

        $self->update_lrm_status();

        return 0 if $shutdown;

        $haenv->sleep_until($starttime + $max_time);

    } elsif ($state eq 'lost_agent_lock') {

        # Note: the watchdog is active and will trigger soon!

        # so we hope to get the lock back soon!

        if ($self->{shutdown_request}) {

            my $service_count = $self->active_service_count();

            if ($service_count > 0) {
                $haenv->log('err', "got shutdown request in state 'lost_agent_lock' - " .
                    "detected $service_count running services");

            } else {

                # all services are stopped, so we can close the watchdog

                if ($self->{ha_agent_wd}) {
                    $haenv->watchdog_close($self->{ha_agent_wd});
                    delete $self->{ha_agent_wd};
                }

                return 0;
            }
        }

        $haenv->sleep(5);

    } else {

        die "got unexpected status '$state'\n";

    }

    return 1;
}
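
# A minimal sketch of the loop assumed to drive this method (the real daemon
# code lives outside this file):
#
#   my $lrm = PVE::HA::LRM->new($haenv);
#   while ($lrm->do_one_iteration()) {
#       # keep iterating; 0 is returned once a shutdown/restart request
#       # has been fully processed, 1 means "call me again"
#   }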

sub manage_resources {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    my $nodename = $haenv->nodename();

    my $ss = $self->{service_status};

    foreach my $sid (keys %$ss) {
        my $sd = $ss->{$sid};
        next if !$sd->{node};
        next if !$sd->{uid};
        next if $sd->{node} ne $nodename;
        my $req_state = $sd->{state};
        next if !defined($req_state);
        next if $req_state eq 'freeze';
        eval {
            $self->queue_resource_command($sid, $sd->{uid}, $req_state, $sd->{target});
        };
        if (my $err = $@) {
            $haenv->log('err', "unable to run resource agent for '$sid' - $err"); # fixme
        }
    }

    my $starttime = $haenv->get_time();

    # start workers
    my $max_workers = 4;

    my $sc = $haenv->read_service_config();

    while (($haenv->get_time() - $starttime) < 5) {
        my $count = $self->check_active_workers();

        foreach my $sid (keys %{$self->{workers}}) {
            last if $count >= $max_workers;
            my $w = $self->{workers}->{$sid};
            my $cd = $sc->{$sid};
            if (!$cd) {
                $haenv->log('err', "missing resource configuration for '$sid'");
                next;
            }
            if (!$w->{pid}) {
                if ($haenv->can_fork()) {
                    my $pid = fork();
                    if (!defined($pid)) {
                        $haenv->log('err', "fork worker failed");
                        $count = 0; last; # abort, try later
                    } elsif ($pid == 0) {
                        # do work
                        my $res = -1;
                        eval {
                            $res = $haenv->exec_resource_agent($sid, $cd, $w->{state}, $w->{target});
                        };
                        if (my $err = $@) {
                            $haenv->log('err', $err);
                            POSIX::_exit(-1);
                        }
                        POSIX::_exit($res);
                    } else {
                        $count++;
                        $w->{pid} = $pid;
                    }
                } else {
                    my $res = -1;
                    eval {
                        $res = $haenv->exec_resource_agent($sid, $cd, $w->{state}, $w->{target});
                    };
                    if (my $err = $@) {
                        $haenv->log('err', $err);
                    }
                    $self->resource_command_finished($sid, $w->{uid}, $res);
                }
            }
        }

        last if !$count;

        $haenv->sleep(1);
    }
}
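
# Worker model used above: when the environment can fork (the production
# case), each queued command runs in a child process which exits with the
# resource agent's result via POSIX::_exit(); when it cannot fork (presumably
# a test or simulation environment - an assumption based on can_fork()), the
# command is executed inline and finished immediately. At most $max_workers
# commands run in parallel, and the loop gives up after roughly 5 seconds.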

# fixme: use a queue and limit number of parallel workers?
sub queue_resource_command {
    my ($self, $sid, $uid, $state, $target) = @_;

    if (my $w = $self->{workers}->{$sid}) {
        return if $w->{pid}; # already started
        # else, delete and overwrite queue entry with new command
        delete $self->{workers}->{$sid};
    }

    $self->{workers}->{$sid} = {
        sid => $sid,
        uid => $uid,
        state => $state,
    };

    $self->{workers}->{$sid}->{target} = $target if $target;
}

sub check_active_workers {
    my ($self) = @_;

    # finish/count workers
    my $count = 0;
    foreach my $sid (keys %{$self->{workers}}) {
        my $w = $self->{workers}->{$sid};
        if (my $pid = $w->{pid}) {
            # check status
            my $waitpid = waitpid($pid, WNOHANG);
            if (defined($waitpid) && ($waitpid == $pid)) {
                $self->resource_command_finished($sid, $w->{uid}, $?);
            } else {
                $count++;
            }
        }
    }

    return $count;
}
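
# waitpid() with WNOHANG (from POSIX qw(:sys_wait_h)) returns immediately, so
# the sub above reaps finished workers without blocking and returns the number
# of workers still running; $? carries the raw wait status, which
# resource_command_finished() decodes below.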

sub resource_command_finished {
    my ($self, $sid, $uid, $status) = @_;

    my $haenv = $self->{haenv};

    my $w = delete $self->{workers}->{$sid};
    return if !$w; # should not happen

    my $exit_code = -1;

    if ($status == -1) {
        $haenv->log('err', "resource agent $sid finished - failed to execute");
    } elsif (my $sig = ($status & 127)) {
        $haenv->log('err', "resource agent $sid finished - got signal $sig");
    } else {
        $exit_code = ($status >> 8);
    }

    $exit_code = $self->handle_service_exitcode($sid, $w->{state}, $exit_code);

    $self->{results}->{$uid} = {
        sid => $w->{sid},
        state => $w->{state},
        exit_code => $exit_code,
    };

    my $ss = $self->{service_status};

    # compute hash of valid/existing uids
    my $valid_uids = {};
    foreach my $sid (keys %$ss) {
        my $sd = $ss->{$sid};
        next if !$sd->{uid};
        $valid_uids->{$sd->{uid}} = 1;
    }

    my $results = {};
    foreach my $id (keys %{$self->{results}}) {
        next if !$valid_uids->{$id};
        $results->{$id} = $self->{results}->{$id};
    }
    $self->{results} = $results;
}
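
# The $status decoding above follows the usual wait(2) convention: a non-zero
# value in the low 7 bits is the terminating signal, otherwise the exit code
# sits in the high byte ($status >> 8); -1 means the agent could not be
# executed at all. The final loop drops results whose uid no longer appears in
# the manager's service status, so the results hash cannot grow without bound.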

# Process the exit code of a finished resource agent, so that the CRM knows
# whether the LRM wants to retry an action based on the current recovery
# policies for the failed service, or whether the CRM itself must try to
# recover from the failure.
sub handle_service_exitcode {
    my ($self, $sid, $cmd, $exit_code) = @_;

    my $haenv = $self->{haenv};
    my $tries = $self->{restart_tries};

    my $sc = $haenv->read_service_config();
    my $cd = $sc->{$sid};

    if ($cmd eq 'started') {

        if ($exit_code == SUCCESS) {

            $tries->{$sid} = 0;

            return $exit_code;

        } elsif ($exit_code == ERROR) {

            $tries->{$sid} = 0 if !defined($tries->{$sid});

            $tries->{$sid}++;
            if ($tries->{$sid} >= $cd->{max_restart}) {
                $haenv->log('err', "unable to start service $sid on local node".
                    " after $tries->{$sid} retries");
                $tries->{$sid} = 0;
                return ERROR;
            }

            # tell CRM that we retry the start
            return ETRY_AGAIN;
        }
    }

    return $exit_code;
}
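
# Usage note: SUCCESS, ERROR and ETRY_AGAIN are the exit-code constants
# imported from PVE::HA::Tools ':exit_codes'. The retry bookkeeping keys on
# $cd->{max_restart}, which comes from the service's HA configuration as
# returned by read_service_config(); returning ETRY_AGAIN signals the CRM
# that the LRM will retry the 'started' command itself.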

1;