]> git.proxmox.com Git - pve-ha-manager.git/blame - src/PVE/HA/LRM.pm
Adding constants to gain more readability
[pve-ha-manager.git] / src / PVE / HA / LRM.pm
CommitLineData
5f095798
DM
1package PVE::HA::LRM;
2
3# Local Resource Manager
4
5use strict;
6use warnings;
c4a221bc
DM
7use Data::Dumper;
8use POSIX qw(:sys_wait_h);
5f095798
DM
9
10use PVE::SafeSyslog;
11use PVE::Tools;
12use PVE::HA::Tools;
13
# Server can have several states:

# Maps each valid LRM state to a human-readable description; also serves
# as the validity check used by set_local_status().
my $valid_states = {
    wait_for_agent_lock => "waiting for agent lock", # not yet holding the agent lock
    active => "got agent_lock",                      # holding the lock, managing services
    lost_agent_lock => "lost agent_lock",            # lock lost - watchdog may fire soon
};
21
# Constructor. Expects the HA environment abstraction ($haenv) which
# provides all node/cluster side effects (locking, logging, time, ...).
sub new {
    my ($this, $haenv) = @_;

    my $class = ref($this) || $this;

    # Initial runtime state of the local resource manager.
    my $state = {
        haenv => $haenv,
        status => { state => 'startup' },
        workers => {},
        results => {},
        restart_tries => {},
        shutdown_request => 0,
        # mode can be: active, reboot, shutdown, restart
        mode => 'active',
    };

    my $self = bless $state, $class;

    # Leave 'startup' right away and start competing for the agent lock.
    $self->set_local_status({ state => 'wait_for_agent_lock' });

    return $self;
}
42
# Request a graceful shutdown of the LRM main loop. Sets the shutdown
# flag and switches the persisted mode to 'restart', so the CRM knows
# services should stay untouched while the daemon restarts.
sub shutdown_request {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    $self->{shutdown_request} = 1;

    $self->{mode} = 'restart'; # fixme: detect shutdown/reboot

    # Persist the new mode immediately; failure is non-fatal because the
    # status file is rewritten on every iteration anyway.
    eval { $self->update_lrm_status(); };
    if (my $err = $@) {
        # bug fix: this used $self->log(), but LRM has no log() method -
        # all logging goes through the HA environment.
        $haenv->log('err', "unable to update lrm status file - $err");
    }
}
55
# Accessor for the current local status hash
# ({ state => ..., state_change_time => ... }).
sub get_local_status {
    my ($self) = @_;

    my $status = $self->{status};

    return $status;
}
61
# Switch the LRM to a new local state, logging and timestamping the
# transition. A no-op when the state does not actually change.
sub set_local_status {
    my ($self, $new) = @_;

    my $new_state = $new->{state};
    die "invalid state '$new_state'" if !$valid_states->{$new_state};

    my $haenv = $self->{haenv};
    my $old = $self->{status};

    # important: only update if the state really changed
    return if $old->{state} eq $new_state;

    $haenv->log('info', "status change $old->{state} => $new_state");

    $new->{state_change_time} = $haenv->get_time();

    $self->{status} = $new;
}
80
9c7d068b
DM
# Write the node's LRM status (mode, worker results, timestamp) to the
# cluster filesystem. Requires quorum. Returns 1 on success, 0 otherwise.
sub update_lrm_status {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    # without quorum the status file cannot (and must not) be written
    return 0 if !$haenv->quorate();

    my $status = {
        mode => $self->{mode},
        results => $self->{results},
        timestamp => $haenv->get_time(),
    };

    my $ok = 1;
    eval { $haenv->write_lrm_status($status); };
    if (my $err = $@) {
        $haenv->log('err', "unable to write lrm status file - $err");
        $ok = 0;
    }

    return $ok;
}
102
5f095798
DM
# Try to acquire the HA agent lock, keeping the protecting watchdog armed
# while we hold it. Retries a bounded number of attempts (5) within a
# bounded time window (~5 seconds). Returns 1 on success, 0 on failure.
sub get_protected_ha_agent_lock {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    my $tries = 0;
    my $started = $haenv->get_time();

    while (1) {
        if ($haenv->get_ha_agent_lock()) {
            # lock acquired - (re)arm the watchdog that protects it
            if (my $wd = $self->{ha_agent_wd}) {
                $haenv->watchdog_update($wd);
            } else {
                $self->{ha_agent_wd} = $haenv->watchdog_open();
            }
            return 1;
        }

        $tries++;
        last if $tries > 5; # try at most 5 times

        last if ($haenv->get_time() - $started) > 5; # for at most 5 seconds

        $haenv->sleep(1);
    }

    return 0;
}
133
546e2f1f
DM
# Count the services assigned to this node whose requested state is
# something other than 'stopped' or 'freeze' - i.e. services that still
# block a clean shutdown / lock release.
sub active_service_count {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    my $nodename = $haenv->nodename();

    my $ss = $self->{service_status};

    my @active = grep {
        my $sd = $ss->{$_};
        $sd->{node}
            && $sd->{node} eq $nodename
            && defined($sd->{state})
            && $sd->{state} ne 'stopped'
            && $sd->{state} ne 'freeze';
    } keys %$ss;

    return scalar(@active);
}
5bd7aa54
DM
159
# Set once the initial LRM status file write succeeded, so we only force
# it during startup (later writes happen per-iteration as needed).
my $wrote_lrm_status_at_startup = 0;

# One pass of the LRM main loop: refresh cluster view, run the state
# machine (wait_for_agent_lock / active / lost_agent_lock), then do the
# per-state work. Returns 1 to continue the loop, 0 to exit the daemon.
sub do_one_iteration {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    # On startup, insist on getting the status file written once before
    # doing anything else (e.g. we may lack quorum right after boot).
    if (!$wrote_lrm_status_at_startup) {
        if ($self->update_lrm_status()) {
            $wrote_lrm_status_at_startup = 1;
        } else {
            # do nothing
            $haenv->sleep(5);
            return $self->{shutdown_request} ? 0 : 1;
        }
    }

    my $status = $self->get_local_status();
    my $state = $status->{state};

    # refresh our view of the CRM-managed service states
    my $ms = $haenv->read_manager_status();
    $self->{service_status} = $ms->{service_status} || {};

    # non-zero when the manager marked services on this node for fencing
    my $fence_request = PVE::HA::Tools::count_fenced_services($self->{service_status}, $haenv->nodename());

    # do state changes first

    my $ctime = $haenv->get_time();

    if ($state eq 'wait_for_agent_lock') {

        my $service_count = $self->active_service_count();

        # only grab the lock when there is actual work and we have quorum
        if (!$fence_request && $service_count && $haenv->quorate()) {
            if ($self->get_protected_ha_agent_lock()) {
                $self->set_local_status({ state => 'active' });
            }
        }

    } elsif ($state eq 'lost_agent_lock') {

        # try to re-acquire the lock (and thereby calm the watchdog)
        if (!$fence_request && $haenv->quorate()) {
            if ($self->get_protected_ha_agent_lock()) {
                $self->set_local_status({ state => 'active' });
            }
        }

    } elsif ($state eq 'active') {

        if ($fence_request) {
            # we are scheduled for fencing - give up the lock voluntarily
            $haenv->log('err', "node need to be fenced - releasing agent_lock\n");
            $self->set_local_status({ state => 'lost_agent_lock'});
        } elsif (!$self->get_protected_ha_agent_lock()) {
            $self->set_local_status({ state => 'lost_agent_lock'});
        }
    }

    # re-read: the state machine above may have changed it
    $status = $self->get_local_status();
    $state = $status->{state};

    # do work

    if ($state eq 'wait_for_agent_lock') {

        return 0 if $self->{shutdown_request};

        $self->update_lrm_status();

        $haenv->sleep(5);

    } elsif ($state eq 'active') {

        my $startime = $haenv->get_time();

        my $max_time = 10;

        my $shutdown = 0;

        # do work (max_time seconds)
        eval {
            # fixme: set alert timer

            if ($self->{shutdown_request}) {

                # fixme: request service stop or relocate ?

                my $service_count = $self->active_service_count();

                # only shut down once no active services remain, so the
                # watchdog can be closed safely
                if ($service_count == 0) {

                    if ($self->{ha_agent_wd}) {
                        $haenv->watchdog_close($self->{ha_agent_wd});
                        delete $self->{ha_agent_wd};
                    }

                    $shutdown = 1;
                }
            } else {

                $self->manage_resources();

            }
        };
        if (my $err = $@) {
            $haenv->log('err', "got unexpected error - $err");
        }

        $self->update_lrm_status();

        return 0 if $shutdown;

        # sleep whatever remains of the 10 second iteration budget
        $haenv->sleep_until($startime + $max_time);

    } elsif ($state eq 'lost_agent_lock') {

        # Note: watchdog is active and will trigger soon!

        # so we hope to get the lock back soon!

        if ($self->{shutdown_request}) {

            my $service_count = $self->active_service_count();

            if ($service_count > 0) {
                # cannot stop cleanly - services still run without a lock
                $haenv->log('err', "get shutdown request in state 'lost_agent_lock' - " .
                            "detected $service_count running services");

            } else {

                # all services are stopped, so we can close the watchdog

                if ($self->{ha_agent_wd}) {
                    $haenv->watchdog_close($self->{ha_agent_wd});
                    delete $self->{ha_agent_wd};
                }

                return 0;
            }
        }

        $haenv->sleep(5);

    } else {

        die "got unexpected status '$state'\n";

    }

    return 1;
}
310
c4a221bc
DM
# Queue resource-agent commands for all services assigned to this node and
# drive up to $max_workers parallel workers for ~5 seconds. Forks a child
# per command when the environment allows it (child exits via POSIX::_exit
# so no destructors/cleanup of the parent run twice); otherwise commands
# are executed synchronously (e.g. in the regression test harness).
sub manage_resources {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    my $nodename = $haenv->nodename();

    my $ss = $self->{service_status};

    # queue one command per service that is placed on this node
    foreach my $sid (keys %$ss) {
        my $sd = $ss->{$sid};
        next if !$sd->{node};
        next if !$sd->{uid};
        next if $sd->{node} ne $nodename;
        my $req_state = $sd->{state};
        next if !defined($req_state);
        next if $req_state eq 'freeze'; # frozen services are left alone
        eval {
            $self->queue_resource_command($sid, $sd->{uid}, $req_state, $sd->{target});
        };
        if (my $err = $@) {
            $haenv->log('err', "unable to run resource agent for '$sid' - $err"); # fixme
        }
    }

    my $starttime = $haenv->get_time();

    # start workers
    my $max_workers = 4;

    my $sc = $haenv->read_service_config();

    while (($haenv->get_time() - $starttime) < 5) {
        # reap finished children; $count = workers still running
        my $count = $self->check_active_workers();

        foreach my $sid (keys %{$self->{workers}}) {
            last if $count >= $max_workers; # concurrency limit reached
            my $w = $self->{workers}->{$sid};
            my $cd = $sc->{$sid};
            if (!$cd) {
                $haenv->log('err', "missing resource configuration for '$sid'");
                next;
            }
            if (!$w->{pid}) { # not started yet
                if ($haenv->can_fork()) {
                    my $pid = fork();
                    if (!defined($pid)) {
                        $haenv->log('err', "fork worker failed");
                        $count = 0; last; # abort, try later
                    } elsif ($pid == 0) {
                        # do work (child process)
                        my $res = -1;
                        eval {
                            $res = $haenv->exec_resource_agent($sid, $cd, $w->{state}, $w->{target});
                        };
                        if (my $err = $@) {
                            $haenv->log('err', $err);
                            POSIX::_exit(-1);
                        }
                        # _exit avoids running parent cleanup in the child
                        POSIX::_exit($res);
                    } else {
                        $count++;
                        $w->{pid} = $pid;
                    }
                } else {
                    # no fork available - run the agent synchronously
                    my $res = -1;
                    eval {
                        $res = $haenv->exec_resource_agent($sid, $cd, $w->{state}, $w->{target});
                    };
                    if (my $err = $@) {
                        $haenv->log('err', $err);
                    }
                    $self->resource_command_finished($sid, $w->{uid}, $res);
                }
            }
        }

        last if !$count; # queue drained - no need to keep polling

        $haenv->sleep(1);
    }
}
393
# Register a pending worker command for service $sid. A request for a
# service whose worker is already running (has a pid) is ignored; a queued
# but not yet started entry is replaced by the new command.
# fixme: use a queue and limit number of parallel workers?
sub queue_resource_command {
    my ($self, $sid, $uid, $state, $target) = @_;

    my $workers = $self->{workers};

    if (my $existing = $workers->{$sid}) {
        return if $existing->{pid}; # already started - don't interfere
        delete $workers->{$sid}; # overwrite queued entry with new command
    }

    my $entry = {
        sid => $sid,
        uid => $uid,
        state => $state,
    };
    $entry->{target} = $target if $target;

    $workers->{$sid} = $entry;
}
412
# Reap finished worker processes (non-blocking) and report back their exit
# status; returns the number of workers still running. Queued entries
# without a pid are not counted.
sub check_active_workers {
    my ($self) = @_;

    my $running = 0;

    foreach my $sid (keys %{$self->{workers}}) {
        my $w = $self->{workers}->{$sid};
        my $pid = $w->{pid} or next; # not started yet

        # non-blocking check whether the child already exited
        my $reaped = waitpid($pid, WNOHANG);
        if (defined($reaped) && ($reaped == $pid)) {
            $self->resource_command_finished($sid, $w->{uid}, $?);
        } else {
            $running++;
        }
    }

    return $running;
}
433
# Handle a finished worker for service $sid: decode the wait() status,
# pass the exit code through the restart policy, record the result under
# the command uid, and prune results whose uid is no longer referenced by
# the manager's service status (i.e. the CRM already consumed them).
sub resource_command_finished {
    my ($self, $sid, $uid, $status) = @_;

    my $haenv = $self->{haenv};

    my $w = delete $self->{workers}->{$sid};
    return if !$w; # should not happen

    # decode the raw wait() status into an agent exit code
    my $exit_code = -1;
    if ($status == -1) {
        $haenv->log('err', "resource agent $sid finished - failed to execute");
    } elsif (my $sig = ($status & 127)) {
        $haenv->log('err', "resource agent $sid finished - got signal $sig");
    } else {
        $exit_code = ($status >> 8);
    }

    # apply restart policy (may turn a failure into a retry request)
    $exit_code = $self->handle_service_exitcode($sid, $w->{state}, $exit_code);

    $self->{results}->{$uid} = {
        sid => $w->{sid},
        state => $w->{state},
        exit_code => $exit_code,
    };

    my $ss = $self->{service_status};

    # keep only results whose uid still appears in the service status
    my %valid_uid = map { $_->{uid} ? ($_->{uid} => 1) : () } values %$ss;

    my $old_results = $self->{results};
    $self->{results} = {
        map { $valid_uid{$_} ? ($_ => $old_results->{$_}) : () } keys %$old_results
    };
}
477
ea4443cc
TL
# Process the exit code from a finished resource agent, so that the CRM
# knows whether the LRM wants to retry an action (based on the service's
# recovery policy) or the CRM itself must recover from the failure.
# Returns the (possibly rewritten) exit code: 2 requests a local retry.
sub handle_service_exitcode {
    my ($self, $sid, $cmd, $exit_code) = @_;

    my $haenv = $self->{haenv};
    my $tries = $self->{restart_tries};

    my $sc = $haenv->read_service_config();
    my $cd = $sc->{$sid};

    # only 'started' results are subject to the restart policy
    return $exit_code if $cmd ne 'started';

    if ($exit_code == 0) {
        # success resets the retry counter
        $tries->{$sid} = 0;
        return 0;
    }

    if ($exit_code == 1) {
        $tries->{$sid} = 0 if !defined($tries->{$sid});

        $tries->{$sid}++;
        if ($tries->{$sid} >= $cd->{max_restart}) {
            # retries exhausted - report the failure to the CRM
            $haenv->log('err', "unable to start service $sid on local node".
                        " after $tries->{$sid} retries");
            $tries->{$sid} = 0;
            return 1;
        }

        return 2; # request another local start attempt
    }

    return $exit_code;
}
517
5f095798 5181;