package PVE::HA::LRM;

# Local Resource Manager

use strict;
use warnings;
use Data::Dumper;
use POSIX qw(:sys_wait_h);

use PVE::SafeSyslog;
use PVE::Tools;
use PVE::HA::Tools ':exit_codes';

# Server can have several states:

my $valid_states = {
    wait_for_agent_lock => "waiting for agent lock",
    active => "got agent_lock",
    lost_agent_lock => "lost agent_lock",
};

sub new {
    my ($this, $haenv) = @_;

    my $class = ref($this) || $this;

    my $self = bless {
        haenv => $haenv,
        status => { state => 'startup' },
        workers => {},
        results => {},
        restart_tries => {},
        shutdown_request => 0,
        # mode can be: active, reboot, shutdown, restart
        mode => 'active',
    }, $class;

    $self->set_local_status({ state => 'wait_for_agent_lock' });

    return $self;
}

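# shutdown_request is called when the LRM is asked to stop (presumably by the
# surrounding daemon wrapper; the exact caller is outside this module). If the
# node itself is shutting down we stop all services, otherwise we assume a
# plain LRM restart and only freeze them.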
sub shutdown_request {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    my $shutdown = $haenv->is_node_shutdown();

    if ($shutdown) {
        $haenv->log('info', "shutdown LRM, stop all services");
        $self->{mode} = 'shutdown';
    } else {
        $haenv->log('info', "restart LRM, freeze all services");
        $self->{mode} = 'restart';
    }

    $self->{shutdown_request} = 1;

    eval { $self->update_lrm_status(); };
    if (my $err = $@) {
        $haenv->log('err', "unable to update lrm status file - $err");
    }
}

sub get_local_status {
    my ($self) = @_;

    return $self->{status};
}

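# Switch the local state machine to a new state. Invalid states are rejected,
# unchanged states are ignored, and the time of the last change is recorded.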
sub set_local_status {
    my ($self, $new) = @_;

    die "invalid state '$new->{state}'" if !$valid_states->{$new->{state}};

    my $haenv = $self->{haenv};

    my $old = $self->{status};

    # important: only update if it really changed
    return if $old->{state} eq $new->{state};

    $haenv->log('info', "status change $old->{state} => $new->{state}");

    $new->{state_change_time} = $haenv->get_time();

    $self->{status} = $new;
}

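# Write mode, command results and a timestamp to the LRM status file so the
# manager (CRM) can pick them up. Skipped (returns 0) while the node is not
# quorate.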
sub update_lrm_status {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    return 0 if !$haenv->quorate();

    my $lrm_status = {
        mode => $self->{mode},
        results => $self->{results},
        timestamp => $haenv->get_time(),
    };

    eval { $haenv->write_lrm_status($lrm_status); };
    if (my $err = $@) {
        $haenv->log('err', "unable to write lrm status file - $err");
        return 0;
    }

    return 1;
}

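# Try to acquire the HA agent lock, retrying for at most ~5 seconds. While we
# hold the lock the watchdog is kept updated; it is opened on the first
# successful acquisition.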
sub get_protected_ha_agent_lock {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    my $count = 0;
    my $starttime = $haenv->get_time();

    for (;;) {

        if ($haenv->get_ha_agent_lock()) {
            if ($self->{ha_agent_wd}) {
                $haenv->watchdog_update($self->{ha_agent_wd});
            } else {
                my $wfh = $haenv->watchdog_open();
                $self->{ha_agent_wd} = $wfh;
            }
            return 1;
        }

        last if ++$count > 5; # try at most 5 times

        my $delay = $haenv->get_time() - $starttime;
        last if $delay > 5; # for at most 5 seconds

        $haenv->sleep(1);
    }

    return 0;
}

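# Count the services assigned to this node whose requested state is neither
# 'stopped' nor 'freeze'.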
sub active_service_count {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    my $nodename = $haenv->nodename();

    my $ss = $self->{service_status};

    my $count = 0;

    foreach my $sid (keys %$ss) {
        my $sd = $ss->{$sid};
        next if !$sd->{node};
        next if $sd->{node} ne $nodename;
        my $req_state = $sd->{state};
        next if !defined($req_state);
        next if $req_state eq 'stopped';
        next if $req_state eq 'freeze';

        $count++;
    }

    return $count;
}

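# File-scoped flag: make sure the LRM status file was written at least once
# before entering the regular state machine.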
my $wrote_lrm_status_at_startup = 0;

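# Run a single iteration of the LRM state machine: refresh the manager view,
# perform state transitions, then do the work for the current state. Returns 1
# to keep running and 0 once the LRM may exit.
#
# A minimal sketch of how a daemon loop might drive this (the real wrapper and
# its environment setup live outside this module, so the names used here are
# assumptions):
#
#   my $haenv = ...;                      # a PVE::HA::Env instance
#   my $lrm = PVE::HA::LRM->new($haenv);
#   while ($lrm->do_one_iteration()) {}   # loop until the LRM decides to stop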
sub do_one_iteration {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    if (!$wrote_lrm_status_at_startup) {
        if ($self->update_lrm_status()) {
            $wrote_lrm_status_at_startup = 1;
        } else {
            # do nothing
            $haenv->sleep(5);
            return $self->{shutdown_request} ? 0 : 1;
        }
    }

    my $status = $self->get_local_status();
    my $state = $status->{state};

    my $ms = $haenv->read_manager_status();
    $self->{service_status} = $ms->{service_status} || {};

    my $fence_request = PVE::HA::Tools::count_fenced_services($self->{service_status}, $haenv->nodename());

    # do state changes first

    my $ctime = $haenv->get_time();

    if ($state eq 'wait_for_agent_lock') {

        my $service_count = $self->active_service_count();

        if (!$fence_request && $service_count && $haenv->quorate()) {
            if ($self->get_protected_ha_agent_lock()) {
                $self->set_local_status({ state => 'active' });
            }
        }

    } elsif ($state eq 'lost_agent_lock') {

        if (!$fence_request && $haenv->quorate()) {
            if ($self->get_protected_ha_agent_lock()) {
                $self->set_local_status({ state => 'active' });
            }
        }

    } elsif ($state eq 'active') {

        if ($fence_request) {
            $haenv->log('err', "node needs to be fenced - releasing agent_lock\n");
            $self->set_local_status({ state => 'lost_agent_lock'});
        } elsif (!$self->get_protected_ha_agent_lock()) {
            $self->set_local_status({ state => 'lost_agent_lock'});
        }
    }

    $status = $self->get_local_status();
    $state = $status->{state};

    # do work

    if ($state eq 'wait_for_agent_lock') {

        return 0 if $self->{shutdown_request};

        $self->update_lrm_status();

        $haenv->sleep(5);

    } elsif ($state eq 'active') {

        my $starttime = $haenv->get_time();

        my $max_time = 10;

        my $shutdown = 0;

        # do work (max_time seconds)
        eval {
            # fixme: set alert timer

            if ($self->{shutdown_request}) {

                if ($self->{mode} eq 'restart') {

                    my $service_count = $self->active_service_count();

                    if ($service_count == 0) {

                        if ($self->{ha_agent_wd}) {
                            $haenv->watchdog_close($self->{ha_agent_wd});
                            delete $self->{ha_agent_wd};
                        }

                        $shutdown = 1;
                    }
                } else {
                    # fixme: stop all services
                    $shutdown = 1;
                }
            } else {

                $self->manage_resources();

            }
        };
        if (my $err = $@) {
            $haenv->log('err', "got unexpected error - $err");
        }

        $self->update_lrm_status();

        return 0 if $shutdown;

        $haenv->sleep_until($starttime + $max_time);

    } elsif ($state eq 'lost_agent_lock') {

        # Note: the watchdog is active and will trigger soon!

        # so we hope to get the lock back soon!

        if ($self->{shutdown_request}) {

            my $service_count = $self->active_service_count();

            if ($service_count > 0) {
                $haenv->log('err', "got shutdown request in state 'lost_agent_lock' - " .
                            "detected $service_count running services");

            } else {

                # all services are stopped, so we can close the watchdog

                if ($self->{ha_agent_wd}) {
                    $haenv->watchdog_close($self->{ha_agent_wd});
                    delete $self->{ha_agent_wd};
                }

                return 0;
            }
        }

        $haenv->sleep(5);

    } else {

        die "got unexpected status '$state'\n";

    }

    return 1;
}

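# Queue a command for every service assigned to this node (frozen services are
# skipped), then run worker processes, at most $max_workers in parallel, for
# roughly 5 seconds per call.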
sub manage_resources {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    my $nodename = $haenv->nodename();

    my $ss = $self->{service_status};

    foreach my $sid (keys %$ss) {
        my $sd = $ss->{$sid};
        next if !$sd->{node};
        next if !$sd->{uid};
        next if $sd->{node} ne $nodename;
        my $req_state = $sd->{state};
        next if !defined($req_state);
        next if $req_state eq 'freeze';
        eval {
            $self->queue_resource_command($sid, $sd->{uid}, $req_state, $sd->{target});
        };
        if (my $err = $@) {
            $haenv->log('err', "unable to run resource agent for '$sid' - $err"); # fixme
        }
    }

    my $starttime = $haenv->get_time();

    # start workers
    my $max_workers = 4;

    my $sc = $haenv->read_service_config();

    while (($haenv->get_time() - $starttime) < 5) {
        my $count = $self->check_active_workers();

        foreach my $sid (keys %{$self->{workers}}) {
            last if $count >= $max_workers;
            my $w = $self->{workers}->{$sid};
            my $cd = $sc->{$sid};
            if (!$cd) {
                $haenv->log('err', "missing resource configuration for '$sid'");
                next;
            }
            if (!$w->{pid}) {
                if ($haenv->can_fork()) {
                    my $pid = fork();
                    if (!defined($pid)) {
                        $haenv->log('err', "fork worker failed");
                        $count = 0; last; # abort, try later
                    } elsif ($pid == 0) {
                        # do work
                        my $res = -1;
                        eval {
                            $res = $haenv->exec_resource_agent($sid, $cd, $w->{state}, $w->{target});
                        };
                        if (my $err = $@) {
                            $haenv->log('err', $err);
                            POSIX::_exit(-1);
                        }
                        POSIX::_exit($res);
                    } else {
                        $count++;
                        $w->{pid} = $pid;
                    }
                } else {
                    my $res = -1;
                    eval {
                        $res = $haenv->exec_resource_agent($sid, $cd, $w->{state}, $w->{target});
                    };
                    if (my $err = $@) {
                        $haenv->log('err', $err);
                    }
                    $self->resource_command_finished($sid, $w->{uid}, $res);
                }
            }
        }

        last if !$count;

        $haenv->sleep(1);
    }
}

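# Remember the requested command for a service. If a worker is already running
# for this service the new request is dropped; a queued but not yet started
# entry is simply replaced.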
# fixme: use a queue and limit number of parallel workers?
sub queue_resource_command {
    my ($self, $sid, $uid, $state, $target) = @_;

    if (my $w = $self->{workers}->{$sid}) {
        return if $w->{pid}; # already started
        # else, delete and overwrite queue entry with new command
        delete $self->{workers}->{$sid};
    }

    $self->{workers}->{$sid} = {
        sid => $sid,
        uid => $uid,
        state => $state,
    };

    $self->{workers}->{$sid}->{target} = $target if $target;
}

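# Reap finished worker processes (non-blocking waitpid) and return the number
# of workers that are still running.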
sub check_active_workers {
    my ($self) = @_;

    # finish/count workers
    my $count = 0;
    foreach my $sid (keys %{$self->{workers}}) {
        my $w = $self->{workers}->{$sid};
        if (my $pid = $w->{pid}) {
            # check status
            my $waitpid = waitpid($pid, WNOHANG);
            if (defined($waitpid) && ($waitpid == $pid)) {
                $self->resource_command_finished($sid, $w->{uid}, $?);
            } else {
                $count++;
            }
        }
    }

    return $count;
}

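# Decode the wait() status of a finished worker (failed exec, signal or exit
# code), let handle_service_exitcode() decide whether the CRM should retry,
# record the result under the command uid, and prune results whose uid no
# longer appears in the current service status.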
sub resource_command_finished {
    my ($self, $sid, $uid, $status) = @_;

    my $haenv = $self->{haenv};

    my $w = delete $self->{workers}->{$sid};
    return if !$w; # should not happen

    my $exit_code = -1;

    if ($status == -1) {
        $haenv->log('err', "resource agent $sid finished - failed to execute");
    } elsif (my $sig = ($status & 127)) {
        $haenv->log('err', "resource agent $sid finished - got signal $sig");
    } else {
        $exit_code = ($status >> 8);
    }

    $exit_code = $self->handle_service_exitcode($sid, $w->{state}, $exit_code);

    $self->{results}->{$uid} = {
        sid => $w->{sid},
        state => $w->{state},
        exit_code => $exit_code,
    };

    my $ss = $self->{service_status};

    # compute hash of valid/existing uids
    my $valid_uids = {};
    foreach my $sid (keys %$ss) {
        my $sd = $ss->{$sid};
        next if !$sd->{uid};
        $valid_uids->{$sd->{uid}} = 1;
    }

    my $results = {};
    foreach my $id (keys %{$self->{results}}) {
        next if !$valid_uids->{$id};
        $results->{$id} = $self->{results}->{$id};
    }
    $self->{results} = $results;
}

# Process the exit code of a finished resource agent, so the CRM knows whether
# the LRM wants to retry the action (based on the recovery policy of the failed
# service) or whether the CRM itself must try to recover from the failure.
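#
# Example (values illustrative): with max_restart => 2 in the service config, a
# failed 'started' command is reported as ETRY_AGAIN on the first failure and
# as ERROR on the second, after which the retry counter is reset.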
sub handle_service_exitcode {
    my ($self, $sid, $cmd, $exit_code) = @_;

    my $haenv = $self->{haenv};
    my $tries = $self->{restart_tries};

    my $sc = $haenv->read_service_config();
    my $cd = $sc->{$sid};

    if ($cmd eq 'started') {

        if ($exit_code == SUCCESS) {

            $tries->{$sid} = 0;

            return $exit_code;

        } elsif ($exit_code == ERROR) {

            $tries->{$sid} = 0 if !defined($tries->{$sid});

            $tries->{$sid}++;
            if ($tries->{$sid} >= $cd->{max_restart}) {
                $haenv->log('err', "unable to start service $sid on local node".
                            " after $tries->{$sid} retries");
                $tries->{$sid} = 0;
                return ERROR;
            }

            # tell CRM that we retry the start
            return ETRY_AGAIN;
        }
    }

    return $exit_code;
}

1;