]> git.proxmox.com Git - pve-ha-manager.git/blob - src/PVE/HA/LRM.pm
Manager, LRM: sort service keys for deterministic tests
[pve-ha-manager.git] / src / PVE / HA / LRM.pm
1 package PVE::HA::LRM;
2
3 # Local Resource Manager
4
5 use strict;
6 use warnings;
7 use Data::Dumper;
8 use POSIX qw(:sys_wait_h);
9
10 use PVE::SafeSyslog;
11 use PVE::Tools;
12 use PVE::HA::Tools ':exit_codes';
13 use PVE::HA::Resources;
14
# The LRM state machine knows the following states; the hash maps each
# valid state name to a human-readable description and doubles as the
# validity check used by set_local_status().
my $valid_states = {
    wait_for_agent_lock => "waiting for agent lock",
    active => "got agent_lock",
    lost_agent_lock => "lost agent_lock",
};
22
# Constructor: create an LRM instance bound to the given HA environment
# abstraction ($haenv). The instance starts out in 'wait_for_agent_lock'.
sub new {
    my ($this, $haenv) = @_;

    my $class = ref($this) || $this;

    my %init = (
        haenv => $haenv,
        status => { state => 'startup' }, # replaced right below
        workers => {},          # queued/running per-service commands
        results => {},          # finished command results, keyed by uid
        restart_tries => {},    # per-service restart counters
        shutdown_request => 0,
        shutdown_errors => 0,
        mode => 'active',       # mode can be: active, reboot, shutdown, restart
    );

    my $self = bless { %init }, $class;

    $self->set_local_status({ state => 'wait_for_agent_lock' });

    return $self;
}
44
# Handle an external shutdown/restart request (called once; subsequent
# calls are no-ops). On a real node shutdown all local services get a
# 'request_stop' job queued; on a mere LRM restart services are frozen.
# Side effect: sets {shutdown_request} and {mode}, persists LRM status.
sub shutdown_request {
    my ($self) = @_;

    return if $self->{shutdown_request}; # already in shutdown mode

    my $haenv = $self->{haenv};

    my $nodename = $haenv->nodename();

    my $shutdown = $haenv->is_node_shutdown();

    if ($shutdown) {
        $haenv->log('info', "shutdown LRM, stop all services");
        $self->{mode} = 'shutdown';

        # queue stop jobs for all services on this node

        my $ss = $self->{service_status};

        foreach my $sid (keys %$ss) {
            my $sd = $ss->{$sid};
            next if !$sd->{node};
            next if $sd->{node} ne $nodename;
            # Note: use undef uid to mark shutdown/stop jobs
            $self->queue_resource_command($sid, undef, 'request_stop');
        }

    } else {
        $haenv->log('info', "restart LRM, freeze all services");
        $self->{mode} = 'restart';
    }

    $self->{shutdown_request} = 1;

    eval { $self->update_lrm_status(); };
    if (my $err = $@) {
        # FIX: log through the HA environment - LRM itself has no log()
        # method, so the old "$self->log(...)" died inside the handler
        $haenv->log('err', "unable to update lrm status file - $err");
    }
}
84
# Accessor for the current local status hash (contains at least 'state',
# plus 'state_change_time' once a transition happened).
sub get_local_status {
    my ($self) = @_;
    return $self->{status};
}
90
# Transition the local state machine to $new->{state} (must be a key of
# $valid_states, else we die). A transition to the state we are already
# in is silently ignored; real changes are logged and timestamped.
sub set_local_status {
    my ($self, $new) = @_;

    my $new_state = $new->{state};
    die "invalid state '$new_state'" if !$valid_states->{$new_state};

    my $haenv = $self->{haenv};
    my $old = $self->{status};

    # important: only update if the state really changed
    return if $old->{state} eq $new_state;

    $haenv->log('info', "status change $old->{state} => $new_state");

    $new->{state_change_time} = $haenv->get_time();
    $self->{status} = $new;
}
109
# Persist the current LRM state (state, mode, command results and a
# timestamp) via the HA environment. Returns 1 on success, 0 when we are
# not quorate or the write failed (failure is logged, not fatal).
sub update_lrm_status {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    # never touch the status file without quorum
    return 0 unless $haenv->quorate();

    my $lrm_status = {
        state => $self->{status}->{state},
        mode => $self->{mode},
        results => $self->{results},
        timestamp => $haenv->get_time(),
    };

    eval { $haenv->write_lrm_status($lrm_status); };
    return 1 if !$@;

    $haenv->log('err', "unable to write lrm status file - $@");
    return 0;
}
132
# Try to acquire the cluster-wide HA agent lock, retrying for at most
# 5 attempts / ~5 seconds. While we hold the lock the hardware watchdog
# is kept alive (opened on first acquisition, updated afterwards).
# Returns 1 when the lock is held, 0 when acquisition failed.
sub get_protected_ha_agent_lock {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    my $tries = 0;
    my $starttime = $haenv->get_time();

    while (1) {
        if ($haenv->get_ha_agent_lock()) {
            # got the lock - feed (or open) the watchdog
            if (my $wd = $self->{ha_agent_wd}) {
                $haenv->watchdog_update($wd);
            } else {
                $self->{ha_agent_wd} = $haenv->watchdog_open();
            }
            return 1;
        }

        $tries++;
        last if $tries > 5; # try at most 5 times ...
        last if ($haenv->get_time() - $starttime) > 5; # ... within 5 seconds

        $haenv->sleep(1);
    }

    return 0;
}
163
# Count the services placed on this node whose requested state still
# needs active management - i.e. everything except 'stopped', 'freeze'
# and 'error' (erroneous services are not managed by HA, so they do not
# count as active).
sub active_service_count {
    my ($self) = @_;

    my $nodename = $self->{haenv}->nodename();
    my $ss = $self->{service_status};

    my %inactive = (stopped => 1, freeze => 1, error => 1);

    my $count = 0;
    foreach my $sid (keys %$ss) {
        my $sd = $ss->{$sid};
        next if !$sd->{node} || $sd->{node} ne $nodename;

        my $req_state = $sd->{state};
        next if !defined($req_state);
        next if $inactive{$req_state};

        $count++;
    }

    return $count;
}
191
# One-shot flag: the LRM status file must be written once at startup (only
# possible while quorate) before any real work, so the CRM can see us.
my $wrote_lrm_status_at_startup = 0;

# Run one iteration of the LRM main loop: refresh the manager's view of
# the service states, perform state-machine transitions, then do the work
# for the resulting state. Returns 1 to keep looping, 0 to exit the daemon.
sub do_one_iteration {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    if (!$wrote_lrm_status_at_startup) {
        if ($self->update_lrm_status()) {
            $wrote_lrm_status_at_startup = 1;
        } else {
            # do nothing (not quorate yet or write failed - retry next round)
            $haenv->sleep(5);
            return $self->{shutdown_request} ? 0 : 1;
        }
    }

    my $status = $self->get_local_status();
    my $state = $status->{state};

    # pull the CRM's current per-service status
    my $ms = $haenv->read_manager_status();
    $self->{service_status} = $ms->{service_status} || {};

    # non-zero when the CRM wants this node fenced
    my $fence_request = PVE::HA::Tools::count_fenced_services($self->{service_status}, $haenv->nodename());

    # do state changes first

    # NOTE(review): $ctime appears unused below - confirm before removing
    my $ctime = $haenv->get_time();

    if ($state eq 'wait_for_agent_lock') {

        my $service_count = $self->active_service_count();

        # only grab the lock when there is actually something to manage
        if (!$fence_request && $service_count && $haenv->quorate()) {
            if ($self->get_protected_ha_agent_lock()) {
                $self->set_local_status({ state => 'active' });
            }
        }

    } elsif ($state eq 'lost_agent_lock') {

        if (!$fence_request && $haenv->quorate()) {
            if ($self->get_protected_ha_agent_lock()) {
                $self->set_local_status({ state => 'active' });
            }
        }

    } elsif ($state eq 'active') {

        if ($fence_request) {
            $haenv->log('err', "node need to be fenced - releasing agent_lock\n");
            $self->set_local_status({ state => 'lost_agent_lock'});
        } elsif (!$self->get_protected_ha_agent_lock()) {
            $self->set_local_status({ state => 'lost_agent_lock'});
        }
    }

    # re-read, the transitions above may have changed it
    $status = $self->get_local_status();
    $state = $status->{state};

    # do work

    if ($state eq 'wait_for_agent_lock') {

        return 0 if $self->{shutdown_request};

        $self->update_lrm_status();

        $haenv->sleep(5);

    } elsif ($state eq 'active') {

        my $startime = $haenv->get_time();

        my $max_time = 10;

        my $shutdown = 0;

        # do work (max_time seconds)
        eval {
            # fixme: set alert timer

            if ($self->{shutdown_request}) {

                if ($self->{mode} eq 'restart') {

                    my $service_count = $self->active_service_count();

                    if ($service_count == 0) {

                        if ($self->run_workers() == 0) {
                            # no workers left - safe to stop the watchdog
                            if ($self->{ha_agent_wd}) {
                                $haenv->watchdog_close($self->{ha_agent_wd});
                                delete $self->{ha_agent_wd};
                            }

                            $shutdown = 1;

                            # restart with no or freezed services, release the lock
                            $haenv->release_ha_agent_lock();
                        }
                    }
                } else {

                    if ($self->run_workers() == 0) {
                        if ($self->{shutdown_errors} == 0) {
                            if ($self->{ha_agent_wd}) {
                                $haenv->watchdog_close($self->{ha_agent_wd});
                                delete $self->{ha_agent_wd};
                            }

                            # shutdown with all services stopped thus release the lock
                            $haenv->release_ha_agent_lock();
                        }

                        # stop looping even with errors - node is going down
                        $shutdown = 1;
                    }
                }
            } else {

                $self->manage_resources();

            }
        };
        if (my $err = $@) {
            $haenv->log('err', "got unexpected error - $err");
        }

        $self->update_lrm_status();

        return 0 if $shutdown;

        $haenv->sleep_until($startime + $max_time);

    } elsif ($state eq 'lost_agent_lock') {

        # Note: watchdog is active and will trigger soon!

        # so we hope to get the lock back soon!

        if ($self->{shutdown_request}) {

            my $service_count = $self->active_service_count();

            if ($service_count > 0) {
                $haenv->log('err', "get shutdown request in state 'lost_agent_lock' - " .
                            "detected $service_count running services");

            } else {

                # all services are stopped, so we can close the watchdog

                if ($self->{ha_agent_wd}) {
                    $haenv->watchdog_close($self->{ha_agent_wd});
                    delete $self->{ha_agent_wd};
                }

                return 0;
            }
        }

        $haenv->sleep(5);

    } else {

        die "got unexpected status '$state'\n";

    }

    return 1;
}
363
# Start and supervise queued per-service workers for up to ~5 seconds.
# With $max_workers > 0 each queued command runs in a forked child (at
# most $max_workers in parallel); with 0 the resource agent is executed
# directly in-process (used by the regression tests).
# Returns the number of workers still queued or running afterwards.
sub run_workers {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    my $starttime = $haenv->get_time();

    # number of workers to start, if 0 we exec the command directly without forking
    my $max_workers = $haenv->get_max_workers();

    my $sc = $haenv->read_service_config();

    while (($haenv->get_time() - $starttime) < 5) {
        # reap finished children first; $count = still-running workers
        my $count = $self->check_active_workers();

        # sorted for a deterministic start order (regression tests rely on it)
        foreach my $sid (sort keys %{$self->{workers}}) {
            last if $count >= $max_workers && $max_workers > 0;

            my $w = $self->{workers}->{$sid};
            if (!$w->{pid}) {
                # only fork if we may else call exec_resource_agent
                # directly (e.g. for regression tests)
                if ($max_workers > 0) {
                    my $pid = fork();
                    if (!defined($pid)) {
                        $haenv->log('err', "fork worker failed");
                        $count = 0; last; # abort, try later
                    } elsif ($pid == 0) {
                        $haenv->after_fork(); # cleanup

                        # do work
                        my $res = -1;
                        eval {
                            $res = $self->exec_resource_agent($sid, $sc->{$sid}, $w->{state}, $w->{target});
                        };
                        if (my $err = $@) {
                            $haenv->log('err', $err);
                            POSIX::_exit(-1);
                        }
                        # _exit, not exit: skip END blocks/destructors in the child
                        POSIX::_exit($res);
                    } else {
                        $count++;
                        $w->{pid} = $pid;
                    }
                } else {
                    my $res = -1;
                    eval {
                        $res = $self->exec_resource_agent($sid, $sc->{$sid}, $w->{state}, $w->{target});
                        # shift into wait()-status layout so the finish
                        # handlers can decode both paths the same way
                        $res = $res << 8 if $res > 0;
                    };
                    if (my $err = $@) {
                        $haenv->log('err', $err);
                    }
                    # undef uid marks shutdown/stop jobs (see queue_resource_command)
                    if (defined($w->{uid})) {
                        $self->resource_command_finished($sid, $w->{uid}, $res);
                    } else {
                        $self->stop_command_finished($sid, $res);
                    }
                }
            }
        }

        last if !$count;

        $haenv->sleep(1);
    }

    return scalar(keys %{$self->{workers}});
}
433
# Queue the command needed to reach each local service's requested state,
# then run the worker loop. Also drops restart counters for services that
# vanished from the manager status. Returns run_workers()'s result (the
# number of workers still pending).
sub manage_resources {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    my $nodename = $haenv->nodename();

    my $ss = $self->{service_status};

    # forget restart tries of services no longer tracked by the manager
    foreach my $sid (keys %{$self->{restart_tries}}) {
        delete $self->{restart_tries}->{$sid} if !$ss->{$sid};
    }

    # FIX: sort keys for a deterministic queueing order, consistent with
    # the sorted iteration in run_workers (matters for regression tests)
    foreach my $sid (sort keys %$ss) {
        my $sd = $ss->{$sid};
        next if !$sd->{node};
        next if !$sd->{uid};
        next if $sd->{node} ne $nodename; # not our service
        my $req_state = $sd->{state};
        next if !defined($req_state);
        next if $req_state eq 'freeze'; # frozen services are left alone
        $self->queue_resource_command($sid, $sd->{uid}, $req_state, $sd->{target});
    }

    return $self->run_workers();
}
460
# Put a command for $sid into the worker queue. An undef $uid marks
# shutdown/stop jobs whose result is not reported back to the CRM.
sub queue_resource_command {
    my ($self, $sid, $uid, $state, $target) = @_;

    # Do not queue the exact same command twice, as this may lead to an
    # inconsistent HA state when the first command fails but the CRM does
    # not process its failure right away and the LRM starts a second try,
    # without the CRM knowing of it (race condition).
    # The 'stopped' command is an exception as we do not process its result
    # in the CRM and we want to execute it always (even with no active CRM).
    if ($state ne 'stopped' && $uid) {
        return if defined($self->{results}->{$uid});
    }

    if (my $worker = $self->{workers}->{$sid}) {
        return if $worker->{pid}; # already started
        # else, delete and overwrite queue entry with new command
        delete $self->{workers}->{$sid};
    }

    my $entry = {
        sid => $sid,
        uid => $uid,
        state => $state,
    };
    $entry->{target} = $target if $target;

    $self->{workers}->{$sid} = $entry;
}
486
# Reap finished worker children (non-blocking) and dispatch their wait()
# status to the matching *_command_finished handler. Returns the number
# of workers still running.
sub check_active_workers {
    my ($self) = @_;

    my $running = 0;

    foreach my $sid (keys %{$self->{workers}}) {
        my $w = $self->{workers}->{$sid};
        my $pid = $w->{pid} or next; # not started yet

        # non-blocking check whether the child already exited
        my $reaped = waitpid($pid, WNOHANG);
        if (defined($reaped) && ($reaped == $pid)) {
            # undef uid marks shutdown/stop jobs
            if (defined($w->{uid})) {
                $self->resource_command_finished($sid, $w->{uid}, $?);
            } else {
                $self->stop_command_finished($sid, $?);
            }
        } else {
            $running++;
        }
    }

    return $running;
}
511
# Evaluate the wait() status of a finished stop job (queued with undef
# uid during shutdown). Failures are logged and counted in
# {shutdown_errors}, which blocks releasing the agent lock at shutdown.
sub stop_command_finished {
    my ($self, $sid, $status) = @_;

    my $haenv = $self->{haenv};

    my $w = delete $self->{workers}->{$sid};
    return if !$w; # should not happen

    my $exit_code;
    my $sig = $status == -1 ? 0 : ($status & 127);

    if ($status == -1) {
        $haenv->log('err', "resource agent $sid finished - failed to execute");
    } elsif ($sig) {
        $haenv->log('err', "resource agent $sid finished - got signal $sig");
    } else {
        $exit_code = ($status >> 8);
    }

    $exit_code //= -1; # exec failure / signal death count as error

    $self->{shutdown_errors}++ if $exit_code != 0;
}
534
# Process the wait() status of a finished resource agent command: decode
# the exit code, let handle_service_exitcode() apply the restart policy,
# and record the outcome in {results} - pruned down to uids the CRM still
# knows about. ETRY_AGAIN results are not recorded (we simply retry).
sub resource_command_finished {
    my ($self, $sid, $uid, $status) = @_;

    my $haenv = $self->{haenv};

    my $w = delete $self->{workers}->{$sid};
    return if !$w; # should not happen

    my $exit_code = -1;

    if ($status == -1) {
        $haenv->log('err', "resource agent $sid finished - failed to execute");
    } elsif (my $sig = ($status & 127)) {
        $haenv->log('err', "resource agent $sid finished - got signal $sig");
    } else {
        $exit_code = ($status >> 8);
    }

    $exit_code = $self->handle_service_exitcode($sid, $w->{state}, $exit_code);

    return if $exit_code == ETRY_AGAIN; # tell nobody, simply retry

    $self->{results}->{$uid} = {
        sid => $w->{sid},
        state => $w->{state},
        exit_code => $exit_code,
    };

    # collect the uids still referenced by the manager status ...
    my %valid_uid = map { $_->{uid} ? ($_->{uid} => 1) : () }
        values %{$self->{service_status}};

    # ... and keep only their results
    my $old_results = $self->{results};
    $self->{results} = {
        map { $valid_uid{$_} ? ($_ => $old_results->{$_}) : () } keys %$old_results
    };
}
580
# Processes the exit code from a finished resource agent, so that the CRM
# knows if the LRM wants to retry an action based on the current recovery
# policies for the failed service, or the CRM itself must try to recover
# from the failure. Returns the (possibly rewritten) exit code; ETRY_AGAIN
# means the LRM retries the start itself.
sub handle_service_exitcode {
    my ($self, $sid, $cmd, $exit_code) = @_;

    my $haenv = $self->{haenv};
    my $tries = $self->{restart_tries};

    my $sc = $haenv->read_service_config();

    my $max_restart = 0;

    if (my $cd = $sc->{$sid}) {
        # FIX: keep the 0 default when the config has no max_restart set;
        # previously this assigned undef, and the numeric comparison below
        # then operated on undef (warning, treated as 0 by accident)
        $max_restart = $cd->{max_restart} // 0;
    }

    # restart policy only applies to 'started' commands
    if ($cmd eq 'started') {

        if ($exit_code == SUCCESS) {
            # service is up - reset the restart counter
            $tries->{$sid} = 0;

            return $exit_code;

        } elsif ($exit_code == ERROR) {

            $tries->{$sid} = 0 if !defined($tries->{$sid});

            if ($tries->{$sid} >= $max_restart) {
                $haenv->log('err', "unable to start service $sid on local node".
                            " after $tries->{$sid} retries");
                $tries->{$sid} = 0;
                return ERROR;
            }

            $tries->{$sid}++;

            $haenv->log('warning', "restart policy: retry number $tries->{$sid}" .
                        " for service '$sid'");
            # tell CRM that we retry the start
            return ETRY_AGAIN;
        }
    }

    return $exit_code;
}
629
# Execute a single resource agent command ($cmd: started, request_stop,
# stopped, migrate, relocate, error) for service $sid using the matching
# resource plugin. @params carries command-specific arguments (currently
# only the migration/relocation target node). Returns one of the
# ':exit_codes' constants from PVE::HA::Tools; dies on a missing target.
sub exec_resource_agent {
    my ($self, $sid, $service_config, $cmd, @params) = @_;

    # setup execution environment
    $ENV{'PATH'} = '/sbin:/bin:/usr/sbin:/usr/bin';

    my $haenv = $self->{haenv};

    my $nodename = $haenv->nodename();

    my (undef, $service_type, $service_name) = PVE::HA::Tools::parse_sid($sid);

    my $plugin = PVE::HA::Resources->lookup($service_type);
    if (!$plugin) {
        $haenv->log('err', "service type '$service_type' not implemented");
        return EUNKNOWN_SERVICE_TYPE;
    }

    if (!$service_config) {
        $haenv->log('err', "missing resource configuration for '$sid'");
        return EUNKNOWN_SERVICE;
    }

    # process error state early
    if ($cmd eq 'error') {

        $haenv->log('err', "service $sid is in an error state and needs manual " .
                    "intervention. Look up 'ERROR RECOVERY' in the documentation.");

        return SUCCESS; # error always succeeds
    }

    if ($service_config->{node} ne $nodename) {
        $haenv->log('err', "service '$sid' not on this node");
        return EWRONG_NODE;
    }

    my $id = $service_name;

    my $running = $plugin->check_running($haenv, $id);

    if ($cmd eq 'started') {

        return SUCCESS if $running;

        $haenv->log("info", "starting service $sid");

        $plugin->start($haenv, $id);

        $running = $plugin->check_running($haenv, $id);

        if ($running) {
            $haenv->log("info", "service status $sid started");
            return SUCCESS;
        } else {
            $haenv->log("warning", "unable to start service $sid");
            return ERROR;
        }

    } elsif ($cmd eq 'request_stop' || $cmd eq 'stopped') {

        return SUCCESS if !$running;

        $haenv->log("info", "stopping service $sid");

        $plugin->shutdown($haenv, $id);

        $running = $plugin->check_running($haenv, $id);

        if (!$running) {
            $haenv->log("info", "service status $sid stopped");
            return SUCCESS;
        } else {
            # FIX: log message read "unable to stop stop service" before
            $haenv->log("info", "unable to stop service $sid (still running)");
            return ERROR;
        }

    } elsif ($cmd eq 'migrate' || $cmd eq 'relocate') {

        my $target = $params[0];
        # FIX: was wrapped in 'if (!defined($target))' with the same
        # condition repeated on the die, leaving an unreachable
        # 'return EINVALID_PARAMETER' as dead code - die unconditionally
        die "$cmd '$sid' failed - missing target\n" if !defined($target);

        if ($service_config->{node} eq $target) {
            # already there
            return SUCCESS;
        }

        my $online = ($cmd eq 'migrate') ? 1 : 0;

        my $res = $plugin->migrate($haenv, $id, $target, $online);

        # something went wrong if service is still on this node
        if (!$res) {
            $haenv->log("err", "service $sid not moved (migration error)");
            return ERROR;
        }

        return SUCCESS;
    }

    $haenv->log("err", "implement me (cmd '$cmd')");
    return EUNKNOWN_COMMAND;
}
738
739
740 1;