package PVE::HA::LRM;

# Local Resource Manager

use strict;
use warnings;
use POSIX qw(:sys_wait_h);

use PVE::SafeSyslog;
use PVE::Tools;
use PVE::HA::Tools ':exit_codes';
use PVE::HA::Resources;

# Server can have several states:

my $valid_states = {
    wait_for_agent_lock => "waiting for agent lock",
    active => "got agent_lock",
    lost_agent_lock => "lost agent_lock",
};

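# Note: the transitions between these states are driven by work() below: the
# LRM starts in 'wait_for_agent_lock', switches to 'active' once it holds the
# protected agent lock, and falls back to 'lost_agent_lock' if the lock is
# lost or the node is scheduled for fencing.
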
sub new {
    my ($this, $haenv) = @_;

    my $class = ref($this) || $this;

    my $self = bless {
        haenv => $haenv,
        status => { state => 'startup' },
        workers => {},
        results => {},
        restart_tries => {},
        shutdown_request => 0,
        shutdown_errors => 0,
        # mode can be: active, reboot, shutdown, restart
        mode => 'active',
        cluster_state_update => 0,
    }, $class;

    $self->set_local_status({ state => 'wait_for_agent_lock' });

    return $self;
}

sub shutdown_request {
    my ($self) = @_;

    return if $self->{shutdown_request}; # already in shutdown mode

    my $haenv = $self->{haenv};

    my $nodename = $haenv->nodename();

    my ($shutdown, $reboot) = $haenv->is_node_shutdown();

    my $dc_ha_cfg = $haenv->get_ha_settings();
    my $shutdown_policy = $dc_ha_cfg->{shutdown_policy} // 'conditional';

    if ($shutdown) { # don't log this on service restart, only on node shutdown
        $haenv->log('info', "got shutdown request with shutdown policy '$shutdown_policy'");
    }

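    # Decide whether services should be frozen (kept assigned to this node)
    # or stopped and given up: 'conditional' freezes only on reboot, 'freeze'
    # always freezes, and 'failover' never freezes, the idea being that the
    # manager can then recover the stopped services on other nodes.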
    my $freeze_all = $reboot;
    if ($shutdown_policy eq 'conditional') {
        $freeze_all = $reboot;
    } elsif ($shutdown_policy eq 'freeze') {
        $freeze_all = 1;
    } elsif ($shutdown_policy eq 'failover') {
        $freeze_all = 0;
    } else {
        $haenv->log('err', "unknown shutdown policy '$shutdown_policy', falling back to 'conditional'");
    }

    if ($shutdown) {
        # *always* queue stop jobs for all services if the node shuts down,
        # independent of whether it's a reboot or a poweroff, else we may
        # corrupt services or hinder node shutdown
        my $ss = $self->{service_status};

        foreach my $sid (keys %$ss) {
            my $sd = $ss->{$sid};
            next if !$sd->{node};
            next if $sd->{node} ne $nodename;
            # Note: use undef uid to mark shutdown/stop jobs
            $self->queue_resource_command($sid, undef, 'request_stop');
        }
    }

    if ($shutdown) {
        if ($freeze_all) {
            if ($shutdown_policy eq 'conditional') {
                $haenv->log('info', "reboot LRM, stop and freeze all services");
            } else {
                $haenv->log('info', "shutdown LRM, stop and freeze all services");
            }
            $self->{mode} = 'restart';
        } else {
            $haenv->log('info', "shutdown LRM, stop all services");
            $self->{mode} = 'shutdown';
        }
    } else {
        $haenv->log('info', "restart LRM, freeze all services");
        $self->{mode} = 'restart';
    }

    $self->{shutdown_request} = 1;

    eval { $self->update_lrm_status(); };
    if (my $err = $@) {
        $haenv->log('err', "unable to update lrm status file - $err");
    }
}

sub get_local_status {
    my ($self) = @_;

    return $self->{status};
}

sub set_local_status {
    my ($self, $new) = @_;

    die "invalid state '$new->{state}'" if !$valid_states->{$new->{state}};

    my $haenv = $self->{haenv};

    my $old = $self->{status};

    # important: only update if it really changed
    return if $old->{state} eq $new->{state};

    $haenv->log('info', "status change $old->{state} => $new->{state}");

    $new->{state_change_time} = $haenv->get_time();

    $self->{status} = $new;
}

sub update_lrm_status {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    return 0 if !$haenv->quorate();

    my $lrm_status = {
        state => $self->{status}->{state},
        mode => $self->{mode},
        results => $self->{results},
        timestamp => $haenv->get_time(),
    };

    eval { $haenv->write_lrm_status($lrm_status); };
    if (my $err = $@) {
        $haenv->log('err', "unable to write lrm status file - $err");
        return 0;
    }

    return 1;
}

sub update_service_status {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    my $ms = eval { $haenv->read_manager_status(); };
    if (my $err = $@) {
        $haenv->log('err', "updating service status from manager failed: $err");
        return undef;
    } else {
        $self->{service_status} = $ms->{service_status} || {};
        return 1;
    }
}

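# Try to acquire the HA agent lock and, while holding it, keep the watchdog
# updated (opening it on first success). Retries for at most ~5 seconds
# before giving up.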
sub get_protected_ha_agent_lock {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    my $count = 0;
    my $starttime = $haenv->get_time();

    for (;;) {

        if ($haenv->get_ha_agent_lock()) {
            if ($self->{ha_agent_wd}) {
                $haenv->watchdog_update($self->{ha_agent_wd});
            } else {
                my $wfh = $haenv->watchdog_open();
                $self->{ha_agent_wd} = $wfh;
            }
            return 1;
        }

        last if ++$count > 5; # try at most 5 times

        my $delay = $haenv->get_time() - $starttime;
        last if $delay > 5; # for at most 5 seconds

        $haenv->sleep(1);
    }

    return 0;
}

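# count the services on this node which are neither stopped, frozen nor in an
# error state, i.e., those the LRM still has to actively manage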
sub active_service_count {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    my $nodename = $haenv->nodename();

    my $ss = $self->{service_status};

    my $count = 0;

    foreach my $sid (keys %$ss) {
        my $sd = $ss->{$sid};
        next if !$sd->{node};
        next if $sd->{node} ne $nodename;
        my $req_state = $sd->{state};
        next if !defined($req_state);
        next if $req_state eq 'stopped';
        next if $req_state eq 'freeze';
        # erroneous services are not managed by HA, don't count them as active
        next if $req_state eq 'error';

        $count++;
    }

    return $count;
}

my $wrote_lrm_status_at_startup = 0;

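# Minimal usage sketch, assuming an environment object like the one the
# pve-ha-lrm daemon passes in (any PVE::HA::Env instance should do):
#
#   my $lrm = PVE::HA::LRM->new($haenv);
#   while ($lrm->do_one_iteration()) {
#       # keep looping; do_one_iteration() returns 0 once a requested
#       # shutdown has been fully processed
#   }
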
sub do_one_iteration {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    $haenv->loop_start_hook();

    $self->{cluster_state_update} = $haenv->cluster_state_update();

    my $res = $self->work();

    $haenv->loop_end_hook();

    return $res;
}

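# one LRM work round: refresh the local state and the agent lock first, then
# act according to the resulting state; returns 0 once a requested shutdown
# has completed, 1 otherwise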
sub work {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    if (!$wrote_lrm_status_at_startup) {
        if ($self->update_lrm_status()) {
            $wrote_lrm_status_at_startup = 1;
        } else {
            # do nothing
            $haenv->sleep(5);
            return $self->{shutdown_request} ? 0 : 1;
        }
    }

    my $status = $self->get_local_status();
    my $state = $status->{state};

    $self->update_service_status();

    my $fence_request = PVE::HA::Tools::count_fenced_services($self->{service_status}, $haenv->nodename());

    # do state changes first

    my $ctime = $haenv->get_time();

    if ($state eq 'wait_for_agent_lock') {

        my $service_count = $self->active_service_count();

        if (!$fence_request && $service_count && $haenv->quorate()) {
            if ($self->get_protected_ha_agent_lock()) {
                $self->set_local_status({ state => 'active' });
            }
        }

    } elsif ($state eq 'lost_agent_lock') {

        if (!$fence_request && $haenv->quorate()) {
            if ($self->get_protected_ha_agent_lock()) {
                $self->set_local_status({ state => 'active' });
            }
        }

    } elsif ($state eq 'active') {

        if ($fence_request) {
            $haenv->log('err', "node needs to be fenced - releasing agent_lock\n");
            $self->set_local_status({ state => 'lost_agent_lock' });
        } elsif (!$self->get_protected_ha_agent_lock()) {
            $self->set_local_status({ state => 'lost_agent_lock' });
        }
    }

    $status = $self->get_local_status();
    $state = $status->{state};

    # do work

    if ($state eq 'wait_for_agent_lock') {

        return 0 if $self->{shutdown_request};

        $self->update_lrm_status();

        $haenv->sleep(5);

    } elsif ($state eq 'active') {

        my $starttime = $haenv->get_time();

        my $max_time = 10;

        my $shutdown = 0;

        # do work (max_time seconds)
        eval {
            # fixme: set alert timer

            # if we could not get the current service status there's no point
            # in doing anything, try again next round.
            return if !$self->update_service_status();

            if ($self->{shutdown_request}) {

                if ($self->{mode} eq 'restart') {

                    my $service_count = $self->active_service_count();

                    if ($service_count == 0) {

                        if ($self->run_workers() == 0) {
                            if ($self->{ha_agent_wd}) {
                                $haenv->watchdog_close($self->{ha_agent_wd});
                                delete $self->{ha_agent_wd};
                            }

                            $shutdown = 1;

                            # restart with no or frozen services, release the lock
                            $haenv->release_ha_agent_lock();
                        }
                    }
                } else {

                    if ($self->run_workers() == 0) {
                        if ($self->{shutdown_errors} == 0) {
                            if ($self->{ha_agent_wd}) {
                                $haenv->watchdog_close($self->{ha_agent_wd});
                                delete $self->{ha_agent_wd};
                            }

                            # shutdown with all services stopped, thus release the lock
                            $haenv->release_ha_agent_lock();
                        }

                        $shutdown = 1;
                    }
                }
            } else {
                if (!$self->{cluster_state_update}) {
                    # update failed but we could still renew our lock (cfs restart?),
                    # safely skip manage and expect to update just fine next round
                    $haenv->log('notice', "temporary inconsistent cluster state " .
                        "(cfs restart?), skip round");
                    return;
                }

                $self->manage_resources();
            }
        };
        if (my $err = $@) {
            $haenv->log('err', "got unexpected error - $err");
        }

        $self->update_lrm_status();

        return 0 if $shutdown;

        $haenv->sleep_until($starttime + $max_time);

    } elsif ($state eq 'lost_agent_lock') {

        # Note: the watchdog is active and will trigger soon!

        # so we hope to get the lock back soon!

        if ($self->{shutdown_request}) {

            my $service_count = $self->active_service_count();

            if ($service_count > 0) {
                $haenv->log('err', "got shutdown request in state 'lost_agent_lock' - " .
                    "detected $service_count running services");

            } else {

                # all services are stopped, so we can close the watchdog

                if ($self->{ha_agent_wd}) {
                    $haenv->watchdog_close($self->{ha_agent_wd});
                    delete $self->{ha_agent_wd};
                }

                return 0;
            }
        }

        $haenv->sleep(5);

    } else {

        die "got unexpected status '$state'\n";

    }

    return 1;
}

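# start/continue queued worker processes, forking up to $max_workers at a
# time; loops for up to 5 seconds and returns the number of workers still
# queued or running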
sub run_workers {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    my $starttime = $haenv->get_time();

    # number of workers to start; if 0, we exec the command directly without forking
    my $max_workers = $haenv->get_max_workers();

    my $sc = $haenv->read_service_config();

    while (($haenv->get_time() - $starttime) < 5) {
        my $count = $self->check_active_workers();

        foreach my $sid (sort keys %{$self->{workers}}) {
            last if $count >= $max_workers && $max_workers > 0;

            my $w = $self->{workers}->{$sid};
            if (!$w->{pid}) {
                # only fork if we may, else call exec_resource_agent
                # directly (e.g. for regression tests)
                if ($max_workers > 0) {
                    my $pid = fork();
                    if (!defined($pid)) {
                        $haenv->log('err', "fork worker failed");
                        $count = 0; last; # abort, try later
                    } elsif ($pid == 0) {
                        $haenv->after_fork(); # cleanup

                        # do work
                        my $res = -1;
                        eval {
                            $res = $self->exec_resource_agent($sid, $sc->{$sid}, $w->{state}, $w->{target});
                        };
                        if (my $err = $@) {
                            $haenv->log('err', $err);
                            POSIX::_exit(-1);
                        }
                        POSIX::_exit($res);
                    } else {
                        $count++;
                        $w->{pid} = $pid;
                    }
                } else {
                    my $res = -1;
                    eval {
                        $res = $self->exec_resource_agent($sid, $sc->{$sid}, $w->{state}, $w->{target});
                        $res = $res << 8 if $res > 0;
                    };
                    if (my $err = $@) {
                        $haenv->log('err', $err);
                    }
                    if (defined($w->{uid})) {
                        $self->resource_command_finished($sid, $w->{uid}, $res);
                    } else {
                        $self->stop_command_finished($sid, $res);
                    }
                }
            }
        }

        last if !$count;

        $haenv->sleep(1);
    }

    return scalar(keys %{$self->{workers}});
}

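# queue a command for every service assigned to this node (skipping frozen
# ones) and run the workers; also drops stale restart counters for services
# which are no longer known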
sub manage_resources {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    my $nodename = $haenv->nodename();

    my $ss = $self->{service_status};

    foreach my $sid (keys %{$self->{restart_tries}}) {
        delete $self->{restart_tries}->{$sid} if !$ss->{$sid};
    }

    foreach my $sid (keys %$ss) {
        my $sd = $ss->{$sid};
        next if !$sd->{node};
        next if !$sd->{uid};
        next if $sd->{node} ne $nodename;
        my $req_state = $sd->{state};
        next if !defined($req_state);
        next if $req_state eq 'freeze';
        $self->queue_resource_command($sid, $sd->{uid}, $req_state, $sd->{target});
    }

    return $self->run_workers();
}

sub queue_resource_command {
    my ($self, $sid, $uid, $state, $target) = @_;

    # Do not queue exactly the same command twice, as this may lead to an
    # inconsistent HA state when the first command fails but the CRM does not
    # process its failure right away and the LRM starts a second try, without
    # the CRM knowing of it (race condition).
    # The 'stopped' command is an exception, as we do not process its result
    # in the CRM and we want to execute it always (even with no active CRM).
    return if $state ne 'stopped' && $uid && defined($self->{results}->{$uid});

    if (my $w = $self->{workers}->{$sid}) {
        return if $w->{pid}; # already started
        # else, delete and overwrite queue entry with new command
        delete $self->{workers}->{$sid};
    }

    $self->{workers}->{$sid} = {
        sid => $sid,
        uid => $uid,
        state => $state,
    };

    $self->{workers}->{$sid}->{target} = $target if $target;
}

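# reap finished worker processes with waitpid(WNOHANG) and return the number
# of workers which are still running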
sub check_active_workers {
    my ($self) = @_;

    # finish/count workers
    my $count = 0;
    foreach my $sid (keys %{$self->{workers}}) {
        my $w = $self->{workers}->{$sid};
        if (my $pid = $w->{pid}) {
            # check status
            my $waitpid = waitpid($pid, WNOHANG);
            if (defined($waitpid) && ($waitpid == $pid)) {
                if (defined($w->{uid})) {
                    $self->resource_command_finished($sid, $w->{uid}, $?);
                } else {
                    $self->stop_command_finished($sid, $?);
                }
            } else {
                $count++;
            }
        }
    }

    return $count;
}

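# Note: in both *_command_finished subs below, $status is a raw wait(2)
# status: -1 means the agent failed to execute at all, the low 7 bits hold a
# terminating signal, and the high byte holds the agent's exit code.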
sub stop_command_finished {
    my ($self, $sid, $status) = @_;

    my $haenv = $self->{haenv};

    my $w = delete $self->{workers}->{$sid};
    return if !$w; # should not happen

    my $exit_code = -1;

    if ($status == -1) {
        $haenv->log('err', "resource agent $sid finished - failed to execute");
    } elsif (my $sig = ($status & 127)) {
        $haenv->log('err', "resource agent $sid finished - got signal $sig");
    } else {
        $exit_code = ($status >> 8);
    }

    if ($exit_code != 0) {
        $self->{shutdown_errors}++;
    }
}

sub resource_command_finished {
    my ($self, $sid, $uid, $status) = @_;

    my $haenv = $self->{haenv};

    my $w = delete $self->{workers}->{$sid};
    return if !$w; # should not happen

    my $exit_code = -1;

    if ($status == -1) {
        $haenv->log('err', "resource agent $sid finished - failed to execute");
    } elsif (my $sig = ($status & 127)) {
        $haenv->log('err', "resource agent $sid finished - got signal $sig");
    } else {
        $exit_code = ($status >> 8);
    }

    $exit_code = $self->handle_service_exitcode($sid, $w->{state}, $exit_code);

    return if $exit_code == ETRY_AGAIN; # tell nobody, simply retry

    $self->{results}->{$uid} = {
        sid => $w->{sid},
        state => $w->{state},
        exit_code => $exit_code,
    };

    my $ss = $self->{service_status};

    # compute hash of valid/existing uids
    my $valid_uids = {};
    foreach my $sid (keys %$ss) {
        my $sd = $ss->{$sid};
        next if !$sd->{uid};
        $valid_uids->{$sd->{uid}} = 1;
    }

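    # keep only results whose uid is still referenced in the service status,
    # so the results hash cannot grow without bound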
    my $results = {};
    foreach my $id (keys %{$self->{results}}) {
        next if !$valid_uids->{$id};
        $results->{$id} = $self->{results}->{$id};
    }
    $self->{results} = $results;
}

# processes the exit code from a finished resource agent, so that the CRM knows
# if the LRM wants to retry an action based on the current recovery policies for
# the failed service, or if the CRM itself must try to recover from the failure
sub handle_service_exitcode {
    my ($self, $sid, $cmd, $exit_code) = @_;

    my $haenv = $self->{haenv};
    my $tries = $self->{restart_tries};

    my $sc = $haenv->read_service_config();

    my $max_restart = 0;

    if (my $cd = $sc->{$sid}) {
        $max_restart = $cd->{max_restart};
    }

    if ($cmd eq 'started') {

        if ($exit_code == SUCCESS) {

            $tries->{$sid} = 0;

            return $exit_code;

        } elsif ($exit_code == ERROR) {

            $tries->{$sid} = 0 if !defined($tries->{$sid});

            if ($tries->{$sid} >= $max_restart) {
                $haenv->log('err', "unable to start service $sid on local node" .
                    " after $tries->{$sid} retries");
                $tries->{$sid} = 0;
                return ERROR;
            }

            $tries->{$sid}++;

            $haenv->log('warning', "restart policy: retry number $tries->{$sid}" .
                " for service '$sid'");
            # tell the CRM that we retry the start
            return ETRY_AGAIN;
        }
    }

    return $exit_code;
}

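# dispatch a single command ('started', 'stopped', 'request_stop', 'migrate',
# 'relocate' or 'error') to the resource plugin for $sid; returns one of the
# exit codes exported by PVE::HA::Tools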
sub exec_resource_agent {
    my ($self, $sid, $service_config, $cmd, @params) = @_;

    # setup execution environment

    $ENV{'PATH'} = '/sbin:/bin:/usr/sbin:/usr/bin';

    my $haenv = $self->{haenv};

    my $nodename = $haenv->nodename();

    my (undef, $service_type, $service_name) = $haenv->parse_sid($sid);

    my $plugin = PVE::HA::Resources->lookup($service_type);
    if (!$plugin) {
        $haenv->log('err', "service type '$service_type' not implemented");
        return EUNKNOWN_SERVICE_TYPE;
    }

    if (!$service_config) {
        $haenv->log('err', "missing resource configuration for '$sid'");
        return EUNKNOWN_SERVICE;
    }

    # process error state early
    if ($cmd eq 'error') {

        $haenv->log('err', "service $sid is in an error state and needs manual " .
            "intervention. Look up 'ERROR RECOVERY' in the documentation.");

        return SUCCESS; # error always succeeds
    }

    if ($service_config->{node} ne $nodename) {
        $haenv->log('err', "service '$sid' not on this node");
        return EWRONG_NODE;
    }

    my $id = $service_name;

    my $running = $plugin->check_running($haenv, $id);

    if ($cmd eq 'started') {

        return SUCCESS if $running;

        $haenv->log("info", "starting service $sid");

        $plugin->start($haenv, $id);

        $running = $plugin->check_running($haenv, $id);

        if ($running) {
            $haenv->log("info", "service status $sid started");
            return SUCCESS;
        } else {
            $haenv->log("warning", "unable to start service $sid");
            return ERROR;
        }

    } elsif ($cmd eq 'request_stop' || $cmd eq 'stopped') {

        return SUCCESS if !$running;

        $haenv->log("info", "stopping service $sid");

        $plugin->shutdown($haenv, $id);

        $running = $plugin->check_running($haenv, $id);

        if (!$running) {
            $haenv->log("info", "service status $sid stopped");
            return SUCCESS;
        } else {
            $haenv->log("info", "unable to stop service $sid (still running)");
            return ERROR;
        }

    } elsif ($cmd eq 'migrate' || $cmd eq 'relocate') {

        my $target = $params[0];
        die "$cmd '$sid' failed - missing target\n" if !defined($target);

        if ($service_config->{node} eq $target) {
            # already there
            return SUCCESS;
        }

        my $online = ($cmd eq 'migrate') ? 1 : 0;

        my $res = $plugin->migrate($haenv, $id, $target, $online);

        # something went wrong if the service is still on this node
        if (!$res) {
            $haenv->log("err", "service $sid not moved (migration error)");
            return ERROR;
        }

        return SUCCESS;

    }

    $haenv->log("err", "implement me (cmd '$cmd')");
    return EUNKNOWN_COMMAND;
}

1;