git.proxmox.com Git - pve-ha-manager.git/blob - src/PVE/HA/LRM.pm
LRM shutdown: factor out shutdown type to reuse message
1 package PVE::HA::LRM;
2
3 # Local Resource Manager
4
5 use strict;
6 use warnings;
7 use POSIX qw(:sys_wait_h);
8
9 use PVE::SafeSyslog;
10 use PVE::Tools;
11 use PVE::HA::Tools ':exit_codes';
12 use PVE::HA::Resources;
13
14 # Server can have several states:
15
16 my $valid_states = {
17 wait_for_agent_lock => "waiting for agent lock",
18 active => "got agent_lock",
19 lost_agent_lock => "lost agent_lock",
20 };
21
22 sub new {
23 my ($this, $haenv) = @_;
24
25 my $class = ref($this) || $this;
26
27 my $self = bless {
28 haenv => $haenv,
29 status => { state => 'startup' },
30 workers => {},
31 results => {},
32 restart_tries => {},
33 shutdown_request => 0,
34 shutdown_errors => 0,
35 # mode can be: active, reboot, shutdown, restart
36 mode => 'active',
37 cluster_state_update => 0,
38 }, $class;
39
40 $self->set_local_status({ state => 'wait_for_agent_lock' });
41
42 return $self;
43 }
44
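# Handle an incoming shutdown or restart request: based on the datacenter
# shutdown_policy ('conditional', 'freeze' or 'failover') decide whether
# services get frozen or stopped, queue stop jobs on node shutdown, set the
# LRM mode accordingly and persist it to the LRM status file.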
45 sub shutdown_request {
46 my ($self) = @_;
47
48 return if $self->{shutdown_request}; # already in shutdown mode
49
50 my $haenv = $self->{haenv};
51
52 my $nodename = $haenv->nodename();
53
54 my ($shutdown, $reboot) = $haenv->is_node_shutdown();
55
56 my $dc_ha_cfg = $haenv->get_ha_settings();
57 my $shutdown_policy = $dc_ha_cfg->{shutdown_policy} // 'conditional';
58
59 if ($shutdown) { # don't log this on service restart, only on node shutdown
60 $haenv->log('info', "got shutdown request with shutdown policy '$shutdown_policy'");
61 }
62
63 my $freeze_all;
64 if ($shutdown_policy eq 'conditional') {
65 $freeze_all = $reboot;
66 } elsif ($shutdown_policy eq 'freeze') {
67 $freeze_all = 1;
68 } elsif ($shutdown_policy eq 'failover') {
69 $freeze_all = 0;
70 } else {
71 $haenv->log('err', "unknown shutdown policy '$shutdown_policy', fall back to conditional");
72 $freeze_all = $reboot;
73 }
74
75 if ($shutdown) {
76 # *always* queue stop jobs for all services if the node shuts down,
77 # independent of whether it's a reboot or a poweroff, else we may corrupt
78 # services or hinder node shutdown
79 my $ss = $self->{service_status};
80
81 foreach my $sid (keys %$ss) {
82 my $sd = $ss->{$sid};
83 next if !$sd->{node};
84 next if $sd->{node} ne $nodename;
85 # Note: use undef uid to mark shutdown/stop jobs
86 $self->queue_resource_command($sid, undef, 'request_stop');
87 }
88 }
89
90 if ($shutdown) {
91 my $shutdown_type = $reboot ? 'reboot' : 'shutdown';
92 if ($freeze_all) {
93 $haenv->log('info', "$shutdown_type LRM, stop and freeze all services");
94 $self->{mode} = 'restart';
95 } else {
96 $haenv->log('info', "shutdown LRM, stop all services");
97 $self->{mode} = 'shutdown';
98 }
99 } else {
100 $haenv->log('info', "restart LRM, freeze all services");
101 $self->{mode} = 'restart';
102 }
103
104 $self->{shutdown_request} = 1;
105
106 eval { $self->update_lrm_status() or die "not quorate?\n"; };
107 if (my $err = $@) {
108 $haenv->log('err', "unable to update lrm status file - $err");
109 }
110 }
111
112 sub get_local_status {
113 my ($self) = @_;
114
115 return $self->{status};
116 }
117
118 sub set_local_status {
119 my ($self, $new) = @_;
120
121 die "invalid state '$new->{state}'" if !$valid_states->{$new->{state}};
122
123 my $haenv = $self->{haenv};
124
125 my $old = $self->{status};
126
127 # important: only update if it really changed
128 return if $old->{state} eq $new->{state};
129
130 $haenv->log('info', "status change $old->{state} => $new->{state}");
131
132 $new->{state_change_time} = $haenv->get_time();
133
134 $self->{status} = $new;
135 }
136
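# Write the current LRM state, mode, worker results and a timestamp to the
# node's LRM status file. Returns 1 on success, 0 if we are not quorate or
# the write failed.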
137 sub update_lrm_status {
138 my ($self) = @_;
139
140 my $haenv = $self->{haenv};
141
142 return 0 if !$haenv->quorate();
143
144 my $lrm_status = {
145 state => $self->{status}->{state},
146 mode => $self->{mode},
147 results => $self->{results},
148 timestamp => $haenv->get_time(),
149 };
150
151 eval { $haenv->write_lrm_status($lrm_status); };
152 if (my $err = $@) {
153 $haenv->log('err', "unable to write lrm status file - $err");
154 return 0;
155 }
156
157 return 1;
158 }
159
160 sub update_service_status {
161 my ($self) = @_;
162
163 my $haenv = $self->{haenv};
164
165 my $ms = eval { $haenv->read_manager_status(); };
166 if (my $err = $@) {
167 $haenv->log('err', "updating service status from manager failed: $err");
168 return undef;
169 } else {
170 $self->{service_status} = $ms->{service_status} || {};
171 return 1;
172 }
173 }
174
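# Try to acquire the HA agent lock, retrying for at most about 5 seconds.
# The watchdog is opened on the first successful acquisition and updated on
# every renewal; returns 1 on success, 0 otherwise.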
175 sub get_protected_ha_agent_lock {
176 my ($self) = @_;
177
178 my $haenv = $self->{haenv};
179
180 my $count = 0;
181 my $starttime = $haenv->get_time();
182
183 for (;;) {
184
185 if ($haenv->get_ha_agent_lock()) {
186 if ($self->{ha_agent_wd}) {
187 $haenv->watchdog_update($self->{ha_agent_wd});
188 } else {
189 my $wfh = $haenv->watchdog_open();
190 $self->{ha_agent_wd} = $wfh;
191 }
192 return 1;
193 }
194
195 last if ++$count > 5; # try at most 5 times
196
197 my $delay = $haenv->get_time() - $starttime;
198 last if $delay > 5; # for max 5 seconds
199
200 $haenv->sleep(1);
201 }
202
203 return 0;
204 }
205
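# Count the services assigned to this node that are neither stopped, frozen
# nor in an error state, i.e. those the LRM still has to actively manage.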
206 sub active_service_count {
207 my ($self) = @_;
208
209 my $haenv = $self->{haenv};
210
211 my $nodename = $haenv->nodename();
212
213 my $ss = $self->{service_status};
214
215 my $count = 0;
216
217 foreach my $sid (keys %$ss) {
218 my $sd = $ss->{$sid};
219 next if !$sd->{node};
220 next if $sd->{node} ne $nodename;
221 my $req_state = $sd->{state};
222 next if !defined($req_state);
223 next if $req_state eq 'stopped';
224 next if $req_state eq 'freeze';
225 # erroneous services are not managed by HA, don't count them as active
226 next if $req_state eq 'error';
227
228 $count++;
229 }
230
231 return $count;
232 }
233
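# one-shot flag: work() first makes sure the LRM status file was written
# successfully at least once before doing any real work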
234 my $wrote_lrm_status_at_startup = 0;
235
236 sub do_one_iteration {
237 my ($self) = @_;
238
239 my $haenv = $self->{haenv};
240
241 $haenv->loop_start_hook();
242
243 $self->{cluster_state_update} = $haenv->cluster_state_update();
244
245 my $res = $self->work();
246
247 $haenv->loop_end_hook();
248
249 return $res;
250 }
251
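# One iteration of the LRM main loop: refresh the cached service status,
# perform state transitions between wait_for_agent_lock, active and
# lost_agent_lock, then do the per-state work. Returns 0 when the LRM
# should stop (a shutdown request was handled), 1 to continue.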
252 sub work {
253 my ($self) = @_;
254
255 my $haenv = $self->{haenv};
256
257 if (!$wrote_lrm_status_at_startup) {
258 if ($self->update_lrm_status()) {
259 $wrote_lrm_status_at_startup = 1;
260 } else {
261 # do nothing
262 $haenv->sleep(5);
263 return $self->{shutdown_request} ? 0 : 1;
264 }
265 }
266
267 my $status = $self->get_local_status();
268 my $state = $status->{state};
269
270 $self->update_service_status();
271
272 my $fence_request = PVE::HA::Tools::count_fenced_services($self->{service_status}, $haenv->nodename());
273
274 # do state changes first
275
276 my $ctime = $haenv->get_time();
277
278 if ($state eq 'wait_for_agent_lock') {
279
280 my $service_count = $self->active_service_count();
281
282 if (!$fence_request && $service_count && $haenv->quorate()) {
283 if ($self->get_protected_ha_agent_lock()) {
284 $self->set_local_status({ state => 'active' });
285 }
286 }
287
288 } elsif ($state eq 'lost_agent_lock') {
289
290 if (!$fence_request && $haenv->quorate()) {
291 if ($self->get_protected_ha_agent_lock()) {
292 $self->set_local_status({ state => 'active' });
293 }
294 }
295
296 } elsif ($state eq 'active') {
297
298 if ($fence_request) {
299 $haenv->log('err', "node need to be fenced - releasing agent_lock\n");
300 $self->set_local_status({ state => 'lost_agent_lock'});
301 } elsif (!$self->get_protected_ha_agent_lock()) {
302 $self->set_local_status({ state => 'lost_agent_lock'});
303 }
304 }
305
306 $status = $self->get_local_status();
307 $state = $status->{state};
308
309 # do work
310
311 if ($state eq 'wait_for_agent_lock') {
312
313 return 0 if $self->{shutdown_request};
314
315 $self->update_lrm_status();
316
317 $haenv->sleep(5);
318
319 } elsif ($state eq 'active') {
320
321 my $startime = $haenv->get_time();
322
323 my $max_time = 10;
324
325 my $shutdown = 0;
326
327 # do work (max_time seconds)
328 eval {
329 # fixme: set alert timer
330
331 # if we could not get the current service status there's no point
332 # in doing anything, try again next round.
333 return if !$self->update_service_status();
334
335 if ($self->{shutdown_request}) {
336
337 if ($self->{mode} eq 'restart') {
338
339 my $service_count = $self->active_service_count();
340
341 if ($service_count == 0) {
342
343 if ($self->run_workers() == 0) {
344 if ($self->{ha_agent_wd}) {
345 $haenv->watchdog_close($self->{ha_agent_wd});
346 delete $self->{ha_agent_wd};
347 }
348
349 $shutdown = 1;
350
351 # restart with no or frozen services, release the lock
352 $haenv->release_ha_agent_lock();
353 }
354 }
355 } else {
356
357 if ($self->run_workers() == 0) {
358 if ($self->{shutdown_errors} == 0) {
359 if ($self->{ha_agent_wd}) {
360 $haenv->watchdog_close($self->{ha_agent_wd});
361 delete $self->{ha_agent_wd};
362 }
363
364 # shutdown with all services stopped thus release the lock
365 $haenv->release_ha_agent_lock();
366 }
367
368 $shutdown = 1;
369 }
370 }
371 } else {
372 if (!$self->{cluster_state_update}) {
373 # update failed but we could still renew our lock (cfs restart?),
374 # safely skip manage and expect to update just fine next round
375 $haenv->log('notice', "temporary inconsistent cluster state " .
376 "(cfs restart?), skip round");
377 return;
378 }
379
380 $self->manage_resources();
381
382 }
383 };
384 if (my $err = $@) {
385 $haenv->log('err', "got unexpected error - $err");
386 }
387
388 $self->update_lrm_status();
389
390 return 0 if $shutdown;
391
392 $haenv->sleep_until($startime + $max_time);
393
394 } elsif ($state eq 'lost_agent_lock') {
395
396 # Note: watchdog is active and will trigger soon!
397
398 # so we hope to get the lock back soon!
399
400 if ($self->{shutdown_request}) {
401
402 my $service_count = $self->active_service_count();
403
404 if ($service_count > 0) {
405 $haenv->log('err', "get shutdown request in state 'lost_agent_lock' - " .
406 "detected $service_count running services");
407
408 if ($self->{mode} eq 'restart') {
409 my $state_mt = $self->{status}->{state_change_time};
410
411 # watchdog should have already triggered, so either it's
412 # set to noboot or it failed. As we are in restart mode and
413 # have an infinite stop timeout -> exit now - we don't touch
414 # services or change state, so this is safe, relatively speaking
415 if (($haenv->get_time() - $state_mt) > 90) {
416 $haenv->log('err', "lost agent lock and restart request for over 90 seconds - giving up!");
417 return 0;
418 }
419 }
420 } else {
421
422 # all services are stopped, so we can close the watchdog
423
424 if ($self->{ha_agent_wd}) {
425 $haenv->watchdog_close($self->{ha_agent_wd});
426 delete $self->{ha_agent_wd};
427 }
428
429 return 0;
430 }
431 }
432
433 $haenv->sleep(5);
434
435 } else {
436
437 die "got unexpected status '$state'\n";
438
439 }
440
441 return 1;
442 }
443
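# Start queued resource agent commands, forking at most $max_workers
# children (or executing directly if max_workers is 0, e.g. for the
# regression tests), and reap finished workers for up to ~5 seconds.
# Returns the number of workers still queued or running.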
444 sub run_workers {
445 my ($self) = @_;
446
447 my $haenv = $self->{haenv};
448
449 my $starttime = $haenv->get_time();
450
451 # number of workers to start, if 0 we exec the command directly without forking
452 my $max_workers = $haenv->get_max_workers();
453
454 my $sc = $haenv->read_service_config();
455
456 while (($haenv->get_time() - $starttime) < 5) {
457 my $count = $self->check_active_workers();
458
459 foreach my $sid (sort keys %{$self->{workers}}) {
460 last if $count >= $max_workers && $max_workers > 0;
461
462 my $w = $self->{workers}->{$sid};
463 if (!$w->{pid}) {
464 # only fork if we may, else call exec_resource_agent
465 # directly (e.g. for regression tests)
466 if ($max_workers > 0) {
467 my $pid = fork();
468 if (!defined($pid)) {
469 $haenv->log('err', "fork worker failed");
470 $count = 0; last; # abort, try later
471 } elsif ($pid == 0) {
472 $haenv->after_fork(); # cleanup
473
474 # do work
475 my $res = -1;
476 eval {
477 $res = $self->exec_resource_agent($sid, $sc->{$sid}, $w->{state}, $w->{params});
478 };
479 if (my $err = $@) {
480 $haenv->log('err', $err);
481 POSIX::_exit(-1);
482 }
483 POSIX::_exit($res);
484 } else {
485 $count++;
486 $w->{pid} = $pid;
487 }
488 } else {
489 my $res = -1;
490 eval {
491 $res = $self->exec_resource_agent($sid, $sc->{$sid}, $w->{state}, $w->{params});
492 $res = $res << 8 if $res > 0;
493 };
494 if (my $err = $@) {
495 $haenv->log('err', $err);
496 }
497 if (defined($w->{uid})) {
498 $self->resource_command_finished($sid, $w->{uid}, $res);
499 } else {
500 $self->stop_command_finished($sid, $res);
501 }
502 }
503 }
504 }
505
506 last if !$count;
507
508 $haenv->sleep(1);
509 }
510
511 return scalar(keys %{$self->{workers}});
512 }
513
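# Queue a resource command for every service the manager assigned to this
# node (skipping frozen ones and entries without an uid), then run the
# resulting workers; also drop restart counters of services that vanished.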
514 sub manage_resources {
515 my ($self) = @_;
516
517 my $haenv = $self->{haenv};
518
519 my $nodename = $haenv->nodename();
520
521 my $ss = $self->{service_status};
522
523 foreach my $sid (keys %{$self->{restart_tries}}) {
524 delete $self->{restart_tries}->{$sid} if !$ss->{$sid};
525 }
526
527 foreach my $sid (keys %$ss) {
528 my $sd = $ss->{$sid};
529 next if !$sd->{node};
530 next if !$sd->{uid};
531 next if $sd->{node} ne $nodename;
532 my $req_state = $sd->{state};
533 next if !defined($req_state);
534 next if $req_state eq 'freeze';
535 $self->queue_resource_command($sid, $sd->{uid}, $req_state, {'target' => $sd->{target}});
536 }
537
538 return $self->run_workers();
539 }
540
541 sub queue_resource_command {
542 my ($self, $sid, $uid, $state, $params) = @_;
543
544 # do not queue exactly the same command twice as this may lead to
545 # an inconsistent HA state when the first command fails but the CRM
546 # does not process its failure right away and the LRM starts a second
547 # try, without the CRM knowing of it (race condition)
548 # The 'stopped' command is an exception as we do not process its result
549 # in the CRM and we want to execute it always (even with no active CRM)
550 return if $state ne 'stopped' && $uid && defined($self->{results}->{$uid});
551
552 if (my $w = $self->{workers}->{$sid}) {
553 return if $w->{pid}; # already started
554 # else, delete and overwrite queue entry with new command
555 delete $self->{workers}->{$sid};
556 }
557
558 $self->{workers}->{$sid} = {
559 sid => $sid,
560 uid => $uid,
561 state => $state,
562 };
563
564 $self->{workers}->{$sid}->{params} = $params if $params;
565 }
566
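# Reap finished worker processes with a non-blocking waitpid() and hand
# their 16-bit wait status to the *_command_finished handlers; returns the
# number of workers that are still running.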
567 sub check_active_workers {
568 my ($self) = @_;
569
570 # finish/count workers
571 my $count = 0;
572 foreach my $sid (keys %{$self->{workers}}) {
573 my $w = $self->{workers}->{$sid};
574 if (my $pid = $w->{pid}) {
575 # check status
576 my $waitpid = waitpid($pid, WNOHANG);
577 if (defined($waitpid) && ($waitpid == $pid)) {
578 if (defined($w->{uid})) {
579 $self->resource_command_finished($sid, $w->{uid}, $?);
580 } else {
581 $self->stop_command_finished($sid, $?);
582 }
583 } else {
584 $count++;
585 }
586 }
587 }
588
589 return $count;
590 }
591
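# Evaluate the wait status of a finished shutdown/stop job (queued with an
# undef uid): the low 7 bits carry a terminating signal, the exit code sits
# in the upper byte ($status >> 8). Any failure just bumps shutdown_errors.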
592 sub stop_command_finished {
593 my ($self, $sid, $status) = @_;
594
595 my $haenv = $self->{haenv};
596
597 my $w = delete $self->{workers}->{$sid};
598 return if !$w; # should not happen
599
600 my $exit_code = -1;
601
602 if ($status == -1) {
603 $haenv->log('err', "resource agent $sid finished - failed to execute");
604 } elsif (my $sig = ($status & 127)) {
605 $haenv->log('err', "resource agent $sid finished - got signal $sig");
606 } else {
607 $exit_code = ($status >> 8);
608 }
609
610 if ($exit_code != 0) {
611 $self->{shutdown_errors}++;
612 }
613 }
614
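# Evaluate the wait status of a regular resource command, let
# handle_service_exitcode() apply the restart policy, record the result
# under the command's uid for the CRM and prune results whose uids no
# longer show up in the manager's service status.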
615 sub resource_command_finished {
616 my ($self, $sid, $uid, $status) = @_;
617
618 my $haenv = $self->{haenv};
619
620 my $w = delete $self->{workers}->{$sid};
621 return if !$w; # should not happen
622
623 my $exit_code = -1;
624
625 if ($status == -1) {
626 $haenv->log('err', "resource agent $sid finished - failed to execute");
627 } elsif (my $sig = ($status & 127)) {
628 $haenv->log('err', "resource agent $sid finished - got signal $sig");
629 } else {
630 $exit_code = ($status >> 8);
631 }
632
633 $exit_code = $self->handle_service_exitcode($sid, $w->{state}, $exit_code);
634
635 return if $exit_code == ETRY_AGAIN; # tell nobody, simply retry
636
637 $self->{results}->{$uid} = {
638 sid => $w->{sid},
639 state => $w->{state},
640 exit_code => $exit_code,
641 };
642
643 my $ss = $self->{service_status};
644
645 # compute hash of valid/existing uids
646 my $valid_uids = {};
647 foreach my $sid (keys %$ss) {
648 my $sd = $ss->{$sid};
649 next if !$sd->{uid};
650 $valid_uids->{$sd->{uid}} = 1;
651 }
652
653 my $results = {};
654 foreach my $id (keys %{$self->{results}}) {
655 next if !$valid_uids->{$id};
656 $results->{$id} = $self->{results}->{$id};
657 }
658 $self->{results} = $results;
659 }
660
661 # processes the exit code from a finished resource agent, so that the CRM knows
662 # if the LRM wants to retry an action based on the current recovery policies for
663 # the failed service, or whether the CRM itself must try to recover from the failure.
664 sub handle_service_exitcode {
665 my ($self, $sid, $cmd, $exit_code) = @_;
666
667 my $haenv = $self->{haenv};
668 my $tries = $self->{restart_tries};
669
670 my $sc = $haenv->read_service_config();
671
672 my $max_restart = 0;
673
674 if (my $cd = $sc->{$sid}) {
675 $max_restart = $cd->{max_restart};
676 }
677
678 if ($cmd eq 'started') {
679
680 if ($exit_code == SUCCESS) {
681
682 $tries->{$sid} = 0;
683
684 return $exit_code;
685
686 } elsif ($exit_code == ERROR) {
687
688 $tries->{$sid} = 0 if !defined($tries->{$sid});
689
690 if ($tries->{$sid} >= $max_restart) {
691 $haenv->log('err', "unable to start service $sid on local node".
692 " after $tries->{$sid} retries");
693 $tries->{$sid} = 0;
694 return ERROR;
695 }
696
697 $tries->{$sid}++;
698
699 $haenv->log('warning', "restart policy: retry number $tries->{$sid}" .
700 " for service '$sid'");
701 # tell CRM that we retry the start
702 return ETRY_AGAIN;
703 }
704 }
705
706 return $exit_code;
707
708 }
709
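# Dispatch a single queued command ('started', 'stopped'/'request_stop',
# 'migrate'/'relocate' or 'error') to the resource plugin for the service
# type and map the outcome to one of the exit codes from
# PVE::HA::Tools ':exit_codes' (SUCCESS, ERROR, EWRONG_NODE, ...).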
710 sub exec_resource_agent {
711 my ($self, $sid, $service_config, $cmd, $params) = @_;
712
713 # setup execution environment
714
715 $ENV{'PATH'} = '/sbin:/bin:/usr/sbin:/usr/bin';
716
717 my $haenv = $self->{haenv};
718
719 my $nodename = $haenv->nodename();
720
721 my (undef, $service_type, $service_name) = $haenv->parse_sid($sid);
722
723 my $plugin = PVE::HA::Resources->lookup($service_type);
724 if (!$plugin) {
725 $haenv->log('err', "service type '$service_type' not implemented");
726 return EUNKNOWN_SERVICE_TYPE;
727 }
728
729 if (!$service_config) {
730 $haenv->log('err', "missing resource configuration for '$sid'");
731 return EUNKNOWN_SERVICE;
732 }
733
734 # process error state early
735 if ($cmd eq 'error') {
736
737 $haenv->log('err', "service $sid is in an error state and needs manual " .
738 "intervention. Look up 'ERROR RECOVERY' in the documentation.");
739
740 return SUCCESS; # error always succeeds
741 }
742
743 if ($service_config->{node} ne $nodename) {
744 $haenv->log('err', "service '$sid' not on this node");
745 return EWRONG_NODE;
746 }
747
748 my $id = $service_name;
749
750 my $running = $plugin->check_running($haenv, $id);
751
752 if ($cmd eq 'started') {
753
754 return SUCCESS if $running;
755
756 $haenv->log("info", "starting service $sid");
757
758 $plugin->start($haenv, $id);
759
760 $running = $plugin->check_running($haenv, $id);
761
762 if ($running) {
763 $haenv->log("info", "service status $sid started");
764 return SUCCESS;
765 } else {
766 $haenv->log("warning", "unable to start service $sid");
767 return ERROR;
768 }
769
770 } elsif ($cmd eq 'request_stop' || $cmd eq 'stopped') {
771
772 return SUCCESS if !$running;
773
774 $haenv->log("info", "stopping service $sid");
775
776 $plugin->shutdown($haenv, $id);
777
778 $running = $plugin->check_running($haenv, $id);
779
780 if (!$running) {
781 $haenv->log("info", "service status $sid stopped");
782 return SUCCESS;
783 } else {
784 $haenv->log("info", "unable to stop stop service $sid (still running)");
785 return ERROR;
786 }
787
788 } elsif ($cmd eq 'migrate' || $cmd eq 'relocate') {
789
790 my $target = $params->{target};
791 if (!defined($target)) {
792 die "$cmd '$sid' failed - missing target\n" if !defined($target);
793 return EINVALID_PARAMETER;
794 }
795
796 if ($service_config->{node} eq $target) {
797 # already there
798 return SUCCESS;
799 }
800
801 my $online = ($cmd eq 'migrate') ? 1 : 0;
802
803 my $res = $plugin->migrate($haenv, $id, $target, $online);
804
805 # something went wrong if service is still on this node
806 if (!$res) {
807 $haenv->log("err", "service $sid not moved (migration error)");
808 return ERROR;
809 }
810
811 return SUCCESS;
812
813 }
814
815 $haenv->log("err", "implement me (cmd '$cmd')");
816 return EUNKNOWN_COMMAND;
817 }
818
819
820 1;