]> git.proxmox.com Git - pve-ha-manager.git/blob - src/PVE/HA/LRM.pm
afca084b7b38e1edf8915036e5b32d51ea84def7
[pve-ha-manager.git] / src / PVE / HA / LRM.pm
1 package PVE::HA::LRM;
2
3 # Local Resource Manager
4
5 use strict;
6 use warnings;
7 use POSIX qw(:sys_wait_h);
8
9 use PVE::SafeSyslog;
10 use PVE::Tools;
11 use PVE::HA::Tools ':exit_codes';
12 use PVE::HA::Resources;
13
14 # Server can have several states:
15
# The valid LRM runtime states; keys are state names, values are
# human-readable descriptions (used for status output/logging).
my $valid_states = {
    wait_for_agent_lock => "waiting for agent lock",
    active => "got agent_lock",
    lost_agent_lock => "lost agent_lock",
};
21
# Construct a new Local Resource Manager bound to the given HA
# environment object. Starts out in the transient 'startup' state and
# immediately transitions to 'wait_for_agent_lock'.
sub new {
    my ($this, $haenv) = @_;

    my $class = ref($this) || $this;

    my $self = {
        haenv => $haenv,
        status => { state => 'startup' },
        workers => {},          # queued/running resource agent jobs, by sid
        results => {},          # finished command results, by uid
        restart_tries => {},    # per-service restart retry counters
        shutdown_request => 0,
        shutdown_errors => 0,
        # mode can be: active, reboot, shutdown, restart
        mode => 'active',
        cluster_state_update => 0,
    };
    bless $self, $class;

    $self->set_local_status({ state => 'wait_for_agent_lock' });

    return $self;
}
44
# Handle an external shutdown/restart request: on a real node shutdown
# (poweroff or reboot) queue stop jobs for all local services, select the
# LRM mode ('shutdown' vs. 'restart'), and persist the new state.
# Idempotent - calls after the first are no-ops.
sub shutdown_request {
    my ($self) = @_;

    return if $self->{shutdown_request}; # already in shutdown mode

    my $haenv = $self->{haenv};

    my $nodename = $haenv->nodename();

    my ($shutdown, $reboot) = $haenv->is_node_shutdown();

    if ($shutdown) {
        # *always* queue stop jobs for all services if the node shuts down,
        # independent if it's a reboot or a poweroff, else we may corrupt
        # services or hinder node shutdown
        my $ss = $self->{service_status};

        foreach my $sid (keys %$ss) {
            my $sd = $ss->{$sid};
            next if !$sd->{node};
            next if $sd->{node} ne $nodename;
            # Note: use undef uid to mark shutdown/stop jobs
            $self->queue_resource_command($sid, undef, 'request_stop');
        }

        if ($reboot) {
            $haenv->log('info', "reboot LRM, stop and freeze all services");
            $self->{mode} = 'restart';
        } else {
            $haenv->log('info', "shutdown LRM, stop all services");
            $self->{mode} = 'shutdown';
        }
    } else {
        # LRM daemon restart only - keep services running but frozen
        $haenv->log('info', "restart LRM, freeze all services");
        $self->{mode} = 'restart';
    }

    $self->{shutdown_request} = 1;

    eval { $self->update_lrm_status(); };
    if (my $err = $@) {
        # FIX: was "$self->log(...)" - LRM has no log() method, logging is
        # provided by the environment object (as in all other call sites)
        $haenv->log('err', "unable to update lrm status file - $err");
    }
}
91
# Accessor: return the in-memory LRM status hash ({ state => ..., ... }).
sub get_local_status {
    my ($self) = @_;
    return $self->{status};
}
97
# Transition the LRM to a new local state. Dies on unknown state names,
# ignores no-op transitions, and stamps the change time on real ones.
sub set_local_status {
    my ($self, $new) = @_;

    my $state = $new->{state};
    die "invalid state '$state'" if !$valid_states->{$state};

    my $haenv = $self->{haenv};
    my $old = $self->{status};

    # important: only update if the state really changed
    return if $old->{state} eq $state;

    $haenv->log('info', "status change $old->{state} => $state");

    $new->{state_change_time} = $haenv->get_time();
    $self->{status} = $new;
}
116
# Persist the current LRM state (state, mode, command results, timestamp)
# through the environment. Skipped entirely without quorum. Returns 1 on
# success, 0 if not quorate or the write failed (failure is logged).
sub update_lrm_status {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    return 0 unless $haenv->quorate();

    my $ok = eval {
        $haenv->write_lrm_status({
            state => $self->{status}->{state},
            mode => $self->{mode},
            results => $self->{results},
            timestamp => $haenv->get_time(),
        });
        1;
    };
    if (!$ok) {
        $haenv->log('err', "unable to write lrm status file - $@");
        return 0;
    }

    return 1;
}
139
# Refresh our cached copy of the manager's per-service status. Returns 1
# on success; on failure logs the error and returns undef, leaving the
# previously cached status untouched.
sub update_service_status {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    my $ms = eval { $haenv->read_manager_status(); };
    if (my $err = $@) {
        $haenv->log('err', "updating service status from manager failed: $err");
        return undef;
    }

    $self->{service_status} = $ms->{service_status} || {};
    return 1;
}
154
# Try to acquire the HA agent lock; on success also open (first time) or
# refresh the watchdog so the node self-fences if we later hang while
# holding the lock. Retries for at most 5 attempts within ~5 seconds.
# Returns 1 on success, 0 on failure.
sub get_protected_ha_agent_lock {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    my $tries = 0;
    my $starttime = $haenv->get_time();

    while (1) {
        if ($haenv->get_ha_agent_lock()) {
            if ($self->{ha_agent_wd}) {
                $haenv->watchdog_update($self->{ha_agent_wd});
            } else {
                # first successful acquisition: arm the watchdog
                $self->{ha_agent_wd} = $haenv->watchdog_open();
            }
            return 1;
        }

        last if ++$tries > 5; # try at most 5 times ...
        last if ($haenv->get_time() - $starttime) > 5; # ... within 5 seconds

        $haenv->sleep(1);
    }

    return 0;
}
185
# Count the services on this node that the HA stack actively manages.
# Services that are 'stopped', 'freeze' or in 'error' (erroneous services
# are not managed by HA) do not count as active.
sub active_service_count {
    my ($self) = @_;

    my $haenv = $self->{haenv};
    my $nodename = $haenv->nodename();

    my $ss = $self->{service_status};

    my %inactive_state = (stopped => 1, freeze => 1, error => 1);

    my $count = 0;
    for my $sid (keys %$ss) {
        my $sd = $ss->{$sid};
        next if !$sd->{node} || $sd->{node} ne $nodename;

        my $req_state = $sd->{state};
        next if !defined($req_state);
        next if $inactive_state{$req_state};

        $count++;
    }

    return $count;
}
213
214 my $wrote_lrm_status_at_startup = 0;
215
# Run one main-loop iteration: refresh the cluster state snapshot and
# execute work(), wrapped by the environment's loop start/end hooks.
# Returns work()'s result; a false value means "terminate the loop".
sub do_one_iteration {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    $haenv->loop_start_hook();
    $self->{cluster_state_update} = $haenv->cluster_state_update();

    my $res = $self->work();

    $haenv->loop_end_hook();

    return $res;
}
231
# The LRM state machine, run once per iteration. First performs state
# transitions (wait_for_agent_lock <-> active <-> lost_agent_lock), then
# does the work for the resulting state: manage resources while active,
# wind down on shutdown requests, or just wait/sleep otherwise.
# Returns 1 to continue the main loop, 0 to terminate it.
sub work {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    # persist our status once at startup; this fails without quorum, so
    # keep retrying every iteration until it succeeds
    if (!$wrote_lrm_status_at_startup) {
        if ($self->update_lrm_status()) {
            $wrote_lrm_status_at_startup = 1;
        } else {
            # do nothing
            $haenv->sleep(5);
            return $self->{shutdown_request} ? 0 : 1;
        }
    }

    my $status = $self->get_local_status();
    my $state = $status->{state};

    $self->update_service_status();

    # number of services the manager wants fenced on this node
    my $fence_request = PVE::HA::Tools::count_fenced_services($self->{service_status}, $haenv->nodename());

    # do state changes first

    my $ctime = $haenv->get_time();

    if ($state eq 'wait_for_agent_lock') {

        my $service_count = $self->active_service_count();

        # only grab the lock (and arm the watchdog) if there is actually
        # something to manage and we are quorate
        if (!$fence_request && $service_count && $haenv->quorate()) {
            if ($self->get_protected_ha_agent_lock()) {
                $self->set_local_status({ state => 'active' });
            }
        }

    } elsif ($state eq 'lost_agent_lock') {

        # try to reacquire the lock as soon as we are quorate again
        if (!$fence_request && $haenv->quorate()) {
            if ($self->get_protected_ha_agent_lock()) {
                $self->set_local_status({ state => 'active' });
            }
        }

    } elsif ($state eq 'active') {

        # drop to lost_agent_lock if we are to be fenced or cannot renew
        # the lock; the watchdog will then fence us unless it recovers
        if ($fence_request) {
            $haenv->log('err', "node need to be fenced - releasing agent_lock\n");
            $self->set_local_status({ state => 'lost_agent_lock'});
        } elsif (!$self->get_protected_ha_agent_lock()) {
            $self->set_local_status({ state => 'lost_agent_lock'});
        }
    }

    $status = $self->get_local_status();
    $state = $status->{state};

    # do work

    if ($state eq 'wait_for_agent_lock') {

        return 0 if $self->{shutdown_request};

        $self->update_lrm_status();

        $haenv->sleep(5);

    } elsif ($state eq 'active') {

        my $startime = $haenv->get_time();

        my $max_time = 10;

        my $shutdown = 0;

        # do work (max_time seconds)
        eval {
            # fixme: set alert timer

            # if we could not get the current service status there's no point
            # in doing anything, try again next round.
            return if !$self->update_service_status();

            if ($self->{shutdown_request}) {

                if ($self->{mode} eq 'restart') {

                    # LRM restart: wait until all services are stopped or
                    # frozen, then drop watchdog and lock
                    my $service_count = $self->active_service_count();

                    if ($service_count == 0) {

                        if ($self->run_workers() == 0) {
                            if ($self->{ha_agent_wd}) {
                                $haenv->watchdog_close($self->{ha_agent_wd});
                                delete $self->{ha_agent_wd};
                            }

                            $shutdown = 1;

                            # restart with no or freezed services, release the lock
                            $haenv->release_ha_agent_lock();
                        }
                    }
                } else {

                    # node shutdown: wait for all queued stop jobs; only
                    # disarm watchdog/lock if every stop succeeded
                    if ($self->run_workers() == 0) {
                        if ($self->{shutdown_errors} == 0) {
                            if ($self->{ha_agent_wd}) {
                                $haenv->watchdog_close($self->{ha_agent_wd});
                                delete $self->{ha_agent_wd};
                            }

                            # shutdown with all services stopped thus release the lock
                            $haenv->release_ha_agent_lock();
                        }

                        $shutdown = 1;
                    }
                }
            } else {

                $self->manage_resources();

            }
        };
        if (my $err = $@) {
            $haenv->log('err', "got unexpected error - $err");
        }

        $self->update_lrm_status();

        return 0 if $shutdown;

        $haenv->sleep_until($startime + $max_time);

    } elsif ($state eq 'lost_agent_lock') {

        # Note: watchdog is active and will trigger soon!

        # so we hope to get the lock back soon!

        if ($self->{shutdown_request}) {

            my $service_count = $self->active_service_count();

            if ($service_count > 0) {
                # cannot shut down safely with services still running and
                # no agent lock - only log, the watchdog will handle it
                $haenv->log('err', "get shutdown request in state 'lost_agent_lock' - " .
                            "detected $service_count running services");

            } else {

                # all services are stopped, so we can close the watchdog

                if ($self->{ha_agent_wd}) {
                    $haenv->watchdog_close($self->{ha_agent_wd});
                    delete $self->{ha_agent_wd};
                }

                return 0;
            }
        }

        $haenv->sleep(5);

    } else {

        die "got unexpected status '$state'\n";

    }

    return 1;
}
404
# Start queued worker commands and reap finished ones, looping for at
# most ~5 seconds. Forks up to $max_workers children; with max_workers
# set to 0 commands run inline (used by the regression tests). Returns
# the number of workers still queued or running afterwards.
sub run_workers {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    my $starttime = $haenv->get_time();

    # number of workers to start, if 0 we exec the command directly without forking
    my $max_workers = $haenv->get_max_workers();

    my $sc = $haenv->read_service_config();

    while (($haenv->get_time() - $starttime) < 5) {
        my $count = $self->check_active_workers();

        foreach my $sid (sort keys %{$self->{workers}}) {
            # respect the concurrency limit (0 means inline execution)
            last if $count >= $max_workers && $max_workers > 0;

            my $w = $self->{workers}->{$sid};
            if (!$w->{pid}) {
                # only fork if we may else call exec_resource_agent
                # directly (e.g. for regression tests)
                if ($max_workers > 0) {
                    my $pid = fork();
                    if (!defined($pid)) {
                        $haenv->log('err', "fork worker failed");
                        $count = 0; last; # abort, try later
                    } elsif ($pid == 0) {
                        # child: run the agent, exit with its result code
                        $haenv->after_fork(); # cleanup

                        # do work
                        my $res = -1;
                        eval {
                            $res = $self->exec_resource_agent($sid, $sc->{$sid}, $w->{state}, $w->{target});
                        };
                        if (my $err = $@) {
                            $haenv->log('err', $err);
                            POSIX::_exit(-1);
                        }
                        POSIX::_exit($res);
                    } else {
                        # parent: remember the child pid for reaping
                        $count++;
                        $w->{pid} = $pid;
                    }
                } else {
                    # inline execution; shift the exit code left by 8 so
                    # the result looks like a wait() status to the
                    # *_command_finished handlers
                    my $res = -1;
                    eval {
                        $res = $self->exec_resource_agent($sid, $sc->{$sid}, $w->{state}, $w->{target});
                        $res = $res << 8 if $res > 0;
                    };
                    if (my $err = $@) {
                        $haenv->log('err', $err);
                    }
                    if (defined($w->{uid})) {
                        $self->resource_command_finished($sid, $w->{uid}, $res);
                    } else {
                        $self->stop_command_finished($sid, $res);
                    }
                }
            }
        }

        # nothing running anymore (or fork failed) - stop polling early
        last if !$count;

        $haenv->sleep(1);
    }

    return scalar(keys %{$self->{workers}});
}
474
# Queue the command requested by the manager for every service assigned
# to this node (skipping frozen ones), drop restart counters of services
# the manager no longer knows, then run the queued workers. Returns the
# number of workers still active.
sub manage_resources {
    my ($self) = @_;

    my $haenv = $self->{haenv};
    my $nodename = $haenv->nodename();

    my $ss = $self->{service_status};

    # forget restart counters for services gone from the manager status
    for my $sid (keys %{$self->{restart_tries}}) {
        delete $self->{restart_tries}->{$sid} if !$ss->{$sid};
    }

    for my $sid (keys %$ss) {
        my $sd = $ss->{$sid};
        next if !$sd->{node} || !$sd->{uid};
        next if $sd->{node} ne $nodename;

        my $req_state = $sd->{state};
        next if !defined($req_state);
        next if $req_state eq 'freeze';

        $self->queue_resource_command($sid, $sd->{uid}, $req_state, $sd->{target});
    }

    return $self->run_workers();
}
501
# Register a pending resource agent command for $sid. Commands whose uid
# already has a recorded result are skipped (except 'stopped', see
# below); a worker already running for $sid is left untouched, while a
# merely queued entry gets replaced by the new command.
sub queue_resource_command {
    my ($self, $sid, $uid, $state, $target) = @_;

    # do not queue the exactly same command twice as this may lead to
    # an inconsistent HA state when the first command fails but the CRM
    # does not process its failure right away and the LRM starts a second
    # try, without the CRM knowing of it (race condition)
    # The 'stopped' command is an exception as we do not process its result
    # in the CRM and we want to execute it always (even with no active CRM)
    return if $state ne 'stopped' && $uid && defined($self->{results}->{$uid});

    if (my $worker = $self->{workers}->{$sid}) {
        return if $worker->{pid}; # already started
        # else, delete and overwrite queue entry with new command
        delete $self->{workers}->{$sid};
    }

    my $entry = {
        sid => $sid,
        uid => $uid,
        state => $state,
    };
    $entry->{target} = $target if $target;

    $self->{workers}->{$sid} = $entry;
}
527
# Reap finished worker child processes (non-blocking waitpid) and route
# each child's wait status ($?) to the matching finish handler; workers
# with an undef uid are shutdown/stop jobs (see queue_resource_command).
# Returns the number of workers still running.
sub check_active_workers {
    my ($self) = @_;

    # finish/count workers
    my $count = 0;
    foreach my $sid (keys %{$self->{workers}}) {
        my $w = $self->{workers}->{$sid};
        if (my $pid = $w->{pid}) {
            # check status
            my $waitpid = waitpid($pid, WNOHANG);
            if (defined($waitpid) && ($waitpid == $pid)) {
                # child exited - dispatch by job type
                if (defined($w->{uid})) {
                    $self->resource_command_finished($sid, $w->{uid}, $?);
                } else {
                    $self->stop_command_finished($sid, $?);
                }
            } else {
                $count++;
            }
        }
    }

    return $count;
}
552
# Evaluate the wait status of a finished shutdown/stop job for $sid and
# drop its worker entry. Any failure - exec error, death by signal, or a
# non-zero exit code - bumps the shutdown error counter, which blocks
# releasing watchdog and lock on shutdown (see work()).
sub stop_command_finished {
    my ($self, $sid, $status) = @_;

    my $haenv = $self->{haenv};

    my $w = delete $self->{workers}->{$sid};
    return if !$w; # should not happen

    my $exit_code = -1;
    if ($status == -1) {
        $haenv->log('err', "resource agent $sid finished - failed to execute");
    } elsif (my $sig = ($status & 127)) {
        $haenv->log('err', "resource agent $sid finished - got signal $sig");
    } else {
        $exit_code = $status >> 8;
    }

    $self->{shutdown_errors}++ if $exit_code != 0;
}
575
# Process the wait status of a finished resource agent command: decode
# the exit code, let handle_service_exitcode() apply the restart policy
# (ETRY_AGAIN results are not recorded so the command is simply retried),
# store the result under its uid for the CRM, and prune stored results
# whose uid no longer appears in any service status entry.
sub resource_command_finished {
    my ($self, $sid, $uid, $status) = @_;

    my $haenv = $self->{haenv};

    my $w = delete $self->{workers}->{$sid};
    return if !$w; # should not happen

    my $exit_code = -1;

    # decode the wait() status: -1 means exec failure, the low 7 bits a
    # terminating signal, otherwise the high byte holds the exit code
    if ($status == -1) {
        $haenv->log('err', "resource agent $sid finished - failed to execute");
    } elsif (my $sig = ($status & 127)) {
        $haenv->log('err', "resource agent $sid finished - got signal $sig");
    } else {
        $exit_code = ($status >> 8);
    }

    $exit_code = $self->handle_service_exitcode($sid, $w->{state}, $exit_code);

    return if $exit_code == ETRY_AGAIN; # tell nobody, simply retry

    $self->{results}->{$uid} = {
        sid => $w->{sid},
        state => $w->{state},
        exit_code => $exit_code,
    };

    my $ss = $self->{service_status};

    # compute hash of valid/existing uids
    my $valid_uids = {};
    foreach my $sid (keys %$ss) {
        my $sd = $ss->{$sid};
        next if !$sd->{uid};
        $valid_uids->{$sd->{uid}} = 1;
    }

    # drop results for commands no service references anymore
    my $results = {};
    foreach my $id (keys %{$self->{results}}) {
        next if !$valid_uids->{$id};
        $results->{$id} = $self->{results}->{$id};
    }
    $self->{results} = $results;
}
621
# processes the exit code from a finished resource agent, so that the CRM knows
# if the LRM wants to retry an action based on the current recovery policies for
# the failed service, or the CRM itself must try to recover from the failure.
# Returns the (possibly rewritten) exit code; ETRY_AGAIN signals a retry.
sub handle_service_exitcode {
    my ($self, $sid, $cmd, $exit_code) = @_;

    my $haenv = $self->{haenv};
    my $tries = $self->{restart_tries};

    my $sc = $haenv->read_service_config();

    # default: no restarts if the service has no (valid) configuration
    my $max_restart = 0;
    if (my $cd = $sc->{$sid}) {
        # FIX: only override the default when the config actually carries
        # a defined value - previously an undef entry overwrote the 0
        # default and caused an uninitialized-value warning below
        $max_restart = $cd->{max_restart} if defined($cd->{max_restart});
    }

    if ($cmd eq 'started') {

        if ($exit_code == SUCCESS) {
            # service is up (again): reset the restart retry counter
            $tries->{$sid} = 0;
            return $exit_code;

        } elsif ($exit_code == ERROR) {

            $tries->{$sid} = 0 if !defined($tries->{$sid});

            if ($tries->{$sid} >= $max_restart) {
                # retry budget exhausted - hand the failure to the CRM
                $haenv->log('err', "unable to start service $sid on local node".
                            " after $tries->{$sid} retries");
                $tries->{$sid} = 0;
                return ERROR;
            }

            $tries->{$sid}++;

            $haenv->log('warning', "restart policy: retry number $tries->{$sid}" .
                        " for service '$sid'");
            # tell CRM that we retry the start
            return ETRY_AGAIN;
        }
    }

    return $exit_code;
}
670
# Execute one resource agent command ($cmd, e.g. 'started', 'stopped',
# 'migrate') for service $sid via the matching resource plugin. Runs in a
# (possibly forked) worker context; returns one of the HA exit codes
# (SUCCESS, ERROR, E*) describing the outcome, or dies on a missing
# migration target.
sub exec_resource_agent {
    my ($self, $sid, $service_config, $cmd, @params) = @_;

    # setup execution environment
    $ENV{'PATH'} = '/sbin:/bin:/usr/sbin:/usr/bin';

    my $haenv = $self->{haenv};

    my $nodename = $haenv->nodename();

    my (undef, $service_type, $service_name) = PVE::HA::Tools::parse_sid($sid);

    my $plugin = PVE::HA::Resources->lookup($service_type);
    if (!$plugin) {
        $haenv->log('err', "service type '$service_type' not implemented");
        return EUNKNOWN_SERVICE_TYPE;
    }

    if (!$service_config) {
        $haenv->log('err', "missing resource configuration for '$sid'");
        return EUNKNOWN_SERVICE;
    }

    # process error state early
    if ($cmd eq 'error') {
        $haenv->log('err', "service $sid is in an error state and needs manual " .
                    "intervention. Look up 'ERROR RECOVERY' in the documentation.");
        return SUCCESS; # error always succeeds
    }

    if ($service_config->{node} ne $nodename) {
        $haenv->log('err', "service '$sid' not on this node");
        return EWRONG_NODE;
    }

    my $id = $service_name;

    my $running = $plugin->check_running($haenv, $id);

    if ($cmd eq 'started') {

        return SUCCESS if $running;

        $haenv->log("info", "starting service $sid");

        $plugin->start($haenv, $id);

        $running = $plugin->check_running($haenv, $id);

        if ($running) {
            $haenv->log("info", "service status $sid started");
            return SUCCESS;
        } else {
            $haenv->log("warning", "unable to start service $sid");
            return ERROR;
        }

    } elsif ($cmd eq 'request_stop' || $cmd eq 'stopped') {

        return SUCCESS if !$running;

        $haenv->log("info", "stopping service $sid");

        $plugin->shutdown($haenv, $id);

        $running = $plugin->check_running($haenv, $id);

        if (!$running) {
            $haenv->log("info", "service status $sid stopped");
            return SUCCESS;
        } else {
            # FIX: message previously read "unable to stop stop service"
            $haenv->log("info", "unable to stop service $sid (still running)");
            return ERROR;
        }

    } elsif ($cmd eq 'migrate' || $cmd eq 'relocate') {

        my $target = $params[0];
        # FIX: the old code wrapped this die in an outer if on the same
        # condition, followed by an unreachable 'return EINVALID_PARAMETER';
        # the die was the effective behavior, so keep only the die
        die "$cmd '$sid' failed - missing target\n" if !defined($target);

        if ($service_config->{node} eq $target) {
            # already there
            return SUCCESS;
        }

        # 'migrate' moves the service online, 'relocate' stops it first
        my $online = ($cmd eq 'migrate') ? 1 : 0;

        my $res = $plugin->migrate($haenv, $id, $target, $online);

        # something went wrong if service is still on this node
        if (!$res) {
            $haenv->log("err", "service $sid not moved (migration error)");
            return ERROR;
        }

        return SUCCESS;
    }

    $haenv->log("err", "implement me (cmd '$cmd')");
    return EUNKNOWN_COMMAND;
}
779
780
781 1;