]> git.proxmox.com Git - pve-ha-manager.git/blob - src/PVE/HA/LRM.pm
e3f44f727e7afb4e80802026de21fe57f471427c
[pve-ha-manager.git] / src / PVE / HA / LRM.pm
1 package PVE::HA::LRM;
2
3 # Local Resource Manager
4
5 use strict;
6 use warnings;
7 use POSIX qw(:sys_wait_h);
8
9 use PVE::SafeSyslog;
10 use PVE::Tools;
11 use PVE::HA::Tools ':exit_codes';
12 use PVE::HA::Resources;
13
# Server can have several states:

# Maps each valid LRM state name to a human-readable description; used by
# set_local_status() to reject unknown states.
my $valid_states = {
    wait_for_agent_lock => "waiting for agent lock",
    active => "got agent_lock",
    maintenance => "going into maintenance",
    lost_agent_lock => "lost agent_lock",
};

# we sleep ~10s per 'active' round, so if no service is available for >= 10 min we'd go in wait
# state giving up the watchdog and the LRM lock voluntarily, ensuring the WD can do no harm
my $max_active_idle_rounds = 60;
26
# Constructor: build an LRM instance bound to the given HA environment and
# move it into the initial 'wait_for_agent_lock' state.
sub new {
    my ($this, $haenv) = @_;

    my $class = ref($this) || $this;

    my %init = (
        haenv => $haenv,
        status => { state => 'startup' },
        workers => {},          # queued/running resource agent jobs, by sid
        results => {},          # finished command results, by uid
        restart_tries => {},    # per-service restart-policy retry counters
        shutdown_request => 0,
        shutdown_errors => 0,
        # mode can be: active, reboot, shutdown, restart
        mode => 'active',
        cluster_state_update => 0,
        active_idle_rounds => 0,
    );

    my $self = bless { %init }, $class;

    $self->set_local_status({ state => 'wait_for_agent_lock' });

    return $self;
}
50
# Handle an incoming shutdown/restart request for this LRM: evaluate the
# datacenter shutdown policy, queue stop jobs for local services on a real
# node shutdown, pick the resulting LRM mode (maintenance/restart/shutdown)
# and persist the new status. Idempotent: repeated calls are no-ops once a
# request was recorded.
sub shutdown_request {
    my ($self) = @_;

    return if $self->{shutdown_request}; # already in shutdown mode

    my $haenv = $self->{haenv};

    my $nodename = $haenv->nodename();

    my ($shutdown, $reboot) = $haenv->is_node_shutdown();

    my $dc_cfg = $haenv->get_datacenter_settings();
    my $shutdown_policy = $dc_cfg->{ha}->{shutdown_policy} // 'conditional';

    if ($shutdown) { # don't log this on service restart, only on node shutdown
        $haenv->log('info', "got shutdown request with shutdown policy '$shutdown_policy'");
    }

    # policy -> behavior: 'conditional' freezes only on reboot, 'freeze'
    # always freezes, 'failover' never freezes, 'migrate' enters maintenance
    my $freeze_all;
    my $maintenance;
    if ($shutdown_policy eq 'conditional') {
        $freeze_all = $reboot;
    } elsif ($shutdown_policy eq 'freeze') {
        $freeze_all = 1;
    } elsif ($shutdown_policy eq 'failover') {
        $freeze_all = 0;
    } elsif ($shutdown_policy eq 'migrate') {
        $maintenance = 1;
    } else {
        $haenv->log('err', "unknown shutdown policy '$shutdown_policy', fall back to conditional");
        $freeze_all = $reboot;
    }

    if ($maintenance) {
        # we get marked as unavailable by the manager, then all services will
        # be migrated away; we'll still have the same "can we exit" clause as
        # a normal shutdown -> no running service on this node
        # FIXME: after X minutes, add shutdown command for remaining services,
        # e.g., if they have no alternative node???
    } elsif ($shutdown) {
        # *always* queue stop jobs for all services if the node shuts down,
        # independent if it's a reboot or a poweroff, else we may corrupt
        # services or hinder node shutdown
        my $ss = $self->{service_status};

        foreach my $sid (keys %$ss) {
            my $sd = $ss->{$sid};
            next if !$sd->{node};
            next if $sd->{node} ne $nodename;
            # Note: use undef uid to mark shutdown/stop jobs
            $self->queue_resource_command($sid, undef, 'request_stop');
        }
    }

    if ($shutdown) {
        my $shutdown_type = $reboot ? 'reboot' : 'shutdown';
        if ($maintenance) {
            $haenv->log('info', "$shutdown_type LRM, doing maintenance, removing this node from active list");
            $self->{mode} = 'maintenance';
        } elsif ($freeze_all) {
            $haenv->log('info', "$shutdown_type LRM, stop and freeze all services");
            $self->{mode} = 'restart';
        } else {
            $haenv->log('info', "shutdown LRM, stop all services");
            $self->{mode} = 'shutdown';
        }
    } else {
        $haenv->log('info', "restart LRM, freeze all services");
        $self->{mode} = 'restart';
    }

    # timestamp doubles as the boolean "shutdown was requested" flag
    $self->{shutdown_request} = $haenv->get_time();

    eval { $self->update_lrm_status() or die "not quorate?\n"; };
    if (my $err = $@) {
        $haenv->log('err', "unable to update lrm status file - $err");
    }
}
129
# Accessor for the cached local status hash ({ state, state_change_time, ... }).
sub get_local_status {
    my $self = shift;

    return $self->{status};
}
135
# Transition the LRM to a new local state. Dies on unknown states; silently
# ignores no-op transitions. Records the transition time in the status hash.
sub set_local_status {
    my ($self, $new) = @_;

    my $new_state = $new->{state};
    die "invalid state '$new_state'" if !$valid_states->{$new_state};

    my $haenv = $self->{haenv};
    my $old = $self->{status};

    # important: only update if the state really changed
    return if $old->{state} eq $new_state;

    $haenv->log('info', "status change $old->{state} => $new_state");

    $new->{state_change_time} = $haenv->get_time();

    $self->{status} = $new;
}
154
# Persist the current LRM state/mode/results via the HA environment.
# Returns 1 on success, 0 when we lack quorum or the write failed.
sub update_lrm_status {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    # writing the status file needs quorum; report failure otherwise
    return 0 if !$haenv->quorate();

    my $lrm_status = {
        timestamp => $haenv->get_time(),
        state => $self->{status}->{state},
        mode => $self->{mode},
        results => $self->{results},
    };

    eval { $haenv->write_lrm_status($lrm_status); };
    if (my $err = $@) {
        $haenv->log('err', "unable to write lrm status file - $err");
        return 0;
    }

    return 1;
}
177
# Refresh the cached service/node status from the manager status file and,
# unless a shutdown is already in progress, apply a pending per-node
# maintenance request to our mode. Returns 1 on success, nothing on failure.
sub update_service_status {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    my $ms = eval { $haenv->read_manager_status(); };
    if (my $err = $@) {
        $haenv->log('err', "updating service status from manager failed: $err");
        # bare return instead of `return undef`: callers only test truthiness,
        # and this avoids a spurious one-element list in list context
        return;
    }

    $self->{service_status} = $ms->{service_status} || {};
    my $nodename = $haenv->nodename();
    $self->{node_status} = $ms->{node_status}->{$nodename} || 'unknown';

    # FIXME: method name is a bit confusing for doing this, either rename or move
    if (!$self->{shutdown_request}) {
        my $request = $ms->{node_request}->{$nodename} // {};
        if ($request->{maintenance}) {
            $self->{mode} = 'maintenance';
        } elsif ($self->{mode} eq 'maintenance') {
            # maintenance request was cleared -> back to active
            $self->{mode} = 'active';
        }
    }

    return 1;
}
205
# Try to acquire (or renew) the HA agent lock and keep the watchdog fed while
# we hold it. Returns 1 on success, 0 once the retry budget (attempt count or
# elapsed time) is exhausted.
sub get_protected_ha_agent_lock {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    my $count = 0;
    my $starttime = $haenv->get_time();

    for (;;) {

        if ($haenv->get_ha_agent_lock()) {
            # got the lock: open the watchdog on first acquisition, feed it on renewal
            if ($self->{ha_agent_wd}) {
                $haenv->watchdog_update($self->{ha_agent_wd});
            } else {
                my $wfh = $haenv->watchdog_open();
                $self->{ha_agent_wd} = $wfh;
            }
            return 1;
        }

        # NOTE(review): post-increment check means up to 6 lock attempts happen,
        # not 5 — confirm whether that off-by-one is intended
        last if ++$count > 5; # try max 5 time

        my $delay = $haenv->get_time() - $starttime;
        last if $delay > 5; # for max 5 seconds

        $haenv->sleep(1);
    }

    return 0;
}
236
# only cares if any service has the local node as their node, independent of which req.state it is
sub has_configured_service_on_local_node {
    my ($self) = @_;

    my $nodename = $self->{haenv}->nodename();
    my $ss = $self->{service_status};

    for my $sd (values %$ss) {
        return 1 if $sd->{node} && $sd->{node} eq $nodename;
    }

    return 0;
}
253
# A fence is requested either because single services of this node are marked
# for fencing, or because the whole node is in 'fence' state. Returns the
# fenced-service count when non-zero, otherwise the node-state check result.
sub is_fence_requested {
    my ($self) = @_;

    my $haenv = $self->{haenv};
    my $nodename = $haenv->nodename();

    my $fence_count =
        PVE::HA::Tools::count_fenced_services($self->{service_status}, $nodename);

    return $fence_count || $self->{node_status} eq 'fence';
}
266
# True when maintenance is pending, be it from shutdown policy or manual request.
sub is_maintenance_requested {
    my $self = shift;

    my $mode = $self->{mode};

    return $mode eq 'maintenance';
}
273
# Count services on the local node whose requested state makes them "active",
# i.e. something the LRM still has to act on before it may give up its lock.
sub active_service_count {
    my ($self) = @_;

    my $nodename = $self->{haenv}->nodename();
    my $ss = $self->{service_status};

    my $count = 0;
    for my $sd (values %$ss) {
        next if !$sd->{node} || $sd->{node} ne $nodename;

        my $req_state = $sd->{state};
        next if !defined($req_state);

        # Non-active request states:
        #  - stopped: nothing to do
        #  - freeze: frozen ('ignored' ones are already dropped by the manager)
        #  - error: erroneous services are not managed by HA
        #  - request_start: intermediate state for (optional) better node
        #    selection on the stop -> started transition
        next
            if $req_state eq 'stopped'
            || $req_state eq 'freeze'
            || $req_state eq 'error'
            || $req_state eq 'request_start';

        $count++;
    }

    return $count;
}
302
# File-scoped flag: set once the initial LRM status write in work() succeeded,
# so that startup write is retried every round only until it worked once.
my $wrote_lrm_status_at_startup = 0;
304
# Run one LRM main-loop iteration: refresh the cluster state snapshot and
# execute work(), bracketed by the environment's loop hooks.
# Returns work()'s result (0 signals the caller to exit the loop).
sub do_one_iteration {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    $haenv->loop_start_hook();

    $self->{cluster_state_update} = $haenv->cluster_state_update();

    my $result = $self->work();

    $haenv->loop_end_hook();

    return $result;
}
320
# NOTE: this is disabling the self-fence mechanism, so it must NOT be called with active services
# It's normally *only* OK on graceful shutdown (with no services, or all services frozen)
my sub give_up_watchdog_protection {
    my ($self) = @_;

    my $wd = $self->{ha_agent_wd};
    return if !$wd;

    $self->{haenv}->watchdog_close($wd);
    delete $self->{ha_agent_wd}; # only delete after close!
}
331
# The LRM main state machine, run once per loop iteration. First performs
# state *transitions* (wait_for_agent_lock/lost_agent_lock/active/maintenance),
# then does the *work* for the resulting state. Returns 0 to request LRM exit,
# 1 (or another true value) to continue looping.
sub work {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    # write the status file once at startup; until that succeeds only idle
    if (!$wrote_lrm_status_at_startup) {
        if ($self->update_lrm_status()) {
            $wrote_lrm_status_at_startup = 1;
        } else {
            # do nothing
            $haenv->sleep(5);
            return $self->{shutdown_request} ? 0 : 1;
        }
    }

    my $status = $self->get_local_status();
    my $state = $status->{state};

    $self->update_service_status();

    my $fence_request = $self->is_fence_requested();

    # do state changes first

    my $ctime = $haenv->get_time();

    if ($state eq 'wait_for_agent_lock') {

        my $service_count = $self->active_service_count();

        # only become active when there is something to do, no fence is
        # pending and we are quorate
        if (!$fence_request && $service_count && $haenv->quorate()) {
            if ($self->get_protected_ha_agent_lock()) {
                $self->set_local_status({ state => 'active' });
            }
        }

    } elsif ($state eq 'lost_agent_lock') {

        # try to regain the lock (and thus watchdog protection)
        if (!$fence_request && $haenv->quorate()) {
            if ($self->get_protected_ha_agent_lock()) {
                $self->set_local_status({ state => 'active' });
            }
        }

    } elsif ($state eq 'active') {

        if ($fence_request) {
            $haenv->log('err', "node need to be fenced - releasing agent_lock\n");
            $self->set_local_status({ state => 'lost_agent_lock'});
        } elsif (!$self->get_protected_ha_agent_lock()) {
            $self->set_local_status({ state => 'lost_agent_lock'});
        } elsif ($self->is_maintenance_requested()) {
            $self->set_local_status({ state => 'maintenance'});
        } else {
            if (!$self->has_configured_service_on_local_node() && !$self->run_workers()) {
                # no active service configured for this node and all (old) workers are done
                $self->{active_idle_rounds}++;
                if ($self->{active_idle_rounds} > $max_active_idle_rounds) {
                    $haenv->log('info', "node had no service configured for $max_active_idle_rounds rounds, going idle.\n");
                    # safety: no active service & no running worker for quite some time -> OK
                    $haenv->release_ha_agent_lock();
                    give_up_watchdog_protection($self);
                    $self->set_local_status({ state => 'wait_for_agent_lock'});
                    $self->{active_idle_rounds} = 0;
                }
            } elsif ($self->{active_idle_rounds}) {
                # services (or workers) showed up again -> reset idle counter
                $self->{active_idle_rounds} = 0;
            }
        }
    } elsif ($state eq 'maintenance') {

        if ($fence_request) {
            $haenv->log('err', "node need to be fenced during maintenance mode - releasing agent_lock\n");
            $self->set_local_status({ state => 'lost_agent_lock'});
        } elsif ($self->active_service_count() || $self->run_workers()) {
            # keep the lock and watchdog as long as not all services cleared the node
            if (!$self->get_protected_ha_agent_lock()) {
                $self->set_local_status({ state => 'lost_agent_lock'});
            }
        } elsif (!$self->is_maintenance_requested()) {
            # empty && no maintenance mode && not exited -> need to switch active again
            if ($self->get_protected_ha_agent_lock()) {
                $self->set_local_status({ state => 'active' });
            } else {
                $self->set_local_status({ state => 'lost_agent_lock'});
            }
        }
    }

    # re-read: the transition phase above may have changed our state
    $status = $self->get_local_status();
    $state = $status->{state};

    # do work

    if ($state eq 'wait_for_agent_lock') {

        return 0 if $self->{shutdown_request};

        $self->update_lrm_status();

        $haenv->sleep(5);

    } elsif ($state eq 'active') {

        my $startime = $haenv->get_time();

        my $max_time = 10;

        my $shutdown = 0;

        # do work (max_time seconds)
        eval {
            # fixme: set alert timer

            # if we could not get the current service status there's no point
            # in doing anything, try again next round.
            return if !$self->update_service_status();

            if ($self->{shutdown_request}) {

                if ($self->{mode} eq 'restart') {
                    # catch exited workers to update service state
                    my $workers = $self->run_workers();
                    my $service_count = $self->active_service_count();

                    if ($service_count == 0 && $workers == 0) {
                        # safety: no active services or workers -> OK
                        give_up_watchdog_protection($self);
                        $shutdown = 1;

                        # restart with no or frozen services, release the lock
                        $haenv->release_ha_agent_lock();
                    }
                } else {

                    if ($self->run_workers() == 0) {
                        if ($self->{shutdown_errors} == 0) {
                            # safety: no active services and LRM shutdown -> OK
                            give_up_watchdog_protection($self);

                            # shutdown with all services stopped thus release the lock
                            $haenv->release_ha_agent_lock();
                        }

                        $shutdown = 1;
                    }
                }
            } else {
                if (!$self->{cluster_state_update}) {
                    # update failed but we could still renew our lock (cfs restart?),
                    # safely skip manage and expect to update just fine next round
                    $haenv->log('notice', "temporary inconsistent cluster state " .
                        "(cfs restart?), skip round");
                    return;
                }

                $self->manage_resources();

            }
        };
        if (my $err = $@) {
            $haenv->log('err', "got unexpected error - $err");
        }

        $self->update_lrm_status();

        return 0 if $shutdown;

        $haenv->sleep_until($startime + $max_time);

    } elsif ($state eq 'lost_agent_lock') {

        # NOTE: watchdog is active and will trigger soon!
        # so we hope to get the lock back soon!
        if ($self->{shutdown_request}) {

            my $service_count = $self->active_service_count();

            if ($service_count > 0) {
                $haenv->log('err', "get shutdown request in state 'lost_agent_lock' - " .
                    "detected $service_count running services");

                if ($self->{mode} eq 'restart') {
                    my $state_mt = $self->{status}->{state_change_time};

                    # watchdog should have already triggered, so either it's
                    # set to noboot or it failed. As we are in restart mode, and
                    # have infinity stoptimeout -> exit now - we don't touch services
                    # or change state, so this is safe, relatively speaking
                    if (($haenv->get_time() - $state_mt) > 90) {
                        $haenv->log('err', "lost agent lock and restart request for over 90 seconds - giving up!");
                        return 0;
                    }
                }
            } else {
                # safety: all services are stopped, so we can close the watchdog
                give_up_watchdog_protection($self);

                return 0;
            }
        }

        $haenv->sleep(5);

    } elsif ($state eq 'maintenance') {

        my $startime = $haenv->get_time();
        return if !$self->update_service_status();

        # wait until all active services moved away
        my $service_count = $self->active_service_count();

        my $exit_lrm = 0;

        if ($service_count == 0 && $self->run_workers() == 0) {
            # safety: going into maintenance and all active services got moved -> OK
            give_up_watchdog_protection($self);

            if ($self->{shutdown_request}) {
                $exit_lrm = 1;
            }

            # maintenance mode with no or only frozen services anymore, release the lock
            $haenv->release_ha_agent_lock();
        }

        $self->manage_resources() if !$exit_lrm;

        $self->update_lrm_status();

        return 0 if $exit_lrm;

        $haenv->sleep_until($startime + 5);

    } else {

        die "got unexpected status '$state'\n";

    }

    return 1;
}
574
# Start queued resource agent jobs and reap finished ones, for at most ~8
# seconds per call. Forks one child per job up to the configured worker limit;
# with a limit of 0 the agent is executed inline (e.g. for the test harness).
# Returns the number of workers still queued or running.
sub run_workers {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    my $starttime = $haenv->get_time();

    # number of workers to start, if 0 we exec the command directly without forking
    my $max_workers = $haenv->get_max_workers();
    my $sc = $haenv->read_service_config();

    my $worker = $self->{workers};
    # we only got limited time but want to ensure that every queued worker is scheduled
    # eventually, so sort by the count a worker was seen here in this loop
    my $fair_sorter = sub {
        $worker->{$b}->{start_tries} <=> $worker->{$a}->{start_tries} || $a cmp $b
    };

    while (($haenv->get_time() - $starttime) <= 8) {
        my $count = $self->check_active_workers();

        # only consider queued entries (no pid yet), oldest/most-tried first
        for my $sid (sort $fair_sorter grep { !$worker->{$_}->{pid} } keys %$worker) {
            my $w = $worker->{$sid};
            # higher try-count means higher priority especially compared to newly queued jobs, so
            # count every try to avoid starvation
            $w->{start_tries}++;
            # FIXME: should be last and ensure that check_active_workers is called sooner
            next if $count >= $max_workers && $max_workers > 0;

            # only fork if we may, else call exec_resource_agent directly (e.g. for tests)
            if ($max_workers > 0) {
                my $pid = fork();
                if (!defined($pid)) {
                    $haenv->log('err', "forking worker failed - $!");
                    $count = 0; last; # abort, try later
                } elsif ($pid == 0) {
                    $haenv->after_fork(); # cleanup

                    # do work
                    my $res = -1;
                    eval {
                        $res = $self->exec_resource_agent($sid, $sc->{$sid}, $w->{state}, $w->{params});
                    };
                    if (my $err = $@) {
                        $haenv->log('err', $err);
                        POSIX::_exit(-1);
                    }
                    POSIX::_exit($res);
                } else {
                    # parent: remember the child pid so check_active_workers can reap it
                    $count++;
                    $w->{pid} = $pid;
                }
            } else {
                my $res = -1;
                eval {
                    $res = $self->exec_resource_agent($sid, $sc->{$sid}, $w->{state}, $w->{params});
                    # shift into the exit-code byte to mimic a wait() status word
                    $res = $res << 8 if $res > 0;
                };
                if (my $err = $@) {
                    $haenv->log('err', $err);
                }
                # undef uid marks shutdown/stop jobs (see shutdown_request)
                if (defined($w->{uid})) {
                    $self->resource_command_finished($sid, $w->{uid}, $res);
                } else {
                    $self->stop_command_finished($sid, $res);
                }
            }
        }

        last if !$count;

        $haenv->sleep(1);
    }

    return scalar(keys %{$self->{workers}});
}
651
# Queue a resource agent command for every manageable service assigned to the
# local node, then run the worker loop. Returns the remaining worker count.
sub manage_resources {
    my ($self) = @_;

    my $haenv = $self->{haenv};
    my $nodename = $haenv->nodename();

    my $ss = $self->{service_status};

    # drop retry counters of services that vanished from the status
    for my $sid (keys %{ $self->{restart_tries} }) {
        delete $self->{restart_tries}->{$sid} if !$ss->{$sid};
    }

    for my $sid (keys %$ss) {
        my $sd = $ss->{$sid};

        next if !$sd->{node} || !$sd->{uid};
        next if $sd->{node} ne $nodename;

        my $request_state = $sd->{state};
        next if !defined($request_state);

        # can only happen for restricted groups where the failed node itself needs to be the
        # recovery target. Always let the master first do so, it will then be marked as
        # 'stopped' and we can just continue normally. But we must NOT do anything with it
        # while still in recovery
        next if $request_state eq 'recovery';
        next if $request_state eq 'freeze';
        # intermediate step for optional better node selection on stop -> start request state change
        next if $request_state eq 'request_start';

        my $params = {
            'target' => $sd->{target},
            'timeout' => $sd->{timeout},
        };
        $self->queue_resource_command($sid, $sd->{uid}, $request_state, $params);
    }

    return $self->run_workers();
}
687
# Queue (or re-queue) a resource agent command for $sid. Started workers are
# never replaced; queued-but-not-started entries are overwritten.
sub queue_resource_command {
    my ($self, $sid, $uid, $state, $params) = @_;

    # do not queue the exact same command twice as this may lead to an inconsistent HA state when
    # the first command fails but the CRM does not process its failure right away and the LRM starts
    # a second try, without the CRM knowing of it (race condition) The 'stopped' command is an
    # exception as we do not process its result in the CRM and we want to execute it always (even
    # with no active CRM)
    if ($state ne 'stopped' && $uid && defined($self->{results}->{$uid})) {
        return;
    }

    if (my $queued = $self->{workers}->{$sid}) {
        return if $queued->{pid}; # already started
        # else, delete and overwrite queue entry with new command
        delete $self->{workers}->{$sid};
    }

    my $entry = {
        sid => $sid,
        uid => $uid,
        state => $state,
        start_tries => 0,
    };
    $entry->{params} = $params if $params;

    $self->{workers}->{$sid} = $entry;
}
713
# Reap finished worker children (non-blocking waitpid) and dispatch their
# wait status to the matching *_finished handler. Returns the number of
# workers that are still running.
sub check_active_workers {
    my ($self) = @_;

    # finish/count workers
    my $count = 0;
    foreach my $sid (keys %{$self->{workers}}) {
        my $w = $self->{workers}->{$sid};
        my $pid = $w->{pid} || next;    # skip queued-but-not-started entries

        my $waitpid = waitpid($pid, WNOHANG); # check status
        if (defined($waitpid) && ($waitpid == $pid)) {
            # child exited; $? holds its wait() status word
            if (defined($w->{uid})) {
                $self->resource_command_finished($sid, $w->{uid}, $?);
            } else {
                # undef uid marks shutdown/stop jobs (see shutdown_request)
                $self->stop_command_finished($sid, $?);
            }
        } else {
            $count++; # still active
        }
    }

    return $count;
}
737
# Finalize a finished shutdown/stop job for $sid: decode the wait() status and
# count any failure in {shutdown_errors}, which blocks releasing the lock and
# watchdog on LRM shutdown.
sub stop_command_finished {
    my ($self, $sid, $status) = @_;

    my $haenv = $self->{haenv};

    my $worker = delete $self->{workers}->{$sid};
    return if !$worker; # should not happen

    my $exit_code = -1;

    if ($status == -1) {
        # exec itself failed
        $haenv->log('err', "resource agent $sid finished - failed to execute");
    } elsif (my $sig = ($status & 127)) {
        $haenv->log('err', "resource agent $sid finished - got signal $sig");
    } else {
        $exit_code = ($status >> 8);
    }

    $self->{shutdown_errors}++ if $exit_code != 0;
}
760
# Finalize a finished resource agent worker for $sid: decode the wait()
# status, feed the exit code through the restart policy
# (handle_service_exitcode), record the result for the CRM under $uid and
# prune results whose uid is no longer referenced by any service.
sub resource_command_finished {
    my ($self, $sid, $uid, $status) = @_;

    my $haenv = $self->{haenv};

    my $w = delete $self->{workers}->{$sid};
    return if !$w; # should not happen

    my $exit_code = -1;

    if ($status == -1) {
        # exec itself failed (status set by the non-forking path in run_workers)
        $haenv->log('err', "resource agent $sid finished - failed to execute");
    } elsif (my $sig = ($status & 127)) {
        $haenv->log('err', "resource agent $sid finished - got signal $sig");
    } else {
        $exit_code = ($status >> 8);
    }

    # may translate the exit code based on the configured restart policy
    $exit_code = $self->handle_service_exitcode($sid, $w->{state}, $exit_code);

    return if $exit_code == ETRY_AGAIN; # tell nobody, simply retry

    $self->{results}->{$uid} = {
        sid => $w->{sid},
        state => $w->{state},
        exit_code => $exit_code,
    };

    my $ss = $self->{service_status};

    # compute hash of valid/existing uids
    my $valid_uids = {};
    foreach my $sid (keys %$ss) {
        my $sd = $ss->{$sid};
        next if !$sd->{uid};
        $valid_uids->{$sd->{uid}} = 1;
    }

    # garbage collect: keep only results whose uid is still referenced
    my $results = {};
    foreach my $id (keys %{$self->{results}}) {
        next if !$valid_uids->{$id};
        $results->{$id} = $self->{results}->{$id};
    }
    $self->{results} = $results;
}
806
# processes the exit code from a finished resource agent, so that the CRM knows
# if the LRM wants to retry an action based on the current recovery policies for
# the failed service, or the CRM itself must try to recover from the failure.
sub handle_service_exitcode {
    my ($self, $sid, $cmd, $exit_code) = @_;

    my $haenv = $self->{haenv};
    my $tries = $self->{restart_tries};

    my $sc = $haenv->read_service_config();

    my $max_restart = 0;

    if (my $cd = $sc->{$sid}) {
        # guard with the same default as above: a service config without
        # max_restart set previously propagated undef into the numeric
        # comparison below, triggering "uninitialized value" warnings
        $max_restart = $cd->{max_restart} // 0;
    }

    # restart policy only applies to failed 'started' commands
    if ($cmd eq 'started') {

        if ($exit_code == SUCCESS) {

            # successful start resets the retry counter
            $tries->{$sid} = 0;

            return $exit_code;

        } elsif ($exit_code == ERROR) {

            $tries->{$sid} = 0 if !defined($tries->{$sid});

            if ($tries->{$sid} >= $max_restart) {
                # retry budget exhausted: reset counter and report the error to the CRM
                $haenv->log('err', "unable to start service $sid on local node".
                           " after $tries->{$sid} retries");
                $tries->{$sid} = 0;
                return ERROR;
            }

            $tries->{$sid}++;

            $haenv->log('warning', "restart policy: retry number $tries->{$sid}" .
                        " for service '$sid'");
            # tell CRM that we retry the start
            return ETRY_AGAIN;
        }
    }

    # everything else is passed through unmodified
    return $exit_code;
}
855
# Execute one resource agent command ($cmd) for service $sid via its resource
# plugin. Runs either in a forked worker or inline (see run_workers). Returns
# one of the :exit_codes constants; may die on hard errors (caught and logged
# by the caller).
sub exec_resource_agent {
    my ($self, $sid, $service_config, $cmd, $params) = @_;

    # setup execution environment

    $ENV{'PATH'} = '/sbin:/bin:/usr/sbin:/usr/bin';

    my $haenv = $self->{haenv};

    my $nodename = $haenv->nodename();

    my (undef, $service_type, $service_name) = $haenv->parse_sid($sid);

    my $plugin = PVE::HA::Resources->lookup($service_type);
    if (!$plugin) {
        $haenv->log('err', "service type '$service_type' not implemented");
        return EUNKNOWN_SERVICE_TYPE;
    }

    if (!$service_config) {
        $haenv->log('err', "missing resource configuration for '$sid'");
        return EUNKNOWN_SERVICE;
    }

    # process error state early
    if ($cmd eq 'error') {
        $haenv->log('err', "service $sid is in an error state and needs manual " .
            "intervention. Look up 'ERROR RECOVERY' in the documentation.");

        return SUCCESS; # error always succeeds
    }

    if ($service_config->{node} ne $nodename) {
        $haenv->log('err', "service '$sid' not on this node");
        return EWRONG_NODE;
    }

    my $id = $service_name;

    my $running = $plugin->check_running($haenv, $id);

    if ($cmd eq 'started') {

        return SUCCESS if $running;

        $haenv->log("info", "starting service $sid");

        $plugin->start($haenv, $id);

        $running = $plugin->check_running($haenv, $id);

        if ($running) {
            $haenv->log("info", "service status $sid started");
            return SUCCESS;
        } else {
            $haenv->log("warning", "unable to start service $sid");
            return ERROR;
        }

    } elsif ($cmd eq 'request_stop' || $cmd eq 'stopped') {

        return SUCCESS if !$running;

        if (defined($params->{timeout})) {
            $haenv->log("info", "stopping service $sid (timeout=$params->{timeout})");
        } else {
            $haenv->log("info", "stopping service $sid");
        }

        $plugin->shutdown($haenv, $id, $params->{timeout});

        $running = $plugin->check_running($haenv, $id);

        if (!$running) {
            $haenv->log("info", "service status $sid stopped");
            return SUCCESS;
        } else {
            # fixed log message: was "unable to stop stop service" (duplicated word)
            $haenv->log("info", "unable to stop service $sid (still running)");
            return ERROR;
        }

    } elsif ($cmd eq 'migrate' || $cmd eq 'relocate' || $cmd eq 'request_start_balance') {

        my $target = $params->{target};
        # NOTE: the old guard repeated the !defined check and made a trailing
        # `return EINVALID_PARAMETER` unreachable after the unconditional die;
        # keep the die (the caller logs $@) and drop the dead code.
        die "$cmd '$sid' failed - missing target\n" if !defined($target);

        if ($service_config->{node} eq $target) {
            # already there
            return SUCCESS;
        }

        # only 'migrate' is a live/online move
        my $online = ($cmd eq 'migrate') ? 1 : 0;

        my $res = $plugin->migrate($haenv, $id, $target, $online);

        # something went wrong if service is still on this node
        if (!$res) {
            $haenv->log("err", "service $sid not moved (migration error)");
            return ERROR;
        }

        return SUCCESS;

    }

    $haenv->log("err", "implement me (cmd '$cmd')");
    return EUNKNOWN_COMMAND;
}
967
968
969 1;