package PVE::HA::LRM;

# Local Resource Manager

use strict;
use warnings;
use POSIX qw(:sys_wait_h);

use PVE::SafeSyslog;
use PVE::Tools;
use PVE::HA::Tools ':exit_codes';
use PVE::HA::Resources;

# Server can have several states:

my $valid_states = {
    wait_for_agent_lock => "waiting for agent lock",
    active => "got agent_lock",
    maintenance => "going into maintenance",
    lost_agent_lock => "lost agent_lock",
};

# we sleep ~10s per 'active' round, so if no service is configured for >= 10 min we go into the
# wait state, giving up the watchdog and the LRM lock voluntarily, ensuring the WD can do no harm
my $max_active_idle_rounds = 60;
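# (60 rounds at roughly 10 seconds each is where the ~10 minutes mentioned above comes from)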

sub new {
    my ($this, $haenv) = @_;

    my $class = ref($this) || $this;

    my $self = bless {
        haenv => $haenv,
        status => { state => 'startup' },
        workers => {},
        results => {},
        restart_tries => {},
        shutdown_request => 0,
        shutdown_errors => 0,
        # mode can be: active, maintenance, restart, shutdown
        mode => 'active',
        cluster_state_update => 0,
        active_idle_rounds => 0,
    }, $class;

    $self->set_local_status({ state => 'wait_for_agent_lock' });

    return $self;
}

sub shutdown_request {
    my ($self) = @_;

    return if $self->{shutdown_request}; # already in shutdown mode

    my $haenv = $self->{haenv};

    my $nodename = $haenv->nodename();

    my ($shutdown, $reboot) = $haenv->is_node_shutdown();

    my $dc_cfg = $haenv->get_datacenter_settings();
    my $shutdown_policy = $dc_cfg->{ha}->{shutdown_policy} // 'conditional';

    if ($shutdown) { # don't log this on service restart, only on node shutdown
        $haenv->log('info', "got shutdown request with shutdown policy '$shutdown_policy'");
    }

    my $freeze_all;
    my $maintenance;
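    # summary of how each policy maps onto the flags set below: 'conditional' freezes services
    # only on reboot (and stops them on poweroff), 'freeze' always freezes, 'failover' never
    # freezes so services get stopped here and can be recovered elsewhere, and 'migrate' flags
    # the node for maintenance so the manager moves services away first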
    if ($shutdown_policy eq 'conditional') {
        $freeze_all = $reboot;
    } elsif ($shutdown_policy eq 'freeze') {
        $freeze_all = 1;
    } elsif ($shutdown_policy eq 'failover') {
        $freeze_all = 0;
    } elsif ($shutdown_policy eq 'migrate') {
        $maintenance = 1;
    } else {
        $haenv->log('err', "unknown shutdown policy '$shutdown_policy', fall back to conditional");
        $freeze_all = $reboot;
    }

    if ($maintenance) {
        # we get marked as unavailable by the manager, then all services will
        # be migrated away; we'll still have the same "can we exit" clause as
        # a normal shutdown -> no running service on this node
        # FIXME: after X minutes, add shutdown command for remaining services,
        # e.g., if they have no alternative node???
    } elsif ($shutdown) {
        # *always* queue stop jobs for all services if the node shuts down,
        # independent of whether it's a reboot or a poweroff, else we may corrupt
        # services or hinder node shutdown
        my $ss = $self->{service_status};

        foreach my $sid (keys %$ss) {
            my $sd = $ss->{$sid};
            next if !$sd->{node};
            next if $sd->{node} ne $nodename;
            # Note: use undef uid to mark shutdown/stop jobs
            $self->queue_resource_command($sid, undef, 'request_stop');
        }
    }

    if ($shutdown) {
        my $shutdown_type = $reboot ? 'reboot' : 'shutdown';
        if ($maintenance) {
            $haenv->log('info', "$shutdown_type LRM, doing maintenance, removing this node from active list");
            $self->{mode} = 'maintenance';
        } elsif ($freeze_all) {
            $haenv->log('info', "$shutdown_type LRM, stop and freeze all services");
            $self->{mode} = 'restart';
        } else {
            $haenv->log('info', "shutdown LRM, stop all services");
            $self->{mode} = 'shutdown';
        }
    } else {
        $haenv->log('info', "restart LRM, freeze all services");
        $self->{mode} = 'restart';
    }

    $self->{shutdown_request} = $haenv->get_time();

    eval { $self->update_lrm_status() or die "not quorate?\n"; };
    if (my $err = $@) {
        $haenv->log('err', "unable to update lrm status file - $err");
    }
}

sub get_local_status {
    my ($self) = @_;

    return $self->{status};
}

sub set_local_status {
    my ($self, $new) = @_;

    die "invalid state '$new->{state}'" if !$valid_states->{$new->{state}};

    my $haenv = $self->{haenv};

    my $old = $self->{status};

    # important: only update if it really changed
    return if $old->{state} eq $new->{state};

    $haenv->log('info', "status change $old->{state} => $new->{state}");

    $new->{state_change_time} = $haenv->get_time();

    $self->{status} = $new;
}

sub update_lrm_status {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    return 0 if !$haenv->quorate();

    my $lrm_status = {
        state => $self->{status}->{state},
        mode => $self->{mode},
        results => $self->{results},
        timestamp => $haenv->get_time(),
    };
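    # note: the manager (CRM) reads this status back to learn our state/mode and to pick up the
    # per-UID command results collected in $self->{results}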

    eval { $haenv->write_lrm_status($lrm_status); };
    if (my $err = $@) {
        $haenv->log('err', "unable to write lrm status file - $err");
        return 0;
    }

    return 1;
}

sub update_service_status {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    my $ms = eval { $haenv->read_manager_status(); };
    if (my $err = $@) {
        $haenv->log('err', "updating service status from manager failed: $err");
        return undef;
    } else {
        $self->{service_status} = $ms->{service_status} || {};
        my $nodename = $haenv->nodename();
        $self->{node_status} = $ms->{node_status}->{$nodename} || 'unknown';
        return 1;
    }
}

sub get_protected_ha_agent_lock {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    my $count = 0;
    my $starttime = $haenv->get_time();

    for (;;) {

        if ($haenv->get_ha_agent_lock()) {
            if ($self->{ha_agent_wd}) {
                $haenv->watchdog_update($self->{ha_agent_wd});
            } else {
                my $wfh = $haenv->watchdog_open();
                $self->{ha_agent_wd} = $wfh;
            }
            return 1;
        }

        last if ++$count > 5; # try at most 5 times

        my $delay = $haenv->get_time() - $starttime;
        last if $delay > 5; # for at most 5 seconds

        $haenv->sleep(1);
    }

    return 0;
}

# only checks whether any service is configured for the local node, independent of its request state
sub has_configured_service_on_local_node {
    my ($self) = @_;

    my $haenv = $self->{haenv};
    my $nodename = $haenv->nodename();

    my $ss = $self->{service_status};
    foreach my $sid (keys %$ss) {
        my $sd = $ss->{$sid};
        next if !$sd->{node} || $sd->{node} ne $nodename;

        return 1;
    }
    return 0;
}

sub is_fence_requested {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    my $nodename = $haenv->nodename();
    my $ss = $self->{service_status};

    my $fenced_services = PVE::HA::Tools::count_fenced_services($ss, $nodename);

    return $fenced_services || $self->{node_status} eq 'fence';
}

sub active_service_count {
    my ($self) = @_;

    my $haenv = $self->{haenv};
    my $nodename = $haenv->nodename();

    my $ss = $self->{service_status};

    my $count = 0;
    foreach my $sid (keys %$ss) {
        my $sd = $ss->{$sid};
        next if !$sd->{node};
        next if $sd->{node} ne $nodename;
        my $req_state = $sd->{state};
        next if !defined($req_state);
        next if $req_state eq 'stopped';
        # NOTE: 'ignored' ones are already dropped by the manager from service_status
        next if $req_state eq 'freeze';
        # erroneous services are not managed by HA, don't count them as active
        next if $req_state eq 'error';
        # request_start is for (optional) better node selection for stop -> started transition
        next if $req_state eq 'request_start';

        $count++;
    }

    return $count;
}

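# one-shot flag: make sure the LRM status file was written at least once at startup; work()
# below keeps retrying (and sleeping) until this first write succeeds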
my $wrote_lrm_status_at_startup = 0;

sub do_one_iteration {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    $haenv->loop_start_hook();

    $self->{cluster_state_update} = $haenv->cluster_state_update();

    my $res = $self->work();

    $haenv->loop_end_hook();

    return $res;
}

# NOTE: this is disabling the self-fence mechanism, so it must NOT be called with active services
# It's normally *only* OK on graceful shutdown (with no services, or all services frozen)
my sub give_up_watchdog_protection {
    my ($self) = @_;

    if ($self->{ha_agent_wd}) {
        $self->{haenv}->watchdog_close($self->{ha_agent_wd});
        delete $self->{ha_agent_wd}; # only delete after close!
    }
}

sub work {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    if (!$wrote_lrm_status_at_startup) {
        if ($self->update_lrm_status()) {
            $wrote_lrm_status_at_startup = 1;
        } else {
            # do nothing
            $haenv->sleep(5);
            return $self->{shutdown_request} ? 0 : 1;
        }
    }

    my $status = $self->get_local_status();
    my $state = $status->{state};

    $self->update_service_status();

    my $fence_request = $self->is_fence_requested();

    # do state changes first

    my $ctime = $haenv->get_time();

    if ($state eq 'wait_for_agent_lock') {

        my $service_count = $self->active_service_count();

        if (!$fence_request && $service_count && $haenv->quorate()) {
            if ($self->get_protected_ha_agent_lock()) {
                $self->set_local_status({ state => 'active' });
            }
        }

    } elsif ($state eq 'lost_agent_lock') {

        if (!$fence_request && $haenv->quorate()) {
            if ($self->get_protected_ha_agent_lock()) {
                $self->set_local_status({ state => 'active' });
            }
        }

    } elsif ($state eq 'active') {

        if ($fence_request) {
            $haenv->log('err', "node needs to be fenced - releasing agent_lock\n");
            $self->set_local_status({ state => 'lost_agent_lock' });
        } elsif (!$self->get_protected_ha_agent_lock()) {
            $self->set_local_status({ state => 'lost_agent_lock' });
        } elsif ($self->{mode} eq 'maintenance') {
            $self->set_local_status({ state => 'maintenance' });
        } else {
            if (!$self->has_configured_service_on_local_node() && !$self->run_workers()) {
                # no active service configured for this node and all (old) workers are done
                $self->{active_idle_rounds}++;
                if ($self->{active_idle_rounds} > $max_active_idle_rounds) {
                    $haenv->log('info', "node had no service configured for $max_active_idle_rounds rounds, going idle.\n");
                    # safety: no active service & no running worker for quite some time -> OK
                    $haenv->release_ha_agent_lock();
                    give_up_watchdog_protection($self);
                    $self->set_local_status({ state => 'wait_for_agent_lock' });
                    $self->{active_idle_rounds} = 0;
                }
            } elsif ($self->{active_idle_rounds}) {
                $self->{active_idle_rounds} = 0;
            }
        }
    } elsif ($state eq 'maintenance') {

        if ($fence_request) {
            $haenv->log('err', "node needs to be fenced during maintenance mode - releasing agent_lock\n");
            $self->set_local_status({ state => 'lost_agent_lock' });
        } elsif (!$self->get_protected_ha_agent_lock()) {
            $self->set_local_status({ state => 'lost_agent_lock' });
        }
    }

    $status = $self->get_local_status();
    $state = $status->{state};

    # do work

    if ($state eq 'wait_for_agent_lock') {

        return 0 if $self->{shutdown_request};

        $self->update_lrm_status();

        $haenv->sleep(5);

    } elsif ($state eq 'active') {

        my $startime = $haenv->get_time();

        my $max_time = 10;

        my $shutdown = 0;

        # do work (max_time seconds)
        eval {
            # fixme: set alert timer

            # if we could not get the current service status there's no point
            # in doing anything, try again next round.
            return if !$self->update_service_status();

            if ($self->{shutdown_request}) {

                if ($self->{mode} eq 'restart') {
                    # catch exited workers to update service state
                    my $workers = $self->run_workers();
                    my $service_count = $self->active_service_count();

                    if ($service_count == 0 && $workers == 0) {
                        # safety: no active services or workers -> OK
                        give_up_watchdog_protection($self);
                        $shutdown = 1;

                        # restart with no or frozen services, release the lock
                        $haenv->release_ha_agent_lock();
                    }
                } else {

                    if ($self->run_workers() == 0) {
                        if ($self->{shutdown_errors} == 0) {
                            # safety: no active services and LRM shutdown -> OK
                            give_up_watchdog_protection($self);

                            # shutdown with all services stopped, thus release the lock
                            $haenv->release_ha_agent_lock();
                        }

                        $shutdown = 1;
                    }
                }
            } else {
                if (!$self->{cluster_state_update}) {
                    # update failed but we could still renew our lock (cfs restart?),
                    # safely skip manage and expect to update just fine next round
                    $haenv->log('notice', "temporary inconsistent cluster state " .
                        "(cfs restart?), skip round");
                    return;
                }

                $self->manage_resources();

            }
        };
        if (my $err = $@) {
            $haenv->log('err', "got unexpected error - $err");
        }

        $self->update_lrm_status();

        return 0 if $shutdown;

        $haenv->sleep_until($startime + $max_time);

    } elsif ($state eq 'lost_agent_lock') {

        # NOTE: watchdog is active and will trigger soon!
        # so we hope to get the lock back soon!
        if ($self->{shutdown_request}) {

            my $service_count = $self->active_service_count();

            if ($service_count > 0) {
                $haenv->log('err', "got shutdown request in state 'lost_agent_lock' - " .
                    "detected $service_count running services");

                if ($self->{mode} eq 'restart') {
                    my $state_mt = $self->{status}->{state_change_time};

                    # watchdog should have already triggered, so either it's set
                    # to noboot or it failed. As we are in restart mode, and
                    # have an infinite stop timeout -> exit now - we don't touch services
                    # or change state, so this is safe, relatively speaking
                    if (($haenv->get_time() - $state_mt) > 90) {
                        $haenv->log('err', "lost agent lock and restart request for over 90 seconds - giving up!");
                        return 0;
                    }
                }
            } else {
                # safety: all services are stopped, so we can close the watchdog
                give_up_watchdog_protection($self);

                return 0;
            }
        }

        $haenv->sleep(5);

    } elsif ($state eq 'maintenance') {

        my $startime = $haenv->get_time();
        return if !$self->update_service_status();

        # wait until all active services moved away
        my $service_count = $self->active_service_count();

        my $exit_lrm = 0;

        if ($self->{shutdown_request}) {
            if ($service_count == 0 && $self->run_workers() == 0) {
                # safety: going into maintenance and all active services got moved -> OK
                give_up_watchdog_protection($self);

                $exit_lrm = 1;

                # restart with no or frozen services, release the lock
                $haenv->release_ha_agent_lock();
            }
        }

        $self->manage_resources() if !$exit_lrm;

        $self->update_lrm_status();

        return 0 if $exit_lrm;

        $haenv->sleep_until($startime + 5);

    } else {

        die "got unexpected status '$state'\n";

    }

    return 1;
}

sub run_workers {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    my $starttime = $haenv->get_time();

    # number of workers to start; if 0 we exec the command directly without forking
    my $max_workers = $haenv->get_max_workers();
    my $sc = $haenv->read_service_config();

    my $worker = $self->{workers};
    # we only have limited time but want to ensure that every queued worker is scheduled
    # eventually, so sort by the count a worker was already seen here in this loop
    my $fair_sorter = sub {
        $worker->{$b}->{start_tries} <=> $worker->{$a}->{start_tries} || $a cmp $b
    };
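    # e.g., a queued 'vm:101' with start_tries == 3 is scheduled before a freshly queued
    # 'vm:100' with start_tries == 0; ties are broken by comparing the service IDs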

    while (($haenv->get_time() - $starttime) <= 8) {
        my $count = $self->check_active_workers();

        for my $sid (sort $fair_sorter grep { !$worker->{$_}->{pid} } keys %$worker) {
            my $w = $worker->{$sid};
            # higher try-count means higher priority especially compared to newly queued jobs, so
            # count every try to avoid starvation
            $w->{start_tries}++;
            # FIXME: should be last and ensure that check_active_workers is called sooner
            next if $count >= $max_workers && $max_workers > 0;

            # only fork if we may, else call exec_resource_agent directly (e.g. for tests)
            if ($max_workers > 0) {
                my $pid = fork();
                if (!defined($pid)) {
                    $haenv->log('err', "forking worker failed - $!");
                    $count = 0; last; # abort, try later
                } elsif ($pid == 0) {
                    $haenv->after_fork(); # cleanup

                    # do work
                    my $res = -1;
                    eval {
                        $res = $self->exec_resource_agent($sid, $sc->{$sid}, $w->{state}, $w->{params});
                    };
                    if (my $err = $@) {
                        $haenv->log('err', $err);
                        POSIX::_exit(-1);
                    }
                    POSIX::_exit($res);
                } else {
                    $count++;
                    $w->{pid} = $pid;
                }
            } else {
                my $res = -1;
                eval {
                    $res = $self->exec_resource_agent($sid, $sc->{$sid}, $w->{state}, $w->{params});
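                    # positive return values are shifted into the high byte so they match the
                    # wait() status encoding a forked worker produces; the *_finished handlers
                    # below then decode both cases the same way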
                    $res = $res << 8 if $res > 0;
                };
                if (my $err = $@) {
                    $haenv->log('err', $err);
                }
                if (defined($w->{uid})) {
                    $self->resource_command_finished($sid, $w->{uid}, $res);
                } else {
                    $self->stop_command_finished($sid, $res);
                }
            }
        }

        last if !$count;

        $haenv->sleep(1);
    }

    return scalar(keys %{$self->{workers}});
}

sub manage_resources {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    my $nodename = $haenv->nodename();

    my $ss = $self->{service_status};

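    # drop retry counters for services that no longer show up in the manager's service status,
    # so this hash cannot grow indefinitely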
    foreach my $sid (keys %{$self->{restart_tries}}) {
        delete $self->{restart_tries}->{$sid} if !$ss->{$sid};
    }

    foreach my $sid (keys %$ss) {
        my $sd = $ss->{$sid};
        next if !$sd->{node} || !$sd->{uid};
        next if $sd->{node} ne $nodename;
        my $request_state = $sd->{state};
        next if !defined($request_state);
        # can only happen for restricted groups where the failed node itself needs to be the
        # recovery target. Always let the master do so first, it will then be marked as 'stopped'
        # and we can just continue normally. But we must NOT do anything with it while still in recovery
        next if $request_state eq 'recovery';
        next if $request_state eq 'freeze';
        # intermediate step for optional better node selection on stop -> start request state change
        next if $request_state eq 'request_start';

        $self->queue_resource_command($sid, $sd->{uid}, $request_state, {
            'target' => $sd->{target},
            'timeout' => $sd->{timeout},
        });
    }

    return $self->run_workers();
}

sub queue_resource_command {
    my ($self, $sid, $uid, $state, $params) = @_;

    # do not queue the exact same command twice as this may lead to an inconsistent HA state when
    # the first command fails but the CRM does not process its failure right away and the LRM starts
    # a second try, without the CRM knowing of it (race condition). The 'stopped' command is an
    # exception as we do not process its result in the CRM and we want to execute it always (even
    # with no active CRM)
    return if $state ne 'stopped' && $uid && defined($self->{results}->{$uid});

    if (my $w = $self->{workers}->{$sid}) {
        return if $w->{pid}; # already started
        # else, delete and overwrite queue entry with new command
        delete $self->{workers}->{$sid};
    }

    $self->{workers}->{$sid} = {
        sid => $sid,
        uid => $uid,
        state => $state,
        start_tries => 0,
    };

    $self->{workers}->{$sid}->{params} = $params if $params;
}

sub check_active_workers {
    my ($self) = @_;

    # finish/count workers
    my $count = 0;
    foreach my $sid (keys %{$self->{workers}}) {
        my $w = $self->{workers}->{$sid};
        my $pid = $w->{pid} || next;

        my $waitpid = waitpid($pid, WNOHANG); # check status
        if (defined($waitpid) && ($waitpid == $pid)) {
            if (defined($w->{uid})) {
                $self->resource_command_finished($sid, $w->{uid}, $?);
            } else {
                $self->stop_command_finished($sid, $?);
            }
        } else {
            $count++; # still active
        }
    }

    return $count;
}

sub stop_command_finished {
    my ($self, $sid, $status) = @_;

    my $haenv = $self->{haenv};

    my $w = delete $self->{workers}->{$sid};
    return if !$w; # should not happen

    my $exit_code = -1;

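    # $status is either -1, a raw wait() status, or a pre-shifted value from the non-forking
    # path in run_workers: the low 7 bits carry a terminating signal, the upper byte the exit code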
    if ($status == -1) {
        $haenv->log('err', "resource agent $sid finished - failed to execute");
    } elsif (my $sig = ($status & 127)) {
        $haenv->log('err', "resource agent $sid finished - got signal $sig");
    } else {
        $exit_code = ($status >> 8);
    }

    if ($exit_code != 0) {
        $self->{shutdown_errors}++;
    }
}

sub resource_command_finished {
    my ($self, $sid, $uid, $status) = @_;

    my $haenv = $self->{haenv};

    my $w = delete $self->{workers}->{$sid};
    return if !$w; # should not happen

    my $exit_code = -1;

    if ($status == -1) {
        $haenv->log('err', "resource agent $sid finished - failed to execute");
    } elsif (my $sig = ($status & 127)) {
        $haenv->log('err', "resource agent $sid finished - got signal $sig");
    } else {
        $exit_code = ($status >> 8);
    }

    $exit_code = $self->handle_service_exitcode($sid, $w->{state}, $exit_code);

    return if $exit_code == ETRY_AGAIN; # tell nobody, simply retry

    $self->{results}->{$uid} = {
        sid => $w->{sid},
        state => $w->{state},
        exit_code => $exit_code,
    };

    my $ss = $self->{service_status};

    # compute hash of valid/existing uids
    my $valid_uids = {};
    foreach my $sid (keys %$ss) {
        my $sd = $ss->{$sid};
        next if !$sd->{uid};
        $valid_uids->{$sd->{uid}} = 1;
    }

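    # keep only results whose uid is still referenced by some service, so the results hash
    # cannot grow without bound across many finished commands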
    my $results = {};
    foreach my $id (keys %{$self->{results}}) {
        next if !$valid_uids->{$id};
        $results->{$id} = $self->{results}->{$id};
    }
    $self->{results} = $results;
}

# processes the exit code from a finished resource agent, so that the CRM knows
# if the LRM wants to retry an action based on the current recovery policies for
# the failed service, or the CRM itself must try to recover from the failure.
sub handle_service_exitcode {
    my ($self, $sid, $cmd, $exit_code) = @_;

    my $haenv = $self->{haenv};
    my $tries = $self->{restart_tries};

    my $sc = $haenv->read_service_config();

    my $max_restart = 0;

    if (my $cd = $sc->{$sid}) {
        $max_restart = $cd->{max_restart};
    }

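    # example: with max_restart == 1, the first failed 'started' attempt returns ETRY_AGAIN
    # (retry), the second consecutive failure then returns ERROR and resets the counter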
    if ($cmd eq 'started') {

        if ($exit_code == SUCCESS) {

            $tries->{$sid} = 0;

            return $exit_code;

        } elsif ($exit_code == ERROR) {

            $tries->{$sid} = 0 if !defined($tries->{$sid});

            if ($tries->{$sid} >= $max_restart) {
                $haenv->log('err', "unable to start service $sid on local node".
                    " after $tries->{$sid} retries");
                $tries->{$sid} = 0;
                return ERROR;
            }

            $tries->{$sid}++;

            $haenv->log('warning', "restart policy: retry number $tries->{$sid}" .
                " for service '$sid'");
            # tell CRM that we retry the start
            return ETRY_AGAIN;
        }
    }

    return $exit_code;

}

sub exec_resource_agent {
    my ($self, $sid, $service_config, $cmd, $params) = @_;

    # setup execution environment

    $ENV{'PATH'} = '/sbin:/bin:/usr/sbin:/usr/bin';

    my $haenv = $self->{haenv};

    my $nodename = $haenv->nodename();

    my (undef, $service_type, $service_name) = $haenv->parse_sid($sid);

    my $plugin = PVE::HA::Resources->lookup($service_type);
    if (!$plugin) {
        $haenv->log('err', "service type '$service_type' not implemented");
        return EUNKNOWN_SERVICE_TYPE;
    }

    if (!$service_config) {
        $haenv->log('err', "missing resource configuration for '$sid'");
        return EUNKNOWN_SERVICE;
    }

    # process error state early
    if ($cmd eq 'error') {
        $haenv->log('err', "service $sid is in an error state and needs manual " .
            "intervention. Look up 'ERROR RECOVERY' in the documentation.");

        return SUCCESS; # error always succeeds
    }

    if ($service_config->{node} ne $nodename) {
        $haenv->log('err', "service '$sid' not on this node");
        return EWRONG_NODE;
    }

    my $id = $service_name;

    my $running = $plugin->check_running($haenv, $id);

    if ($cmd eq 'started') {

        return SUCCESS if $running;

        $haenv->log("info", "starting service $sid");

        $plugin->start($haenv, $id);

        $running = $plugin->check_running($haenv, $id);

        if ($running) {
            $haenv->log("info", "service status $sid started");
            return SUCCESS;
        } else {
            $haenv->log("warning", "unable to start service $sid");
            return ERROR;
        }

    } elsif ($cmd eq 'request_stop' || $cmd eq 'stopped') {

        return SUCCESS if !$running;

        if (defined($params->{timeout})) {
            $haenv->log("info", "stopping service $sid (timeout=$params->{timeout})");
        } else {
            $haenv->log("info", "stopping service $sid");
        }

        $plugin->shutdown($haenv, $id, $params->{timeout});

        $running = $plugin->check_running($haenv, $id);

        if (!$running) {
            $haenv->log("info", "service status $sid stopped");
            return SUCCESS;
        } else {
            $haenv->log("info", "unable to stop service $sid (still running)");
            return ERROR;
        }

    } elsif ($cmd eq 'migrate' || $cmd eq 'relocate' || $cmd eq 'request_start_balance') {

        my $target = $params->{target};
        if (!defined($target)) {
            die "$cmd '$sid' failed - missing target\n";
            return EINVALID_PARAMETER;
        }

        if ($service_config->{node} eq $target) {
            # already there
            return SUCCESS;
        }

        my $online = ($cmd eq 'migrate') ? 1 : 0;

        my $res = $plugin->migrate($haenv, $id, $target, $online);

        # something went wrong if service is still on this node
        if (!$res) {
            $haenv->log("err", "service $sid not moved (migration error)");
            return ERROR;
        }

        return SUCCESS;

    }

    $haenv->log("err", "implement me (cmd '$cmd')");
    return EUNKNOWN_COMMAND;
}

1;