]> git.proxmox.com Git - pve-ha-manager.git/blame - src/PVE/HA/LRM.pm
env: datacenter config: include crs (cluster-resource-scheduling) setting
[pve-ha-manager.git] / src / PVE / HA / LRM.pm
CommitLineData
5f095798
DM
1package PVE::HA::LRM;
2
3# Local Resource Manager
4
5use strict;
6use warnings;
c4a221bc 7use POSIX qw(:sys_wait_h);
5f095798
DM
8
9use PVE::SafeSyslog;
10use PVE::Tools;
a89ff919 11use PVE::HA::Tools ':exit_codes';
2a045f55 12use PVE::HA::Resources;
5f095798
DM
13
# The LRM can be in one of these states; values are human-readable descriptions.
my $valid_states = {
    wait_for_agent_lock => "waiting for agent lock",
    active => "got agent_lock",
    maintenance => "going into maintenance",
    lost_agent_lock => "lost agent_lock",
};

# We sleep ~10s per 'active' round, so if no service is available for >= 10 minutes we go back
# to the wait state, voluntarily giving up the watchdog and the LRM lock so the WD can do no harm.
my $max_active_idle_rounds = 60;
5f095798
DM
# Constructor: create an LRM instance bound to the given HA environment.
# Starts in the 'startup' state and immediately transitions to 'wait_for_agent_lock'.
sub new {
    my ($this, $haenv) = @_;

    my $class = ref($this) || $this;

    my $self = bless {
        haenv => $haenv,
        status => { state => 'startup' },
        workers => {},          # queued/running resource-agent jobs, keyed by SID
        results => {},          # finished command results, keyed by UID
        restart_tries => {},    # per-service restart counters for the restart policy
        shutdown_request => 0,
        shutdown_errors => 0,
        # mode can be: active, reboot, shutdown, restart
        mode => 'active',
        cluster_state_update => 0,
        active_idle_rounds => 0,
    }, $class;

    $self->set_local_status({ state => 'wait_for_agent_lock' });

    return $self;
}
50
# Handle an incoming shutdown/restart request for the LRM daemon.
# Decides - based on the datacenter 'shutdown_policy' and whether the whole
# node goes down - if services get stopped, frozen, or migrated away
# (maintenance), then records the request time and persists the LRM status.
sub shutdown_request {
    my ($self) = @_;

    return if $self->{shutdown_request}; # already in shutdown mode

    my $haenv = $self->{haenv};

    my $nodename = $haenv->nodename();

    my ($shutdown, $reboot) = $haenv->is_node_shutdown();

    my $dc_settings = $haenv->get_datacenter_settings();
    my $shutdown_policy = $dc_settings->{ha}->{shutdown_policy} // 'conditional';

    if ($shutdown) { # don't log this on service restart, only on node shutdown
        $haenv->log('info', "got shutdown request with shutdown policy '$shutdown_policy'");
    }

    my $freeze_all;
    my $maintenance;
    if ($shutdown_policy eq 'conditional') {
        $freeze_all = $reboot;
    } elsif ($shutdown_policy eq 'freeze') {
        $freeze_all = 1;
    } elsif ($shutdown_policy eq 'failover') {
        $freeze_all = 0;
    } elsif ($shutdown_policy eq 'migrate') {
        $maintenance = 1;
    } else {
        $haenv->log('err', "unknown shutdown policy '$shutdown_policy', fall back to conditional");
        $freeze_all = $reboot;
    }

    if ($maintenance) {
        # The manager marks us as unavailable, then all services get migrated away; the same
        # "can we exit" condition as a normal shutdown applies -> no running service on this node.
        # FIXME: after X minutes, add shutdown command for remaining services,
        # e.g., if they have no alternative node???
    } elsif ($shutdown) {
        # *always* queue stop jobs for all services if the node shuts down,
        # independent if it's a reboot or a poweroff, else we may corrupt
        # services or hinder node shutdown
        my $services = $self->{service_status};

        for my $sid (keys %$services) {
            my $sd = $services->{$sid};
            next if !$sd->{node};
            next if $sd->{node} ne $nodename;
            # Note: use undef uid to mark shutdown/stop jobs
            $self->queue_resource_command($sid, undef, 'request_stop');
        }
    }

    if ($shutdown) {
        my $shutdown_type = $reboot ? 'reboot' : 'shutdown';
        if ($maintenance) {
            $haenv->log('info', "$shutdown_type LRM, doing maintenance, removing this node from active list");
            $self->{mode} = 'maintenance';
        } elsif ($freeze_all) {
            $haenv->log('info', "$shutdown_type LRM, stop and freeze all services");
            $self->{mode} = 'restart';
        } else {
            $haenv->log('info', "shutdown LRM, stop all services");
            $self->{mode} = 'shutdown';
        }
    } else {
        $haenv->log('info', "restart LRM, freeze all services");
        $self->{mode} = 'restart';
    }

    $self->{shutdown_request} = $haenv->get_time();

    eval { $self->update_lrm_status() or die "not quorate?\n"; };
    if (my $err = $@) {
        $haenv->log('err', "unable to update lrm status file - $err");
    }
}
129
# Accessor: return the current local status hash ({ state => ..., ... }).
sub get_local_status {
    my ($self) = @_;

    return $self->{status};
}
135
# Transition the local status to $new->{state}; dies on an unknown state.
# No-op when the state did not actually change; otherwise logs the change
# and stamps $new with the transition time.
sub set_local_status {
    my ($self, $new) = @_;

    die "invalid state '$new->{state}'" if !$valid_states->{$new->{state}};

    my $haenv = $self->{haenv};

    my $old = $self->{status};

    # important: only update if it really changed
    return if $old->{state} eq $new->{state};

    $haenv->log('info', "status change $old->{state} => $new->{state}");

    $new->{state_change_time} = $haenv->get_time();

    $self->{status} = $new;
}
154
9c7d068b
DM
# Persist the current LRM state/mode/results via the environment.
# Returns 1 on success, 0 when not quorate or the write failed.
sub update_lrm_status {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    return 0 if !$haenv->quorate();

    my $lrm_status = {
        state => $self->{status}->{state},
        mode => $self->{mode},
        results => $self->{results},
        timestamp => $haenv->get_time(),
    };

    eval { $haenv->write_lrm_status($lrm_status); };
    if (my $err = $@) {
        $haenv->log('err', "unable to write lrm status file - $err");
        return 0;
    }

    return 1;
}
177
8e940b68
TL
# Refresh the cached service and node status from the manager status file.
# Returns 1 on success; returns empty/undef (and logs) if reading failed.
sub update_service_status {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    my $ms = eval { $haenv->read_manager_status(); };
    if (my $err = $@) {
        $haenv->log('err', "updating service status from manager failed: $err");
        # bare return: undef in scalar context, empty list in list context
        # (avoids the `return undef` one-element-list pitfall)
        return;
    }

    $self->{service_status} = $ms->{service_status} || {};
    my $nodename = $haenv->nodename();
    $self->{node_status} = $ms->{node_status}->{$nodename} || 'unknown';
    return 1;
}
194
5f095798
DM
# Try to acquire (or renew) the HA agent lock, retrying for up to ~5 seconds.
# On success the watchdog gets updated (or opened on first acquisition) before
# returning 1; returns 0 when the lock could not be obtained in time.
sub get_protected_ha_agent_lock {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    my $tries = 0;
    my $starttime = $haenv->get_time();

    while (1) {
        if ($haenv->get_ha_agent_lock()) {
            if ($self->{ha_agent_wd}) {
                $haenv->watchdog_update($self->{ha_agent_wd});
            } else {
                my $wfh = $haenv->watchdog_open();
                $self->{ha_agent_wd} = $wfh;
            }
            return 1;
        }

        last if ++$tries > 5; # try at most 5 times

        my $elapsed = $haenv->get_time() - $starttime;
        last if $elapsed > 5; # and for at most 5 seconds

        $haenv->sleep(1);
    }

    return 0;
}
225
21051707
TL
# Return 1 if any service has the local node as its node, independent of which
# request-state it is in; 0 otherwise.
sub has_configured_service_on_local_node {
    my ($self) = @_;

    my $haenv = $self->{haenv};
    my $nodename = $haenv->nodename();

    my $services = $self->{service_status};
    for my $sid (keys %$services) {
        my $sd = $services->{$sid};
        next if !$sd->{node} || $sd->{node} ne $nodename;

        return 1;
    }
    return 0;
}
242
303490d8
TL
# Return true when this node should be fenced: either some local service is
# marked for fencing, or the manager set our node status to 'fence'.
sub is_fence_requested {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    my $nodename = $haenv->nodename();
    my $services = $self->{service_status};

    my $fenced_services = PVE::HA::Tools::count_fenced_services($services, $nodename);

    return $fenced_services || $self->{node_status} eq 'fence';
}
255
546e2f1f
DM
# Count the services on the local node that the HA stack actively manages,
# i.e. skipping 'stopped', 'freeze' and 'error' request states.
sub active_service_count {
    my ($self) = @_;

    my $haenv = $self->{haenv};
    my $nodename = $haenv->nodename();

    my $services = $self->{service_status};

    my $active = 0;
    for my $sid (keys %$services) {
        my $sd = $services->{$sid};
        next if !$sd->{node};
        next if $sd->{node} ne $nodename;

        my $req_state = $sd->{state};
        next if !defined($req_state);
        next if $req_state eq 'stopped';
        # NOTE: 'ignored' ones are already dropped by the manager from service_status
        next if $req_state eq 'freeze';
        # erroneous services are not managed by HA, don't count them as active
        next if $req_state eq 'error';

        $active++;
    }

    return $active;
}
5bd7aa54
DM
282
# File-scoped flag: did we already persist the LRM status once after startup?
my $wrote_lrm_status_at_startup = 0;
284
5f095798
DM
# Run a single LRM main-loop iteration: invoke the environment's loop hooks
# around work(), refreshing the cached cluster-state-update flag first.
# Returns work()'s result (falsy signals the caller to stop looping).
sub do_one_iteration {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    $haenv->loop_start_hook();

    $self->{cluster_state_update} = $haenv->cluster_state_update();

    my $res = $self->work();

    $haenv->loop_end_hook();

    return $res;
}
300
abc1499b
TL
# NOTE: this is disabling the self-fence mechanism, so it must NOT be called with active services
# It's normally *only* OK on graceful shutdown (with no services, or all services frozen)
my sub give_up_watchdog_protection {
    my ($self) = @_;

    return if !$self->{ha_agent_wd};

    $self->{haenv}->watchdog_close($self->{ha_agent_wd});
    delete $self->{ha_agent_wd}; # only delete after close!
}
311
da6f0416
TL
# The LRM state machine: first compute state transitions (lock acquisition,
# fencing, maintenance), then perform the work for the resulting state.
# Returns 1 to continue looping, 0 to tell the caller the LRM may exit.
sub work {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    if (!$wrote_lrm_status_at_startup) {
        if ($self->update_lrm_status()) {
            $wrote_lrm_status_at_startup = 1;
        } else {
            # could not write (not quorate?) - idle and retry next round
            $haenv->sleep(5);
            return $self->{shutdown_request} ? 0 : 1;
        }
    }

    my $status = $self->get_local_status();
    my $state = $status->{state};

    $self->update_service_status();

    my $fence_request = $self->is_fence_requested();

    # do state changes first

    my $ctime = $haenv->get_time();

    if ($state eq 'wait_for_agent_lock') {
        my $service_count = $self->active_service_count();

        if (!$fence_request && $service_count && $haenv->quorate()) {
            if ($self->get_protected_ha_agent_lock()) {
                $self->set_local_status({ state => 'active' });
            }
        }
    } elsif ($state eq 'lost_agent_lock') {
        if (!$fence_request && $haenv->quorate()) {
            if ($self->get_protected_ha_agent_lock()) {
                $self->set_local_status({ state => 'active' });
            }
        }
    } elsif ($state eq 'active') {
        if ($fence_request) {
            $haenv->log('err', "node need to be fenced - releasing agent_lock\n");
            $self->set_local_status({ state => 'lost_agent_lock'});
        } elsif (!$self->get_protected_ha_agent_lock()) {
            $self->set_local_status({ state => 'lost_agent_lock'});
        } elsif ($self->{mode} eq 'maintenance') {
            $self->set_local_status({ state => 'maintenance'});
        } else {
            if (!$self->has_configured_service_on_local_node() && !$self->run_workers()) {
                # no active service configured for this node and all (old) workers are done
                $self->{active_idle_rounds}++;
                if ($self->{active_idle_rounds} > $max_active_idle_rounds) {
                    $haenv->log('info', "node had no service configured for $max_active_idle_rounds rounds, going idle.\n");
                    # safety: no active service & no running worker for quite some time -> OK
                    $haenv->release_ha_agent_lock();
                    give_up_watchdog_protection($self);
                    $self->set_local_status({ state => 'wait_for_agent_lock'});
                    $self->{active_idle_rounds} = 0;
                }
            } elsif ($self->{active_idle_rounds}) {
                $self->{active_idle_rounds} = 0;
            }
        }
    } elsif ($state eq 'maintenance') {
        if ($fence_request) {
            $haenv->log('err', "node need to be fenced during maintenance mode - releasing agent_lock\n");
            $self->set_local_status({ state => 'lost_agent_lock'});
        } elsif (!$self->get_protected_ha_agent_lock()) {
            $self->set_local_status({ state => 'lost_agent_lock'});
        }
    }

    $status = $self->get_local_status();
    $state = $status->{state};

    # do work

    if ($state eq 'wait_for_agent_lock') {

        return 0 if $self->{shutdown_request};

        $self->update_lrm_status();

        $haenv->sleep(5);

    } elsif ($state eq 'active') {

        my $startime = $haenv->get_time();

        my $max_time = 10;

        my $shutdown = 0;

        # do work (max_time seconds)
        eval {
            # fixme: set alert timer

            # if we could not get the current service status there's no point
            # in doing anything, try again next round.
            return if !$self->update_service_status();

            if ($self->{shutdown_request}) {
                if ($self->{mode} eq 'restart') {
                    # catch exited workers to update service state
                    my $workers = $self->run_workers();
                    my $service_count = $self->active_service_count();

                    if ($service_count == 0 && $workers == 0) {
                        # safety: no active services or workers -> OK
                        give_up_watchdog_protection($self);
                        $shutdown = 1;

                        # restarting with no or only frozen services, release the lock
                        $haenv->release_ha_agent_lock();
                    }
                } else {
                    if ($self->run_workers() == 0) {
                        if ($self->{shutdown_errors} == 0) {
                            # safety: no active services and LRM shutdown -> OK
                            give_up_watchdog_protection($self);

                            # shutdown with all services stopped thus release the lock
                            $haenv->release_ha_agent_lock();
                        }

                        $shutdown = 1;
                    }
                }
            } else {
                if (!$self->{cluster_state_update}) {
                    # update failed but we could still renew our lock (cfs restart?),
                    # safely skip manage and expect to update just fine next round
                    $haenv->log('notice', "temporary inconsistent cluster state " .
                        "(cfs restart?), skip round");
                    return;
                }

                $self->manage_resources();
            }
        };
        if (my $err = $@) {
            $haenv->log('err', "got unexpected error - $err");
        }

        $self->update_lrm_status();

        return 0 if $shutdown;

        $haenv->sleep_until($startime + $max_time);

    } elsif ($state eq 'lost_agent_lock') {

        # NOTE: watchdog is active and will trigger soon!
        # so we hope to get the lock back soon!
        if ($self->{shutdown_request}) {

            my $service_count = $self->active_service_count();

            if ($service_count > 0) {
                $haenv->log('err', "get shutdown request in state 'lost_agent_lock' - " .
                    "detected $service_count running services");

                if ($self->{mode} eq 'restart') {
                    my $state_mt = $self->{status}->{state_change_time};

                    # The watchdog should have already triggered, so either it's set to noboot
                    # or it failed. As we are in restart mode and have infinite stop-timeout,
                    # exit now - we don't touch services or change state, so this is safe,
                    # relatively speaking.
                    if (($haenv->get_time() - $state_mt) > 90) {
                        $haenv->log('err', "lost agent lock and restart request for over 90 seconds - giving up!");
                        return 0;
                    }
                }
            } else {
                # safety: all services are stopped, so we can close the watchdog
                give_up_watchdog_protection($self);

                return 0;
            }
        }

        $haenv->sleep(5);

    } elsif ($state eq 'maintenance') {

        my $startime = $haenv->get_time();
        # NOTE(review): a failed status update returns empty (not 1) here, unlike the
        # 'active' branch where the early return is confined to the eval block.
        return if !$self->update_service_status();

        # wait until all active services moved away
        my $service_count = $self->active_service_count();

        my $exit_lrm = 0;

        if ($self->{shutdown_request}) {
            if ($service_count == 0 && $self->run_workers() == 0) {
                # safety: going into maintenance and all active services got moved -> OK
                give_up_watchdog_protection($self);

                $exit_lrm = 1;

                # restarting with no or only frozen services, release the lock
                $haenv->release_ha_agent_lock();
            }
        }

        $self->manage_resources() if !$exit_lrm;

        $self->update_lrm_status();

        return 0 if $exit_lrm;

        $haenv->sleep_until($startime + 5);

    } else {

        die "got unexpected status '$state'\n";

    }

    return 1;
}
544
# Schedule queued resource-agent commands for up to ~8 seconds, forking up to
# $max_workers children (or executing inline when max_workers is 0, e.g. in
# tests). Returns the number of workers still queued or running afterwards.
sub run_workers {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    my $starttime = $haenv->get_time();

    # number of workers to start; if 0 we exec the command directly without forking
    my $max_workers = $haenv->get_max_workers();
    my $sc = $haenv->read_service_config();

    my $worker = $self->{workers};
    # we only got limited time but want to ensure that every queued worker is scheduled
    # eventually, so sort by the count a worker was seen here in this loop
    my $fair_sorter = sub {
        $worker->{$b}->{start_tries} <=> $worker->{$a}->{start_tries} || $a cmp $b
    };

    while (($haenv->get_time() - $starttime) <= 8) {
        my $count = $self->check_active_workers();

        for my $sid (sort $fair_sorter grep { !$worker->{$_}->{pid} } keys %$worker) {
            my $w = $worker->{$sid};
            # higher try-count means higher priority especially compared to newly queued jobs, so
            # count every try to avoid starvation
            $w->{start_tries}++;
            next if $count >= $max_workers && $max_workers > 0;

            # only fork if we may, else call exec_resource_agent directly (e.g. for tests)
            if ($max_workers > 0) {
                my $pid = fork();
                if (!defined($pid)) {
                    $haenv->log('err', "forking worker failed - $!");
                    $count = 0; last; # abort, try later
                } elsif ($pid == 0) {
                    $haenv->after_fork(); # cleanup

                    # child: run the agent and exit with its result code
                    my $res = -1;
                    eval {
                        $res = $self->exec_resource_agent($sid, $sc->{$sid}, $w->{state}, $w->{params});
                    };
                    if (my $err = $@) {
                        $haenv->log('err', $err);
                        POSIX::_exit(-1);
                    }
                    POSIX::_exit($res);
                } else {
                    $count++;
                    $w->{pid} = $pid;
                }
            } else {
                # inline execution path: emulate a wait()-style status word
                my $res = -1;
                eval {
                    $res = $self->exec_resource_agent($sid, $sc->{$sid}, $w->{state}, $w->{params});
                    $res = $res << 8 if $res > 0;
                };
                if (my $err = $@) {
                    $haenv->log('err', $err);
                }
                if (defined($w->{uid})) {
                    $self->resource_command_finished($sid, $w->{uid}, $res);
                } else {
                    $self->stop_command_finished($sid, $res);
                }
            }
        }

        last if !$count;

        $haenv->sleep(1);
    }

    return scalar(keys %{$self->{workers}});
}
620
# Queue a resource command for every service the manager assigned to this node
# (skipping 'recovery'/'freeze' states), prune stale restart counters, and then
# run the worker queue. Returns run_workers()'s remaining-worker count.
sub manage_resources {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    my $nodename = $haenv->nodename();

    my $services = $self->{service_status};

    # drop restart counters of services that vanished from the status
    for my $sid (keys %{$self->{restart_tries}}) {
        delete $self->{restart_tries}->{$sid} if !$services->{$sid};
    }

    for my $sid (keys %$services) {
        my $sd = $services->{$sid};
        next if !$sd->{node} || !$sd->{uid};
        next if $sd->{node} ne $nodename;

        my $request_state = $sd->{state};
        next if !defined($request_state);
        # 'recovery' can only happen for restricted groups where the failed node itself needs
        # to be the recovery target. Always let the master handle it first; the service then
        # gets marked 'stopped' and we continue normally. We must NOT touch it while it is
        # still in recovery.
        next if $request_state eq 'recovery';
        next if $request_state eq 'freeze';

        $self->queue_resource_command($sid, $sd->{uid}, $request_state, {
            'target' => $sd->{target},
            'timeout' => $sd->{timeout},
        });
    }

    return $self->run_workers();
}
654
# Enqueue a resource-agent command for $sid. A command whose $uid already has a
# recorded result is not queued again - re-running it could race with the CRM
# processing the first failure and leave the HA state inconsistent. 'stopped'
# is the exception: its result is never consumed by the CRM and it must always
# run (even without an active CRM). A queue entry whose worker already forked
# (has a pid) is left untouched; otherwise it is replaced by the new command.
sub queue_resource_command {
    my ($self, $sid, $uid, $state, $params) = @_;

    return if $state ne 'stopped' && $uid && defined($self->{results}->{$uid});

    if (my $w = $self->{workers}->{$sid}) {
        return if $w->{pid}; # already started
        # else, delete and overwrite queue entry with new command
        delete $self->{workers}->{$sid};
    }

    my $entry = {
        sid => $sid,
        uid => $uid,
        state => $state,
        start_tries => 0,
    };
    $entry->{params} = $params if $params;

    $self->{workers}->{$sid} = $entry;
}
680
# Reap finished worker children (non-blocking waitpid) and dispatch their exit
# status to the matching *_finished handler. Returns the number of workers
# that are still running.
sub check_active_workers {
    my ($self) = @_;

    my $active = 0;
    for my $sid (keys %{$self->{workers}}) {
        my $w = $self->{workers}->{$sid};
        my $pid = $w->{pid} || next; # not started yet

        my $reaped = waitpid($pid, WNOHANG); # poll child status
        if (defined($reaped) && ($reaped == $pid)) {
            if (defined($w->{uid})) {
                $self->resource_command_finished($sid, $w->{uid}, $?);
            } else {
                $self->stop_command_finished($sid, $?);
            }
        } else {
            $active++; # still running
        }
    }

    return $active;
}
704
116dea30
DM
# Handle completion of a shutdown/stop job (queued with undef uid). Decodes a
# wait()-style $status word; any non-zero outcome bumps shutdown_errors, which
# blocks releasing the agent lock during LRM shutdown.
sub stop_command_finished {
    my ($self, $sid, $status) = @_;

    my $haenv = $self->{haenv};

    my $w = delete $self->{workers}->{$sid};
    return if !$w; # should not happen

    my $exit_code = -1;

    if ($status == -1) {
        $haenv->log('err', "resource agent $sid finished - failed to execute");
    } elsif (my $sig = ($status & 127)) {
        $haenv->log('err', "resource agent $sid finished - got signal $sig");
    } else {
        $exit_code = ($status >> 8);
    }

    $self->{shutdown_errors}++ if $exit_code != 0;
}
727
c4a221bc
DM
# Handle completion of a regular resource command. Decodes the wait()-style
# $status word, lets handle_service_exitcode() apply the restart policy, and -
# unless a retry was requested - records the result under its uid. Finally,
# results whose uid no longer appears in the service status are pruned.
sub resource_command_finished {
    my ($self, $sid, $uid, $status) = @_;

    my $haenv = $self->{haenv};

    my $w = delete $self->{workers}->{$sid};
    return if !$w; # should not happen

    my $exit_code = -1;

    if ($status == -1) {
        $haenv->log('err', "resource agent $sid finished - failed to execute");
    } elsif (my $sig = ($status & 127)) {
        $haenv->log('err', "resource agent $sid finished - got signal $sig");
    } else {
        $exit_code = ($status >> 8);
    }

    $exit_code = $self->handle_service_exitcode($sid, $w->{state}, $exit_code);

    return if $exit_code == ETRY_AGAIN; # tell nobody, simply retry

    $self->{results}->{$uid} = {
        sid => $w->{sid},
        state => $w->{state},
        exit_code => $exit_code,
    };

    my $ss = $self->{service_status};

    # compute hash of valid/existing uids
    my $valid_uids = {};
    for my $sid (keys %$ss) {
        my $sd = $ss->{$sid};
        next if !$sd->{uid};
        $valid_uids->{$sd->{uid}} = 1;
    }

    # keep only results that belong to a still-existing uid
    my $kept_results = {};
    for my $id (keys %{$self->{results}}) {
        next if !$valid_uids->{$id};
        $kept_results->{$id} = $self->{results}->{$id};
    }
    $self->{results} = $kept_results;
}
773
ea4443cc
TL
# Processes the exit code from a finished resource agent, so that the CRM knows
# if the LRM wants to retry an action based on the current recovery policies for
# the failed service, or the CRM itself must try to recover from the failure.
sub handle_service_exitcode {
    my ($self, $sid, $cmd, $exit_code) = @_;

    my $haenv = $self->{haenv};
    my $tries = $self->{restart_tries};

    my $sc = $haenv->read_service_config();

    # NOTE(review): $cd->{max_restart} may be undef here if the config omits it;
    # the numeric compare below then warns and treats it as 0 - confirm the
    # config layer always fills in a default.
    my $max_restart = 0;
    if (my $cd = $sc->{$sid}) {
        $max_restart = $cd->{max_restart};
    }

    # only 'started' commands are retried; everything else passes through
    if ($cmd eq 'started') {
        if ($exit_code == SUCCESS) {
            $tries->{$sid} = 0;
            return $exit_code;
        } elsif ($exit_code == ERROR) {
            $tries->{$sid} = 0 if !defined($tries->{$sid});

            if ($tries->{$sid} >= $max_restart) {
                $haenv->log('err', "unable to start service $sid on local node".
                    " after $tries->{$sid} retries");
                $tries->{$sid} = 0;
                return ERROR;
            }

            $tries->{$sid}++;

            $haenv->log('warning', "restart policy: retry number $tries->{$sid}" .
                " for service '$sid'");
            # tell CRM that we retry the start
            return ETRY_AGAIN;
        }
    }

    return $exit_code;
}
822
# Execute a single resource-agent command ($cmd) for service $sid through the
# matching resource plugin. Returns one of the HA exit codes (SUCCESS, ERROR,
# EUNKNOWN_*, EWRONG_NODE, ...); dies on a migrate/relocate without target.
# Runs in a forked worker (or inline in tests), so it may set up its own
# environment.
sub exec_resource_agent {
    my ($self, $sid, $service_config, $cmd, $params) = @_;

    # setup execution environment
    $ENV{'PATH'} = '/sbin:/bin:/usr/sbin:/usr/bin';

    my $haenv = $self->{haenv};

    my $nodename = $haenv->nodename();

    my (undef, $service_type, $service_name) = $haenv->parse_sid($sid);

    my $plugin = PVE::HA::Resources->lookup($service_type);
    if (!$plugin) {
        $haenv->log('err', "service type '$service_type' not implemented");
        return EUNKNOWN_SERVICE_TYPE;
    }

    if (!$service_config) {
        $haenv->log('err', "missing resource configuration for '$sid'");
        return EUNKNOWN_SERVICE;
    }

    # process error state early
    if ($cmd eq 'error') {
        $haenv->log('err', "service $sid is in an error state and needs manual " .
            "intervention. Look up 'ERROR RECOVERY' in the documentation.");

        return SUCCESS; # error always succeeds
    }

    if ($service_config->{node} ne $nodename) {
        $haenv->log('err', "service '$sid' not on this node");
        return EWRONG_NODE;
    }

    my $id = $service_name;

    my $running = $plugin->check_running($haenv, $id);

    if ($cmd eq 'started') {

        return SUCCESS if $running;

        $haenv->log("info", "starting service $sid");

        $plugin->start($haenv, $id);

        $running = $plugin->check_running($haenv, $id);

        if ($running) {
            $haenv->log("info", "service status $sid started");
            return SUCCESS;
        } else {
            $haenv->log("warning", "unable to start service $sid");
            return ERROR;
        }

    } elsif ($cmd eq 'request_stop' || $cmd eq 'stopped') {

        return SUCCESS if !$running;

        if (defined($params->{timeout})) {
            $haenv->log("info", "stopping service $sid (timeout=$params->{timeout})");
        } else {
            $haenv->log("info", "stopping service $sid");
        }

        $plugin->shutdown($haenv, $id, $params->{timeout});

        $running = $plugin->check_running($haenv, $id);

        if (!$running) {
            $haenv->log("info", "service status $sid stopped");
            return SUCCESS;
        } else {
            # fixed log message: was "unable to stop stop service"
            $haenv->log("info", "unable to stop service $sid (still running)");
            return ERROR;
        }

    } elsif ($cmd eq 'migrate' || $cmd eq 'relocate') {

        my $target = $params->{target};
        # simplified: the old `return EINVALID_PARAMETER` after this die was unreachable
        die "$cmd '$sid' failed - missing target\n" if !defined($target);

        if ($service_config->{node} eq $target) {
            # already there
            return SUCCESS;
        }

        my $online = ($cmd eq 'migrate') ? 1 : 0;

        my $res = $plugin->migrate($haenv, $id, $target, $online);

        # something went wrong if service is still on this node
        if (!$res) {
            $haenv->log("err", "service $sid not moved (migration error)");
            return ERROR;
        }

        return SUCCESS;

    }

    $haenv->log("err", "implement me (cmd '$cmd')");
    return EUNKNOWN_COMMAND;
}
934
935
5f095798 9361;