]>
Commit | Line | Data |
---|---|---|
1 | package PVE::HA::LRM; | |
2 | ||
3 | # Local Resource Manager | |
4 | ||
5 | use strict; | |
6 | use warnings; | |
7 | use POSIX qw(:sys_wait_h); | |
8 | ||
9 | use PVE::SafeSyslog; | |
10 | use PVE::Tools; | |
11 | use PVE::HA::Tools ':exit_codes'; | |
12 | use PVE::HA::Resources; | |
13 | ||
# Server can have several states:

# Maps each valid LRM state to a human-readable description; also used by
# set_local_status() to reject transitions into unknown states.
my $valid_states = {
    wait_for_agent_lock => "waiting for agent lock",
    active => "got agent_lock",
    maintenance => "going into maintenance",
    lost_agent_lock => "lost agent_lock",
};

# we sleep ~10s per 'active' round, so if no service is available for >= 10 min we'd go in wait
# state giving up the watchdog and the LRM lock voluntarily, ensuring the WD can do no harm
my $max_active_idle_rounds = 60;
26 | ||
# Construct a new LRM instance bound to the given HA environment object.
sub new {
    my ($this, $haenv) = @_;

    # allow calling as either class or instance method
    my $class = ref($this) || $this;

    my %init = (
        haenv => $haenv,
        status => { state => 'startup' },
        # per-service queued/running worker commands and finished results
        workers => {},
        results => {},
        restart_tries => {},
        shutdown_request => 0,
        shutdown_errors => 0,
        # mode can be: active, reboot, shutdown, restart
        mode => 'active',
        cluster_state_update => 0,
        active_idle_rounds => 0,
    );

    my $self = bless \%init, $class;

    $self->set_local_status({ state => 'wait_for_agent_lock' });

    return $self;
}
50 | ||
# Handle an incoming shutdown/restart request: evaluate the datacenter
# shutdown policy, queue stop jobs for local services on a real node shutdown,
# and set $self->{mode} ('maintenance', 'restart' or 'shutdown') accordingly.
# Idempotent - a second call while already shutting down is a no-op.
sub shutdown_request {
    my ($self) = @_;

    return if $self->{shutdown_request}; # already in shutdown mode

    my $haenv = $self->{haenv};

    my $nodename = $haenv->nodename();

    # distinguish real node shutdown/reboot from a mere service restart
    my ($shutdown, $reboot) = $haenv->is_node_shutdown();

    my $dc_cfg = $haenv->get_datacenter_settings();
    my $shutdown_policy = $dc_cfg->{ha}->{shutdown_policy} // 'conditional';

    if ($shutdown) { # don't log this on service restart, only on node shutdown
        $haenv->log('info', "got shutdown request with shutdown policy '$shutdown_policy'");
    }

    my $freeze_all;
    my $maintenance;
    if ($shutdown_policy eq 'conditional') {
        # freeze on reboot, stop (and thus move away) on poweroff
        $freeze_all = $reboot;
    } elsif ($shutdown_policy eq 'freeze') {
        $freeze_all = 1;
    } elsif ($shutdown_policy eq 'failover') {
        $freeze_all = 0;
    } elsif ($shutdown_policy eq 'migrate') {
        $maintenance = 1;
    } else {
        $haenv->log('err', "unknown shutdown policy '$shutdown_policy', fall back to conditional");
        $freeze_all = $reboot;
    }

    if ($maintenance) {
        # we get marked as unavailable by the manager, then all services will
        # be migrated away, we'll still have the same "can we exit" clause than
        # a normal shutdown -> no running service on this node
        # FIXME: after X minutes, add shutdown command for remaining services,
        # e.g., if they have no alternative node???
    } elsif ($shutdown) {
        # *always* queue stop jobs for all services if the node shuts down,
        # independent if it's a reboot or a poweroff, else we may corrupt
        # services or hinder node shutdown
        my $ss = $self->{service_status};

        foreach my $sid (keys %$ss) {
            my $sd = $ss->{$sid};
            next if !$sd->{node};
            next if $sd->{node} ne $nodename;
            # Note: use undef uid to mark shutdown/stop jobs
            $self->queue_resource_command($sid, undef, 'request_stop');
        }
    }

    if ($shutdown) {
        my $shutdown_type = $reboot ? 'reboot' : 'shutdown';
        if ($maintenance) {
            $haenv->log('info', "$shutdown_type LRM, doing maintenance, removing this node from active list");
            $self->{mode} = 'maintenance';
        } elsif ($freeze_all) {
            $haenv->log('info', "$shutdown_type LRM, stop and freeze all services");
            $self->{mode} = 'restart';
        } else {
            $haenv->log('info', "shutdown LRM, stop all services");
            $self->{mode} = 'shutdown';
        }
    } else {
        # plain LRM service restart, never touch the services themselves
        $haenv->log('info', "restart LRM, freeze all services");
        $self->{mode} = 'restart';
    }

    # record the request time; also serves as the "shutdown requested" flag
    $self->{shutdown_request} = $haenv->get_time();

    eval { $self->update_lrm_status() or die "not quorate?\n"; };
    if (my $err = $@) {
        $haenv->log('err', "unable to update lrm status file - $err");
    }
}
129 | ||
# Accessor for the current local status hash
# ({ state => ..., state_change_time => ... }).
sub get_local_status {
    my ($self) = @_;
    return $self->{status};
}
135 | ||
# Transition the local state machine to $new->{state}. Dies on states not in
# $valid_states; a no-op when the state did not actually change. Stamps
# state_change_time on the new status hash.
sub set_local_status {
    my ($self, $new) = @_;

    die "invalid state '$new->{state}'" if !$valid_states->{$new->{state}};

    my $haenv = $self->{haenv};

    my $old = $self->{status};

    # important: only update if it really changed
    return if $old->{state} eq $new->{state};

    $haenv->log('info', "status change $old->{state} => $new->{state}");

    $new->{state_change_time} = $haenv->get_time();

    $self->{status} = $new;
}
154 | ||
# Persist the current LRM state/mode/results via the environment.
# Returns 1 on success, 0 when not quorate or the write failed.
sub update_lrm_status {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    # without quorum we must not (and cannot reliably) write our status
    return 0 if !$haenv->quorate();

    my $lrm_status = {
        timestamp => $haenv->get_time(),
        state => $self->{status}->{state},
        mode => $self->{mode},
        results => $self->{results},
    };

    eval { $haenv->write_lrm_status($lrm_status) };
    if (my $write_err = $@) {
        $haenv->log('err', "unable to write lrm status file - $write_err");
        return 0;
    }

    return 1;
}
177 | ||
# Refresh the cached manager view (service_status and our node_status) from
# the shared manager status file. Returns 1 on success; returns empty/undef
# when reading the manager status failed (cached data stays untouched then).
sub update_service_status {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    my $ms = eval { $haenv->read_manager_status(); };
    if (my $err = $@) {
        $haenv->log('err', "updating service status from manager failed: $err");
        # bare return: undef in scalar context, empty list in list context -
        # avoids the one-element-list pitfall of 'return undef'
        return;
    }

    $self->{service_status} = $ms->{service_status} || {};
    my $nodename = $haenv->nodename();
    $self->{node_status} = $ms->{node_status}->{$nodename} || 'unknown';
    return 1;
}
194 | ||
# Try to acquire the per-node HA agent lock and keep the watchdog armed while
# we hold it. Retries up to 5 times / ~5 seconds. Returns 1 when the lock is
# held (watchdog opened or refreshed), 0 otherwise.
sub get_protected_ha_agent_lock {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    my $tries = 0;
    my $starttime = $haenv->get_time();

    while (1) {
        if ($haenv->get_ha_agent_lock()) {
            # got (or kept) the lock -> (re)arm the self-fencing watchdog
            if ($self->{ha_agent_wd}) {
                $haenv->watchdog_update($self->{ha_agent_wd});
            } else {
                $self->{ha_agent_wd} = $haenv->watchdog_open();
            }
            return 1;
        }

        last if ++$tries > 5; # at most 5 attempts
        last if ($haenv->get_time() - $starttime) > 5; # and at most ~5 seconds

        $haenv->sleep(1);
    }

    return 0;
}
225 | ||
# only cares if any service has the local node as their node, independent of which req.state it is
sub has_configured_service_on_local_node {
    my ($self) = @_;

    my $nodename = $self->{haenv}->nodename();
    my $ss = $self->{service_status};

    for my $sid (keys %$ss) {
        my $node = $ss->{$sid}->{node};
        return 1 if $node && $node eq $nodename;
    }

    return 0;
}
242 | ||
# True when fencing of this node is requested - either because individual
# services on it are in 'fence' state or the node itself is marked 'fence'.
sub is_fence_requested {
    my ($self) = @_;

    my $haenv = $self->{haenv};
    my $nodename = $haenv->nodename();

    my $fenced_services =
        PVE::HA::Tools::count_fenced_services($self->{service_status}, $nodename);

    return $fenced_services || $self->{node_status} eq 'fence';
}
255 | ||
# Count the services assigned to the local node that are actively managed,
# i.e. excluding 'stopped', 'freeze' and 'error' request states.
sub active_service_count {
    my ($self) = @_;

    my $nodename = $self->{haenv}->nodename();
    my $ss = $self->{service_status};

    my $active = 0;
    for my $sid (keys %$ss) {
        my $sd = $ss->{$sid};
        next if !$sd->{node} || $sd->{node} ne $nodename;

        my $req_state = $sd->{state};
        next if !defined($req_state);
        # stopped, frozen and erroneous services are not counted as active;
        # NOTE: 'ignored' ones are already dropped by the manager from service_status
        next if $req_state eq 'stopped' || $req_state eq 'freeze' || $req_state eq 'error';

        $active++;
    }

    return $active;
}
282 | ||
# guard so the initial LRM status file write only happens once per daemon run
my $wrote_lrm_status_at_startup = 0;

# Run a single main-loop iteration: refresh the cluster state snapshot and
# delegate the actual state handling to work(). Returns work()'s result
# (false requests the main loop to exit).
sub do_one_iteration {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    $haenv->loop_start_hook();

    # remember if the cluster state could be refreshed this round; work()
    # skips resource management when it is stale
    $self->{cluster_state_update} = $haenv->cluster_state_update();

    my $res = $self->work();

    $haenv->loop_end_hook();

    return $res;
}
300 | ||
# NOTE: this is disabling the self-fence mechanism, so it must NOT be called with active services
# It's normally *only* OK on graceful shutdown (with no services, or all services frozen)
# Lexical (my) sub: only callable from within this file, by design.
my sub give_up_watchdog_protection {
    my ($self) = @_;

    if ($self->{ha_agent_wd}) {
        $self->{haenv}->watchdog_close($self->{ha_agent_wd});
        delete $self->{ha_agent_wd}; # only delete after close!
    }
}
311 | ||
# One LRM scheduling round. First performs state transitions between
# wait_for_agent_lock / active / maintenance / lost_agent_lock, then does the
# per-state work (managing resources, running workers, handling shutdown).
# Returns 0 to request the main loop to exit, a true value to keep running.
sub work {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    # on first round, make sure our status file exists before doing anything
    if (!$wrote_lrm_status_at_startup) {
        if ($self->update_lrm_status()) {
            $wrote_lrm_status_at_startup = 1;
        } else {
            # do nothing, presumably not quorate yet - retry next round
            $haenv->sleep(5);
            return $self->{shutdown_request} ? 0 : 1;
        }
    }

    my $status = $self->get_local_status();
    my $state = $status->{state};

    $self->update_service_status();

    my $fence_request = $self->is_fence_requested();

    # do state changes first

    my $ctime = $haenv->get_time();

    if ($state eq 'wait_for_agent_lock') {

        my $service_count = $self->active_service_count();

        # only grab the lock (and watchdog) when there is actually work to do
        if (!$fence_request && $service_count && $haenv->quorate()) {
            if ($self->get_protected_ha_agent_lock()) {
                $self->set_local_status({ state => 'active' });
            }
        }

    } elsif ($state eq 'lost_agent_lock') {

        # try to re-acquire the lock as long as we're not being fenced
        if (!$fence_request && $haenv->quorate()) {
            if ($self->get_protected_ha_agent_lock()) {
                $self->set_local_status({ state => 'active' });
            }
        }

    } elsif ($state eq 'active') {

        if ($fence_request) {
            $haenv->log('err', "node need to be fenced - releasing agent_lock\n");
            $self->set_local_status({ state => 'lost_agent_lock'});
        } elsif (!$self->get_protected_ha_agent_lock()) {
            $self->set_local_status({ state => 'lost_agent_lock'});
        } elsif ($self->{mode} eq 'maintenance') {
            $self->set_local_status({ state => 'maintenance'});
        } else {
            if (!$self->has_configured_service_on_local_node() && !$self->run_workers()) {
                # no active service configured for this node and all (old) workers are done
                $self->{active_idle_rounds}++;
                if ($self->{active_idle_rounds} > $max_active_idle_rounds) {
                    $haenv->log('info', "node had no service configured for $max_active_idle_rounds rounds, going idle.\n");
                    # safety: no active service & no running worker for quite some time -> OK
                    $haenv->release_ha_agent_lock();
                    give_up_watchdog_protection($self);
                    $self->set_local_status({ state => 'wait_for_agent_lock'});
                    $self->{active_idle_rounds} = 0;
                }
            } elsif ($self->{active_idle_rounds}) {
                # a service showed up again, reset the idle counter
                $self->{active_idle_rounds} = 0;
            }
        }
    } elsif ($state eq 'maintenance') {

        if ($fence_request) {
            $haenv->log('err', "node need to be fenced during maintenance mode - releasing agent_lock\n");
            $self->set_local_status({ state => 'lost_agent_lock'});
        } elsif (!$self->get_protected_ha_agent_lock()) {
            $self->set_local_status({ state => 'lost_agent_lock'});
        }
    }

    # re-read, the transitions above may have changed our state
    $status = $self->get_local_status();
    $state = $status->{state};

    # do work

    if ($state eq 'wait_for_agent_lock') {

        return 0 if $self->{shutdown_request};

        $self->update_lrm_status();

        $haenv->sleep(5);

    } elsif ($state eq 'active') {

        my $startime = $haenv->get_time();

        my $max_time = 10;

        my $shutdown = 0;

        # do work (max_time seconds)
        eval {
            # fixme: set alert timer

            # if we could not get the current service status there's no point
            # in doing anything, try again next round.
            return if !$self->update_service_status();

            if ($self->{shutdown_request}) {

                if ($self->{mode} eq 'restart') {
                    # catch exited workers to update service state
                    my $workers = $self->run_workers();
                    my $service_count = $self->active_service_count();

                    if ($service_count == 0 && $workers == 0) {
                        # safety: no active services or workers -> OK
                        give_up_watchdog_protection($self);
                        $shutdown = 1;

                        # restart with no or frozen services, release the lock
                        $haenv->release_ha_agent_lock();
                    }
                } else {

                    if ($self->run_workers() == 0) {
                        if ($self->{shutdown_errors} == 0) {
                            # safety: no active services and LRM shutdown -> OK
                            give_up_watchdog_protection($self);

                            # shutdown with all services stopped thus release the lock
                            $haenv->release_ha_agent_lock();
                        }

                        $shutdown = 1;
                    }
                }
            } else {
                if (!$self->{cluster_state_update}) {
                    # update failed but we could still renew our lock (cfs restart?),
                    # safely skip manage and expect to update just fine next round
                    $haenv->log('notice', "temporary inconsistent cluster state " .
                        "(cfs restart?), skip round");
                    return;
                }

                $self->manage_resources();

            }
        };
        if (my $err = $@) {
            $haenv->log('err', "got unexpected error - $err");
        }

        $self->update_lrm_status();

        return 0 if $shutdown;

        $haenv->sleep_until($startime + $max_time);

    } elsif ($state eq 'lost_agent_lock') {

        # NOTE: watchdog is active and will trigger soon!
        # so we hope to get the lock back soon!
        if ($self->{shutdown_request}) {

            my $service_count = $self->active_service_count();

            if ($service_count > 0) {
                $haenv->log('err', "get shutdown request in state 'lost_agent_lock' - " .
                    "detected $service_count running services");

                if ($self->{mode} eq 'restart') {
                    my $state_mt = $self->{status}->{state_change_time};

                    # watchdog should have already triggered, so either it's
                    # set to noboot or it failed. As we are in restart mode, and
                    # have infinity stoptimeout -> exit now - we don't touch services
                    # or change state, so this is safe, relatively speaking
                    if (($haenv->get_time() - $state_mt) > 90) {
                        $haenv->log('err', "lost agent lock and restart request for over 90 seconds - giving up!");
                        return 0;
                    }
                }
            } else {
                # safety: all services are stopped, so we can close the watchdog
                give_up_watchdog_protection($self);

                return 0;
            }
        }

        $haenv->sleep(5);

    } elsif ($state eq 'maintenance') {

        my $startime = $haenv->get_time();
        # NOTE(review): this bare 'return' yields undef, not the 0/1 the other
        # branches use - confirm callers treat undef the same as "continue"
        return if !$self->update_service_status();

        # wait until all active services moved away
        my $service_count = $self->active_service_count();

        my $exit_lrm = 0;

        if ($self->{shutdown_request}) {
            if ($service_count == 0 && $self->run_workers() == 0) {
                # safety: going into maintenance and all active services got moved -> OK
                give_up_watchdog_protection($self);

                $exit_lrm = 1;

                # restart with no or frozen services, release the lock
                $haenv->release_ha_agent_lock();
            }
        }

        $self->manage_resources() if !$exit_lrm;

        $self->update_lrm_status();

        return 0 if $exit_lrm;

        $haenv->sleep_until($startime + 5);

    } else {

        die "got unexpected status '$state'\n";

    }

    return 1;
}
544 | ||
# Start and supervise queued worker commands for up to ~8 seconds, reaping
# finished children along the way. Returns the number of workers still queued
# or running afterwards (0 means fully drained).
sub run_workers {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    my $starttime = $haenv->get_time();

    # number of workers to start, if 0 we exec the command directly without forking
    my $max_workers = $haenv->get_max_workers();
    my $sc = $haenv->read_service_config();

    my $worker = $self->{workers};
    # we only got limited time but want to ensure that every queued worker is scheduled
    # eventually, so sort by the count a worker was seen here in this loop
    my $fair_sorter = sub {
        $worker->{$b}->{start_tries} <=> $worker->{$a}->{start_tries} || $a cmp $b
    };

    while (($haenv->get_time() - $starttime) <= 8) {
        # reap exited children and get the number of still-running workers
        my $count = $self->check_active_workers();

        # only consider workers not yet started (no pid)
        for my $sid (sort $fair_sorter grep { !$worker->{$_}->{pid} } keys %$worker) {
            my $w = $worker->{$sid};
            # higher try-count means higher priority especially compared to newly queued jobs, so
            # count every try to avoid starvation
            $w->{start_tries}++;
            next if $count >= $max_workers && $max_workers > 0;

            # only fork if we may, else call exec_resource_agent directly (e.g. for tests)
            if ($max_workers > 0) {
                my $pid = fork();
                if (!defined($pid)) {
                    $haenv->log('err', "forking worker failed - $!");
                    $count = 0; last; # abort, try later
                } elsif ($pid == 0) {
                    $haenv->after_fork(); # cleanup

                    # do work
                    my $res = -1;
                    eval {
                        $res = $self->exec_resource_agent($sid, $sc->{$sid}, $w->{state}, $w->{params});
                    };
                    if (my $err = $@) {
                        $haenv->log('err', $err);
                        # _exit, not exit: skip END blocks/destructors in the child
                        POSIX::_exit(-1);
                    }
                    POSIX::_exit($res);
                } else {
                    $count++;
                    $w->{pid} = $pid;
                }
            } else {
                my $res = -1;
                eval {
                    $res = $self->exec_resource_agent($sid, $sc->{$sid}, $w->{state}, $w->{params});
                    # shift left to mimic a wait()-style status word for the handlers below
                    $res = $res << 8 if $res > 0;
                };
                if (my $err = $@) {
                    $haenv->log('err', $err);
                }
                # undef uid marks shutdown/stop jobs (see queue_resource_command)
                if (defined($w->{uid})) {
                    $self->resource_command_finished($sid, $w->{uid}, $res);
                } else {
                    $self->stop_command_finished($sid, $res);
                }
            }
        }

        last if !$count;

        $haenv->sleep(1);
    }

    return scalar(keys %{$self->{workers}});
}
620 | ||
# Queue a resource command for every service the manager assigned to the local
# node (skipping 'recovery' and 'freeze' states), then run the worker queue.
# Returns run_workers()'s count of still pending workers.
sub manage_resources {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    my $nodename = $haenv->nodename();

    my $ss = $self->{service_status};

    # drop restart counters for services that vanished from the manager status
    foreach my $sid (keys %{$self->{restart_tries}}) {
        delete $self->{restart_tries}->{$sid} if !$ss->{$sid};
    }

    foreach my $sid (keys %$ss) {
        my $sd = $ss->{$sid};
        next if !$sd->{node} || !$sd->{uid};
        next if $sd->{node} ne $nodename;
        my $request_state = $sd->{state};
        next if !defined($request_state);
        # can only happen for restricted groups where the failed node itself needs to be the
        # recovery target. Always let the master first do so, it will then be marked as 'stopped'
        # and we can just continue normally. But we must NOT do anything with it while still in
        # recovery
        next if $request_state eq 'recovery';
        next if $request_state eq 'freeze';

        $self->queue_resource_command($sid, $sd->{uid}, $request_state, {
            'target' => $sd->{target},
            'timeout' => $sd->{timeout},
        });
    }

    return $self->run_workers();
}
654 | ||
# Queue a command ($state, e.g. 'started'/'stopped'/'migrate') for service
# $sid under the manager-assigned $uid (undef uid marks stop/shutdown jobs).
# Already-running workers are never replaced; queued-but-unstarted entries are
# overwritten by newer commands.
sub queue_resource_command {
    my ($self, $sid, $uid, $state, $params) = @_;

    # Do not queue the exact same command twice, as that may lead to an
    # inconsistent HA state when the first command fails but the CRM does not
    # process its failure right away and the LRM starts a second try, without
    # the CRM knowing of it (race condition). The 'stopped' command is an
    # exception as we do not process its result in the CRM and we want to
    # execute it always (even with no active CRM).
    return if $state ne 'stopped' && $uid && defined($self->{results}->{$uid});

    if (my $queued = $self->{workers}->{$sid}) {
        return if $queued->{pid}; # already started, leave it alone
        # not started yet: drop it so the new command takes its place
        delete $self->{workers}->{$sid};
    }

    my $entry = {
        sid => $sid,
        uid => $uid,
        state => $state,
        start_tries => 0,
    };
    $entry->{params} = $params if $params;

    $self->{workers}->{$sid} = $entry;
}
680 | ||
# Reap exited worker processes (dispatching their wait-status to the proper
# *_command_finished handler) and return how many workers are still running.
sub check_active_workers {
    my ($self) = @_;

    my $running = 0;
    for my $sid (keys %{ $self->{workers} }) {
        my $w = $self->{workers}->{$sid};
        my $pid = $w->{pid} or next; # not started yet

        my $reaped = waitpid($pid, WNOHANG); # non-blocking status check
        if (defined($reaped) && $reaped == $pid) {
            # child exited - $? holds its wait()-status word
            if (defined($w->{uid})) {
                $self->resource_command_finished($sid, $w->{uid}, $?);
            } else {
                $self->stop_command_finished($sid, $?);
            }
        } else {
            $running++; # still active
        }
    }

    return $running;
}
704 | ||
# Handle the wait()-status of a finished stop/shutdown job (undef-uid worker):
# decode the status word, log failures and bump shutdown_errors on non-zero
# exit so the LRM knows the shutdown did not go cleanly.
sub stop_command_finished {
    my ($self, $sid, $status) = @_;

    my $haenv = $self->{haenv};

    my $w = delete $self->{workers}->{$sid};
    return if !$w; # should not happen

    # -1 stays when exec failed or the child was killed by a signal
    my $exit_code = -1;

    if ($status == -1) {
        $haenv->log('err', "resource agent $sid finished - failed to execute");
    } elsif (my $sig = ($status & 127)) {
        $haenv->log('err', "resource agent $sid finished - got signal $sig");
    } else {
        $exit_code = ($status >> 8);
    }

    $self->{shutdown_errors}++ if $exit_code != 0;
}
727 | ||
# Handle the wait()-status of a finished resource-agent worker: decode the
# status word, let handle_service_exitcode() apply the restart policy, record
# the result under the command's $uid, and garbage-collect results whose uid
# is no longer referenced by any service in the manager status.
sub resource_command_finished {
    my ($self, $sid, $uid, $status) = @_;

    my $haenv = $self->{haenv};

    my $w = delete $self->{workers}->{$sid};
    return if !$w; # should not happen

    # -1 stays when exec failed or the child was killed by a signal
    my $exit_code = -1;

    if ($status == -1) {
        $haenv->log('err', "resource agent $sid finished - failed to execute");
    } elsif (my $sig = ($status & 127)) {
        $haenv->log('err', "resource agent $sid finished - got signal $sig");
    } else {
        $exit_code = ($status >> 8);
    }

    $exit_code = $self->handle_service_exitcode($sid, $w->{state}, $exit_code);

    return if $exit_code == ETRY_AGAIN; # tell nobody, simply retry

    $self->{results}->{$uid} = {
        sid => $w->{sid},
        state => $w->{state},
        exit_code => $exit_code,
    };

    my $ss = $self->{service_status};

    # compute hash of valid/existing uids
    my $valid_uids = {};
    foreach my $sid (keys %$ss) {
        my $sd = $ss->{$sid};
        next if !$sd->{uid};
        $valid_uids->{$sd->{uid}} = 1;
    }

    # keep only results whose uid is still known to the manager
    my $results = {};
    foreach my $id (keys %{$self->{results}}) {
        next if !$valid_uids->{$id};
        $results->{$id} = $self->{results}->{$id};
    }
    $self->{results} = $results;
}
773 | ||
# processes the exit code from a finished resource agent, so that the CRM knows
# if the LRM wants to retry an action based on the current recovery policies for
# the failed service, or the CRM itself must try to recover from the failure.
#
# Returns the (possibly rewritten) exit code; ETRY_AGAIN signals the caller to
# retry a failed 'started' command as long as max_restart is not exhausted.
sub handle_service_exitcode {
    my ($self, $sid, $cmd, $exit_code) = @_;

    my $haenv = $self->{haenv};
    my $tries = $self->{restart_tries};

    my $sc = $haenv->read_service_config();

    # default to no restarts when the service has no (complete) configuration;
    # the // avoids an uninitialized-value warning if max_restart is unset
    my $max_restart = 0;
    if (my $cd = $sc->{$sid}) {
        $max_restart = $cd->{max_restart} // 0;
    }

    if ($cmd eq 'started') {

        if ($exit_code == SUCCESS) {
            # service is up (again) - reset the retry counter
            $tries->{$sid} = 0;
            return $exit_code;

        } elsif ($exit_code == ERROR) {

            $tries->{$sid} //= 0;

            if ($tries->{$sid} >= $max_restart) {
                $haenv->log('err', "unable to start service $sid on local node".
                    " after $tries->{$sid} retries");
                $tries->{$sid} = 0;
                return ERROR;
            }

            $tries->{$sid}++;

            $haenv->log('warning', "restart policy: retry number $tries->{$sid}" .
                " for service '$sid'");
            # tell CRM that we retry the start
            return ETRY_AGAIN;
        }
    }

    return $exit_code;
}
822 | ||
# Execute one resource-agent command ($cmd) for service $sid via its type
# plugin. Runs in the worker (possibly forked) context. Returns one of the
# exit codes exported by PVE::HA::Tools (SUCCESS, ERROR, EUNKNOWN_*, ...);
# dies on a migrate/relocate request without target.
sub exec_resource_agent {
    my ($self, $sid, $service_config, $cmd, $params) = @_;

    # setup execution environment
    $ENV{'PATH'} = '/sbin:/bin:/usr/sbin:/usr/bin';

    my $haenv = $self->{haenv};

    my $nodename = $haenv->nodename();

    my (undef, $service_type, $service_name) = $haenv->parse_sid($sid);

    my $plugin = PVE::HA::Resources->lookup($service_type);
    if (!$plugin) {
        $haenv->log('err', "service type '$service_type' not implemented");
        return EUNKNOWN_SERVICE_TYPE;
    }

    if (!$service_config) {
        $haenv->log('err', "missing resource configuration for '$sid'");
        return EUNKNOWN_SERVICE;
    }

    # process error state early
    if ($cmd eq 'error') {
        $haenv->log('err', "service $sid is in an error state and needs manual " .
            "intervention. Look up 'ERROR RECOVERY' in the documentation.");

        return SUCCESS; # error always succeeds
    }

    if ($service_config->{node} ne $nodename) {
        $haenv->log('err', "service '$sid' not on this node");
        return EWRONG_NODE;
    }

    my $id = $service_name;

    my $running = $plugin->check_running($haenv, $id);

    if ($cmd eq 'started') {

        return SUCCESS if $running;

        $haenv->log("info", "starting service $sid");

        $plugin->start($haenv, $id);

        # re-check instead of trusting start(): the plugin may not report failure
        $running = $plugin->check_running($haenv, $id);

        if ($running) {
            $haenv->log("info", "service status $sid started");
            return SUCCESS;
        } else {
            $haenv->log("warning", "unable to start service $sid");
            return ERROR;
        }

    } elsif ($cmd eq 'request_stop' || $cmd eq 'stopped') {

        return SUCCESS if !$running;

        if (defined($params->{timeout})) {
            $haenv->log("info", "stopping service $sid (timeout=$params->{timeout})");
        } else {
            $haenv->log("info", "stopping service $sid");
        }

        $plugin->shutdown($haenv, $id, $params->{timeout});

        $running = $plugin->check_running($haenv, $id);

        if (!$running) {
            $haenv->log("info", "service status $sid stopped");
            return SUCCESS;
        } else {
            # message fixed: previously read "unable to stop stop service"
            $haenv->log("info", "unable to stop service $sid (still running)");
            return ERROR;
        }

    } elsif ($cmd eq 'migrate' || $cmd eq 'relocate') {

        my $target = $params->{target};
        # simplified: the old 'return EINVALID_PARAMETER' after this die was
        # unreachable, as the die fired under the exact same condition
        die "$cmd '$sid' failed - missing target\n" if !defined($target);

        if ($service_config->{node} eq $target) {
            # already there
            return SUCCESS;
        }

        my $online = ($cmd eq 'migrate') ? 1 : 0;

        my $res = $plugin->migrate($haenv, $id, $target, $online);

        # something went wrong if service is still on this node
        if (!$res) {
            $haenv->log("err", "service $sid not moved (migration error)");
            return ERROR;
        }

        return SUCCESS;

    }

    $haenv->log("err", "implement me (cmd '$cmd')");
    return EUNKNOWN_COMMAND;
}
934 | ||
935 | ||
936 | 1; |