package PVE::HA::LRM;

# Local Resource Manager

use strict;
use warnings;
use POSIX qw(:sys_wait_h);

use PVE::SafeSyslog;
use PVE::Tools;
use PVE::HA::Tools ':exit_codes';
use PVE::HA::Resources;

# Server can have several states:

my $valid_states = {
    wait_for_agent_lock => "waiting for agent lock",
    active => "got agent_lock",
    maintenance => "going into maintenance",
    lost_agent_lock => "lost agent_lock",
};

# We sleep ~10s per 'active' round, so if no service is available for >= 10 min we go into wait
# state, giving up the watchdog and the LRM lock voluntarily, ensuring the WD can do no harm.
my $max_active_idle_rounds = 60;
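
# Typical state transitions, as implemented in work() below:
#   wait_for_agent_lock -> active              (quorate, local services present, lock acquired)
#   active              -> maintenance         (maintenance requested)
#   maintenance         -> active              (maintenance request lifted, lock still held)
#   active/maintenance  -> lost_agent_lock     (fence requested or lock renewal failed)
#   lost_agent_lock     -> active              (quorate again and lock re-acquired)
#   active              -> wait_for_agent_lock (idle for more than $max_active_idle_rounds)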

sub new {
    my ($this, $haenv) = @_;

    my $class = ref($this) || $this;

    my $self = bless {
        haenv => $haenv,
        status => { state => 'startup' },
        workers => {},
        results => {},
        restart_tries => {},
        shutdown_request => 0,
        shutdown_errors => 0,
        # mode can be: active, reboot, shutdown, restart, maintenance
        mode => 'active',
        cluster_state_update => 0,
        active_idle_rounds => 0,
    }, $class;

    $self->set_local_status({ state => 'wait_for_agent_lock' });

    return $self;
}
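
# Minimal usage sketch (for illustration only - in production the LRM is driven by the
# pve-ha-lrm daemon, and $haenv is a concrete PVE::HA::Env instance):
#
#   my $lrm = PVE::HA::LRM->new($haenv);
#   while ($lrm->do_one_iteration()) {
#       # iterate until a shutdown request completed (do_one_iteration returns 0)
#   }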

sub shutdown_request {
    my ($self) = @_;

    return if $self->{shutdown_request}; # already in shutdown mode

    my $haenv = $self->{haenv};

    my $nodename = $haenv->nodename();

    my ($shutdown, $reboot) = $haenv->is_node_shutdown();

    my $dc_cfg = $haenv->get_datacenter_settings();
    my $shutdown_policy = $dc_cfg->{ha}->{shutdown_policy} // 'conditional';

    if ($shutdown) { # don't log this on service restart, only on node shutdown
        $haenv->log('info', "got shutdown request with shutdown policy '$shutdown_policy'");
    }

    my $freeze_all;
    my $maintenance;
    if ($shutdown_policy eq 'conditional') {
        $freeze_all = $reboot;
    } elsif ($shutdown_policy eq 'freeze') {
        $freeze_all = 1;
    } elsif ($shutdown_policy eq 'failover') {
        $freeze_all = 0;
    } elsif ($shutdown_policy eq 'migrate') {
        $maintenance = 1;
    } else {
        $haenv->log('err', "unknown shutdown policy '$shutdown_policy', falling back to conditional");
        $freeze_all = $reboot;
    }

    if ($maintenance) {
        # we get marked as unavailable by the manager, then all services will
        # be migrated away; we still have the same "can we exit" condition as
        # a normal shutdown -> no running service on this node
        # FIXME: after X minutes, add shutdown command for remaining services,
        # e.g., if they have no alternative node???
    } elsif ($shutdown) {
        # *always* queue stop jobs for all services if the node shuts down,
        # independent of whether it's a reboot or a poweroff, else we may corrupt
        # services or hinder node shutdown
        my $ss = $self->{service_status};

        foreach my $sid (keys %$ss) {
            my $sd = $ss->{$sid};
            next if !$sd->{node};
            next if $sd->{node} ne $nodename;
            # Note: use undef uid to mark shutdown/stop jobs
            $self->queue_resource_command($sid, undef, 'request_stop');
        }
    }

    if ($shutdown) {
        my $shutdown_type = $reboot ? 'reboot' : 'shutdown';
        if ($self->is_maintenance_requested()) {
            if ($maintenance) {
                $haenv->log(
                    'info',
                    "$shutdown_type LRM, ignore maintenance policy, already in maintenance mode",
                );
            } else {
                $haenv->log(
                    'info',
                    "$shutdown_type LRM, ignore $shutdown_policy policy as manual maintenance mode is enabled",
                );
            }
        } elsif ($maintenance) {
            $haenv->log('info', "$shutdown_type LRM, doing maintenance, removing this node from active list");
            $self->{mode} = 'maintenance';
        } elsif ($freeze_all) {
            $haenv->log('info', "$shutdown_type LRM, stop and freeze all services");
            $self->{mode} = 'restart';
        } else {
            $haenv->log('info', "shutdown LRM, stop all services");
            $self->{mode} = 'shutdown';
        }
    } elsif ($self->is_maintenance_requested()) {
        $haenv->log('info', "Restarting LRM in maintenance mode may be delayed until all services are moved");
    } else {
        $haenv->log('info', "restart LRM, freeze all services");
        $self->{mode} = 'restart';
    }

    $self->{shutdown_request} = $haenv->get_time();

    eval { $self->update_lrm_status() or die "not quorate?\n"; };
    if (my $err = $@) {
        $haenv->log('err', "unable to update lrm status file - $err");
    }
}
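
# Summary of the shutdown policies handled above:
#   conditional -> freeze services on reboot, stop them on poweroff
#   freeze      -> always stop and freeze services
#   failover    -> always stop services, so the manager can recover them elsewhere
#   migrate     -> enter maintenance mode and wait until all services moved away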

sub get_local_status {
    my ($self) = @_;

    return $self->{status};
}

sub set_local_status {
    my ($self, $new) = @_;

    die "invalid state '$new->{state}'" if !$valid_states->{$new->{state}};

    my $haenv = $self->{haenv};

    my $old = $self->{status};

    # important: only update if it really changed
    return if $old->{state} eq $new->{state};

    $haenv->log('info', "status change $old->{state} => $new->{state}");

    $new->{state_change_time} = $haenv->get_time();

    $self->{status} = $new;
}

sub update_lrm_status {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    return 0 if !$haenv->quorate();

    my $lrm_status = {
        state => $self->{status}->{state},
        mode => $self->{mode},
        results => $self->{results},
        timestamp => $haenv->get_time(),
    };

    eval { $haenv->write_lrm_status($lrm_status); };
    if (my $err = $@) {
        $haenv->log('err', "unable to write lrm status file - $err");
        return 0;
    }

    return 1;
}

sub update_service_status {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    my $ms = eval { $haenv->read_manager_status(); };
    if (my $err = $@) {
        $haenv->log('err', "updating service status from manager failed: $err");
        return undef;
    } else {
        $self->{service_status} = $ms->{service_status} || {};
        my $nodename = $haenv->nodename();
        $self->{node_status} = $ms->{node_status}->{$nodename} || 'unknown';

        # FIXME: method name is a bit confusing for doing this, either rename or move
        if (!$self->{shutdown_request}) {
            my $request = $ms->{node_request}->{$nodename} // {};
            if ($request->{maintenance}) {
                $self->{mode} = 'maintenance';
            } elsif ($self->{mode} eq 'maintenance') {
                $self->{mode} = 'active';
            }
        }

        return 1;
    }
}
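
# Note: besides the shutdown-policy path in shutdown_request(), maintenance mode can also
# be requested manually through the manager status' node_request entry, which is picked up
# (and cleared again once lifted) in update_service_status() above.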

sub get_protected_ha_agent_lock {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    my $count = 0;
    my $starttime = $haenv->get_time();

    for (;;) {

        if ($haenv->get_ha_agent_lock()) {
            if ($self->{ha_agent_wd}) {
                $haenv->watchdog_update($self->{ha_agent_wd});
            } else {
                my $wfh = $haenv->watchdog_open();
                $self->{ha_agent_wd} = $wfh;
            }
            return 1;
        }

        last if ++$count > 5; # try at most 5 times

        my $delay = $haenv->get_time() - $starttime;
        last if $delay > 5; # for at most 5 seconds

        $haenv->sleep(1);
    }

    return 0;
}
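
# Note: the agent lock and the watchdog are coupled above - whenever we acquire (or keep)
# the lock, the watchdog gets opened or updated; losing the lock while services are active
# leaves the watchdog armed, i.e., the node can self-fence.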

# only checks whether any service has the local node set as its node, independent of its
# request state
sub has_configured_service_on_local_node {
    my ($self) = @_;

    my $haenv = $self->{haenv};
    my $nodename = $haenv->nodename();

    my $ss = $self->{service_status};
    foreach my $sid (keys %$ss) {
        my $sd = $ss->{$sid};
        next if !$sd->{node} || $sd->{node} ne $nodename;

        return 1;
    }
    return 0;
}

sub is_fence_requested {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    my $nodename = $haenv->nodename();
    my $ss = $self->{service_status};

    my $fenced_services = PVE::HA::Tools::count_fenced_services($ss, $nodename);

    return $fenced_services || $self->{node_status} eq 'fence';
}

sub is_maintenance_requested {
    my ($self) = @_;

    # shutdown maintenance or manual request
    return $self->{mode} eq 'maintenance';
}

sub active_service_count {
    my ($self) = @_;

    my $haenv = $self->{haenv};
    my $nodename = $haenv->nodename();

    my $ss = $self->{service_status};

    my $count = 0;
    foreach my $sid (keys %$ss) {
        my $sd = $ss->{$sid};
        next if !$sd->{node};
        next if $sd->{node} ne $nodename;
        my $req_state = $sd->{state};
        next if !defined($req_state);
        next if $req_state eq 'stopped';
        # NOTE: 'ignored' ones are already dropped by the manager from service_status
        next if $req_state eq 'freeze';
        # erroneous services are not managed by HA, don't count them as active
        next if $req_state eq 'error';
        # request_start is for (optional) better node selection for stop -> started transition
        next if $req_state eq 'request_start';

        $count++;
    }

    return $count;
}

my $wrote_lrm_status_at_startup = 0;

sub do_one_iteration {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    $haenv->loop_start_hook();

    $self->{cluster_state_update} = $haenv->cluster_state_update();

    my $res = $self->work();

    $haenv->loop_end_hook();

    return $res;
}

# NOTE: this disables the self-fence mechanism, so it must NOT be called with active services.
# It's normally *only* OK on graceful shutdown (with no services, or all services frozen)
my sub give_up_watchdog_protection {
    my ($self) = @_;

    if ($self->{ha_agent_wd}) {
        $self->{haenv}->watchdog_close($self->{ha_agent_wd});
        delete $self->{ha_agent_wd}; # only delete after close!
    }
}

sub work {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    if (!$wrote_lrm_status_at_startup) {
        if ($self->update_lrm_status()) {
            $wrote_lrm_status_at_startup = 1;
        } else {
            # do nothing
            $haenv->sleep(5);
            return $self->{shutdown_request} ? 0 : 1;
        }
    }

    my $status = $self->get_local_status();
    my $state = $status->{state};

    $self->update_service_status();

    my $fence_request = $self->is_fence_requested();

    # do state changes first

    my $ctime = $haenv->get_time();

    if ($state eq 'wait_for_agent_lock') {

        my $service_count = $self->active_service_count();

        if (!$fence_request && $service_count && $haenv->quorate()) {
            if ($self->get_protected_ha_agent_lock()) {
                $self->set_local_status({ state => 'active' });
            }
        }

    } elsif ($state eq 'lost_agent_lock') {

        if (!$fence_request && $haenv->quorate()) {
            if ($self->get_protected_ha_agent_lock()) {
                $self->set_local_status({ state => 'active' });
            }
        }

    } elsif ($state eq 'active') {

        if ($fence_request) {
            $haenv->log('err', "node needs to be fenced - releasing agent_lock\n");
            $self->set_local_status({ state => 'lost_agent_lock' });
        } elsif (!$self->get_protected_ha_agent_lock()) {
            $self->set_local_status({ state => 'lost_agent_lock' });
        } elsif ($self->is_maintenance_requested()) {
            $self->set_local_status({ state => 'maintenance' });
        } else {
            if (!$self->has_configured_service_on_local_node() && !$self->run_workers()) {
                # no active service configured for this node and all (old) workers are done
                $self->{active_idle_rounds}++;
                if ($self->{active_idle_rounds} > $max_active_idle_rounds) {
                    $haenv->log('info', "node had no service configured for $max_active_idle_rounds rounds, going idle.\n");
                    # safety: no active service & no running worker for quite some time -> OK
                    $haenv->release_ha_agent_lock();
                    give_up_watchdog_protection($self);
                    $self->set_local_status({ state => 'wait_for_agent_lock' });
                    $self->{active_idle_rounds} = 0;
                }
            } elsif ($self->{active_idle_rounds}) {
                $self->{active_idle_rounds} = 0;
            }
        }
    } elsif ($state eq 'maintenance') {

        if ($fence_request) {
            $haenv->log('err', "node needs to be fenced during maintenance mode - releasing agent_lock\n");
            $self->set_local_status({ state => 'lost_agent_lock' });
        } elsif ($self->active_service_count() || $self->run_workers()) {
            # keep the lock and watchdog as long as not all services cleared the node
            if (!$self->get_protected_ha_agent_lock()) {
                $self->set_local_status({ state => 'lost_agent_lock' });
            }
        } elsif (!$self->is_maintenance_requested()) {
            # empty && no maintenance mode && not exited -> need to switch to active again
            if ($self->get_protected_ha_agent_lock()) {
                $self->set_local_status({ state => 'active' });
            } else {
                $self->set_local_status({ state => 'lost_agent_lock' });
            }
        }
    }

    $status = $self->get_local_status();
    $state = $status->{state};

    # do work

    if ($state eq 'wait_for_agent_lock') {

        return 0 if $self->{shutdown_request};

        $self->update_lrm_status();

        $haenv->sleep(5);

    } elsif ($state eq 'active') {

        my $startime = $haenv->get_time();

        my $max_time = 10;

        my $shutdown = 0;

        # do work (max_time seconds)
        eval {
            # fixme: set alert timer

            # if we could not get the current service status there's no point
            # in doing anything, try again next round.
            return if !$self->update_service_status();

            if ($self->{shutdown_request}) {

                if ($self->{mode} eq 'restart') {
                    # catch exited workers to update service state
                    my $workers = $self->run_workers();
                    my $service_count = $self->active_service_count();

                    if ($service_count == 0 && $workers == 0) {
                        # safety: no active services or workers -> OK
                        give_up_watchdog_protection($self);
                        $shutdown = 1;

                        # restart with no or frozen services, release the lock
                        $haenv->release_ha_agent_lock();
                    }
                } else {

                    if ($self->run_workers() == 0) {
                        if ($self->{shutdown_errors} == 0) {
                            # safety: no active services and LRM shutdown -> OK
                            give_up_watchdog_protection($self);

                            # shutdown with all services stopped, thus release the lock
                            $haenv->release_ha_agent_lock();
                        }

                        $shutdown = 1;
                    }
                }
            } else {
                if (!$self->{cluster_state_update}) {
                    # update failed but we could still renew our lock (cfs restart?),
                    # safely skip manage and expect to update just fine next round
                    $haenv->log('notice', "temporary inconsistent cluster state " .
                        "(cfs restart?), skip round");
                    return;
                }

                $self->manage_resources();
            }
        };
        if (my $err = $@) {
            $haenv->log('err', "got unexpected error - $err");
        }

        $self->update_lrm_status();

        return 0 if $shutdown;

        $haenv->sleep_until($startime + $max_time);

    } elsif ($state eq 'lost_agent_lock') {

        # NOTE: watchdog is active and will trigger soon!
        # so we hope to get the lock back soon!
        if ($self->{shutdown_request}) {

            my $service_count = $self->active_service_count();

            if ($service_count > 0) {
                $haenv->log('err', "got shutdown request in state 'lost_agent_lock' - " .
                    "detected $service_count running services");

                if ($self->{mode} eq 'restart') {
                    my $state_mt = $self->{status}->{state_change_time};

                    # watchdog should have already triggered, so either it's set
                    # to noboot or it failed. As we are in restart mode, and
                    # have an infinite stop timeout -> exit now - we don't touch services
                    # or change state, so this is safe, relatively speaking
                    if (($haenv->get_time() - $state_mt) > 90) {
                        $haenv->log('err', "lost agent lock and restart request for over 90 seconds - giving up!");
                        return 0;
                    }
                }
            } else {
                # safety: all services are stopped, so we can close the watchdog
                give_up_watchdog_protection($self);

                return 0;
            }
        }

        $haenv->sleep(5);

    } elsif ($state eq 'maintenance') {

        my $startime = $haenv->get_time();
        return if !$self->update_service_status();

        # wait until all active services moved away
        my $service_count = $self->active_service_count();

        my $exit_lrm = 0;

        if ($service_count == 0 && $self->run_workers() == 0) {
            # safety: going into maintenance and all active services got moved -> OK
            give_up_watchdog_protection($self);

            if ($self->{shutdown_request}) {
                $exit_lrm = 1;
            }

            # maintenance mode with no or only frozen services anymore, release the lock
            $haenv->release_ha_agent_lock();
        }

        $self->manage_resources() if !$exit_lrm;

        $self->update_lrm_status();

        return 0 if $exit_lrm;

        $haenv->sleep_until($startime + 5);

    } else {
        die "got unexpected status '$state'\n";
    }

    return 1;
}
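
# work() normally returns 1 to request another iteration and 0 once a pending shutdown
# request has been fully processed; do_one_iteration() above just passes that result through.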

sub run_workers {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    my $starttime = $haenv->get_time();

    # number of workers to start; if 0, we exec the command directly without forking
    my $max_workers = $haenv->get_max_workers();
    my $sc = $haenv->read_service_config();

    my $worker = $self->{workers};
    # we only got limited time but want to ensure that every queued worker is scheduled
    # eventually, so sort by the count a worker was seen here in this loop
    my $fair_sorter = sub {
        $worker->{$b}->{start_tries} <=> $worker->{$a}->{start_tries} || $a cmp $b
    };

    while (($haenv->get_time() - $starttime) <= 8) {
        my $count = $self->check_active_workers();

        for my $sid (sort $fair_sorter grep { !$worker->{$_}->{pid} } keys %$worker) {
            my $w = $worker->{$sid};
            # a higher try-count means a higher priority, especially compared to newly
            # queued jobs, so count every try to avoid starvation
            $w->{start_tries}++;
            # FIXME: should be last and ensure that check_active_workers is called sooner
            next if $count >= $max_workers && $max_workers > 0;

            # only fork if we may, else call exec_resource_agent directly (e.g. for tests)
            if ($max_workers > 0) {
                my $pid = fork();
                if (!defined($pid)) {
                    $haenv->log('err', "forking worker failed - $!");
                    $count = 0; last; # abort, try later
                } elsif ($pid == 0) {
                    $haenv->after_fork(); # cleanup

                    # do work
                    my $res = -1;
                    eval {
                        $res = $self->exec_resource_agent($sid, $sc->{$sid}, $w->{state}, $w->{params});
                    };
                    if (my $err = $@) {
                        $haenv->log('err', $err);
                        POSIX::_exit(-1);
                    }
                    POSIX::_exit($res);
                } else {
                    $count++;
                    $w->{pid} = $pid;
                }
            } else {
                my $res = -1;
                eval {
                    $res = $self->exec_resource_agent($sid, $sc->{$sid}, $w->{state}, $w->{params});
                    $res = $res << 8 if $res > 0;
                };
                if (my $err = $@) {
                    $haenv->log('err', $err);
                }
                if (defined($w->{uid})) {
                    $self->resource_command_finished($sid, $w->{uid}, $res);
                } else {
                    $self->stop_command_finished($sid, $res);
                }
            }
        }

        last if !$count;

        $haenv->sleep(1);
    }

    return scalar(keys %{$self->{workers}});
}
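
# In the non-forking path above, a positive exit code is shifted left by 8 so it has the
# same wait(2) status layout (exit code in the high byte, signal in the low bits) that
# waitpid() delivers via $? for forked workers - the *_command_finished handlers below
# decode both the same way.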

sub manage_resources {
    my ($self) = @_;

    my $haenv = $self->{haenv};

    my $nodename = $haenv->nodename();

    my $ss = $self->{service_status};

    foreach my $sid (keys %{$self->{restart_tries}}) {
        delete $self->{restart_tries}->{$sid} if !$ss->{$sid};
    }

    foreach my $sid (keys %$ss) {
        my $sd = $ss->{$sid};
        next if !$sd->{node} || !$sd->{uid};
        next if $sd->{node} ne $nodename;
        my $request_state = $sd->{state};
        next if !defined($request_state);
        # can only happen for restricted groups where the failed node itself needs to be the
        # recovery target. Always let the master do so first, the service will then be marked
        # as 'stopped' and we can just continue normally. But we must NOT do anything with it
        # while still in recovery
        next if $request_state eq 'recovery';
        next if $request_state eq 'freeze';
        # intermediate step for optional better node selection on stop -> start request state change
        next if $request_state eq 'request_start';

        $self->queue_resource_command($sid, $sd->{uid}, $request_state, {
            'target' => $sd->{target},
            'timeout' => $sd->{timeout},
        });
    }

    return $self->run_workers();
}

sub queue_resource_command {
    my ($self, $sid, $uid, $state, $params) = @_;

    # Do not queue the exact same command twice, as this may lead to an inconsistent HA state
    # when the first command fails but the CRM does not process its failure right away and the
    # LRM starts a second try, without the CRM knowing of it (race condition). The 'stopped'
    # command is an exception, as we do not process its result in the CRM and we want to
    # execute it always (even with no active CRM).
    return if $state ne 'stopped' && $uid && defined($self->{results}->{$uid});

    if (my $w = $self->{workers}->{$sid}) {
        return if $w->{pid}; # already started
        # else, delete and overwrite queue entry with new command
        delete $self->{workers}->{$sid};
    }

    $self->{workers}->{$sid} = {
        sid => $sid,
        uid => $uid,
        state => $state,
        start_tries => 0,
    };

    $self->{workers}->{$sid}->{params} = $params if $params;
}

sub check_active_workers {
    my ($self) = @_;

    # finish/count workers
    my $count = 0;
    foreach my $sid (keys %{$self->{workers}}) {
        my $w = $self->{workers}->{$sid};
        my $pid = $w->{pid} || next;

        my $waitpid = waitpid($pid, WNOHANG); # check status
        if (defined($waitpid) && ($waitpid == $pid)) {
            if (defined($w->{uid})) {
                $self->resource_command_finished($sid, $w->{uid}, $?);
            } else {
                $self->stop_command_finished($sid, $?);
            }
        } else {
            $count++; # still active
        }
    }

    return $count;
}

sub stop_command_finished {
    my ($self, $sid, $status) = @_;

    my $haenv = $self->{haenv};

    my $w = delete $self->{workers}->{$sid};
    return if !$w; # should not happen

    my $exit_code = -1;

    if ($status == -1) {
        $haenv->log('err', "resource agent $sid finished - failed to execute");
    } elsif (my $sig = ($status & 127)) {
        $haenv->log('err', "resource agent $sid finished - got signal $sig");
    } else {
        $exit_code = ($status >> 8);
    }

    if ($exit_code != 0) {
        $self->{shutdown_errors}++;
    }
}

sub resource_command_finished {
    my ($self, $sid, $uid, $status) = @_;

    my $haenv = $self->{haenv};

    my $w = delete $self->{workers}->{$sid};
    return if !$w; # should not happen

    my $exit_code = -1;

    if ($status == -1) {
        $haenv->log('err', "resource agent $sid finished - failed to execute");
    } elsif (my $sig = ($status & 127)) {
        $haenv->log('err', "resource agent $sid finished - got signal $sig");
    } else {
        $exit_code = ($status >> 8);
    }

    $exit_code = $self->handle_service_exitcode($sid, $w->{state}, $exit_code);

    return if $exit_code == ETRY_AGAIN; # tell nobody, simply retry

    $self->{results}->{$uid} = {
        sid => $w->{sid},
        state => $w->{state},
        exit_code => $exit_code,
    };

    my $ss = $self->{service_status};

    # compute hash of valid/existing uids
    my $valid_uids = {};
    foreach my $sid (keys %$ss) {
        my $sd = $ss->{$sid};
        next if !$sd->{uid};
        $valid_uids->{$sd->{uid}} = 1;
    }

    my $results = {};
    foreach my $id (keys %{$self->{results}}) {
        next if !$valid_uids->{$id};
        $results->{$id} = $self->{results}->{$id};
    }
    $self->{results} = $results;
}

# processes the exit code from a finished resource agent, so that the CRM knows
# if the LRM wants to retry an action based on the current recovery policies for
# the failed service, or if the CRM itself must try to recover from the failure.
sub handle_service_exitcode {
    my ($self, $sid, $cmd, $exit_code) = @_;

    my $haenv = $self->{haenv};
    my $tries = $self->{restart_tries};

    my $sc = $haenv->read_service_config();

    my $max_restart = 0;

    if (my $cd = $sc->{$sid}) {
        $max_restart = $cd->{max_restart};
    }

    if ($cmd eq 'started') {

        if ($exit_code == SUCCESS) {

            $tries->{$sid} = 0;

            return $exit_code;

        } elsif ($exit_code == ERROR) {

            $tries->{$sid} = 0 if !defined($tries->{$sid});

            if ($tries->{$sid} >= $max_restart) {
                $haenv->log('err', "unable to start service $sid on local node" .
                    " after $tries->{$sid} retries");
                $tries->{$sid} = 0;
                return ERROR;
            }

            $tries->{$sid}++;

            $haenv->log('warning', "restart policy: retry number $tries->{$sid}" .
                " for service '$sid'");
            # tell CRM that we retry the start
            return ETRY_AGAIN;
        }
    }

    return $exit_code;
}
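
# Example: for a service with max_restart=1, a failing 'started' command first returns
# ETRY_AGAIN (the LRM retries locally and no result is reported), and on the next failure
# ERROR, at which point the CRM applies its own recovery policy (e.g. relocation).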

sub exec_resource_agent {
    my ($self, $sid, $service_config, $cmd, $params) = @_;

    # setup execution environment

    $ENV{'PATH'} = '/sbin:/bin:/usr/sbin:/usr/bin';

    my $haenv = $self->{haenv};

    my $nodename = $haenv->nodename();

    my (undef, $service_type, $service_name) = $haenv->parse_sid($sid);

    my $plugin = PVE::HA::Resources->lookup($service_type);
    if (!$plugin) {
        $haenv->log('err', "service type '$service_type' not implemented");
        return EUNKNOWN_SERVICE_TYPE;
    }

    if (!$service_config) {
        $haenv->log('err', "missing resource configuration for '$sid'");
        return EUNKNOWN_SERVICE;
    }

    # process error state early
    if ($cmd eq 'error') {
        $haenv->log('err', "service $sid is in an error state and needs manual " .
            "intervention. Look up 'ERROR RECOVERY' in the documentation.");

        return SUCCESS; # error always succeeds
    }

    if ($service_config->{node} ne $nodename) {
        $haenv->log('err', "service '$sid' not on this node");
        return EWRONG_NODE;
    }

    my $id = $service_name;

    my $running = $plugin->check_running($haenv, $id);

    if ($cmd eq 'started') {

        return SUCCESS if $running;

        $haenv->log("info", "starting service $sid");

        $plugin->start($haenv, $id);

        $running = $plugin->check_running($haenv, $id);

        if ($running) {
            $haenv->log("info", "service status $sid started");
            return SUCCESS;
        } else {
            $haenv->log("warning", "unable to start service $sid");
            return ERROR;
        }

    } elsif ($cmd eq 'request_stop' || $cmd eq 'stopped') {

        return SUCCESS if !$running;

        if (defined($params->{timeout})) {
            $haenv->log("info", "stopping service $sid (timeout=$params->{timeout})");
        } else {
            $haenv->log("info", "stopping service $sid");
        }

        $plugin->shutdown($haenv, $id, $params->{timeout});

        $running = $plugin->check_running($haenv, $id);

        if (!$running) {
            $haenv->log("info", "service status $sid stopped");
            return SUCCESS;
        } else {
            $haenv->log("info", "unable to stop service $sid (still running)");
            return ERROR;
        }

    } elsif ($cmd eq 'migrate' || $cmd eq 'relocate' || $cmd eq 'request_start_balance') {

        my $target = $params->{target};
        if (!defined($target)) {
            die "$cmd '$sid' failed - missing target\n";
        }

        if ($service_config->{node} eq $target) {
            # already there
            return SUCCESS;
        }

        if ($cmd eq 'request_start_balance' && $running) {
            $haenv->log("info", "ignoring rebalance-on-start for service $sid - already running");
            return IGNORED;
        }

        my $online = ($cmd eq 'migrate') ? 1 : 0;

        my $res = $plugin->migrate($haenv, $id, $target, $online);

        # something went wrong if service is still on this node
        if (!$res) {
            $haenv->log("err", "service $sid not moved (migration error)");
            return ERROR;
        }

        return SUCCESS;

    }

    $haenv->log("err", "implement me (cmd '$cmd')");
    return EUNKNOWN_COMMAND;
}
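
# The SUCCESS/ERROR/IGNORED and E* result codes used above are the exit-code constants
# imported from PVE::HA::Tools via the ':exit_codes' tag at the top of this module.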

1;