package PVE::HA::Env::PVE2;

use strict;
use warnings;
use POSIX qw(:errno_h :fcntl_h);
use IO::File;
use IO::Socket::UNIX;

use PVE::SafeSyslog;
use PVE::Tools;
use PVE::Cluster qw(cfs_register_file cfs_read_file cfs_write_file cfs_lock_file);
use PVE::INotify;
use PVE::RPCEnvironment;

use PVE::HA::Tools ':exit_codes';
use PVE::HA::Env;
use PVE::HA::Config;
use PVE::HA::Resources;
use PVE::HA::Resources::PVEVM;
use PVE::HA::Resources::PVECT;

PVE::HA::Resources::PVEVM->register();
PVE::HA::Resources::PVECT->register();

PVE::HA::Resources->init();

my $lockdir = "/etc/pve/priv/lock";

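# This module is the production environment backing for the HA stack
# (CRM/LRM): it talks to the real cluster filesystem (pmxcfs), syslog and
# the watchdog multiplexer. The constructor only records the local node
# name; all cluster state is fetched on demand.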
sub new {
    my ($this, $nodename) = @_;

    die "missing nodename" if !$nodename;

    my $class = ref($this) || $this;

    my $self = bless {}, $class;

    $self->{nodename} = $nodename;

    return $self;
}

sub nodename {
    my ($self) = @_;

    return $self->{nodename};
}

sub hardware {
    my ($self) = @_;

    die "hardware is for testing and simulation only";
}

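# The following status readers/writers are thin wrappers around
# PVE::HA::Config, which stores manager and LRM state on the cluster
# filesystem so every node sees a consistent view.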
sub read_manager_status {
    my ($self) = @_;

    return PVE::HA::Config::read_manager_status();
}

sub write_manager_status {
    my ($self, $status_obj) = @_;

    PVE::HA::Config::write_manager_status($status_obj);
}

sub read_lrm_status {
    my ($self, $node) = @_;

    $node = $self->{nodename} if !defined($node);

    return PVE::HA::Config::read_lrm_status($node);
}

sub write_lrm_status {
    my ($self, $status_obj) = @_;

    my $node = $self->{nodename};

    PVE::HA::Config::write_lrm_status($node, $status_obj);
}

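# Detect a pending node shutdown by asking systemd for queued jobs; a job
# referencing shutdown.target means the node is going down. run_command is
# wrapped in eval so a failing systemctl never kills the calling daemon.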
sub is_node_shutdown {
    my ($self) = @_;

    my $shutdown = 0;

    my $code = sub {
        my $line = shift;

        $shutdown = 1 if ($line =~ m/shutdown\.target/);
    };

    my $cmd = ['/bin/systemctl', 'list-jobs'];
    eval { PVE::Tools::run_command($cmd, outfunc => $code, noerr => 1); };

    return $shutdown;
}

sub queue_crm_commands {
    my ($self, $cmd) = @_;

    return PVE::HA::Config::queue_crm_commands($cmd);
}

sub read_crm_commands {
    my ($self) = @_;

    return PVE::HA::Config::read_crm_commands();
}

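# Build the effective service configuration: read resources.cfg, apply
# defaults, and cross-check each service against the cluster-wide VM/CT
# list to find the node the guest currently lives on.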
sub read_service_config {
    my ($self) = @_;

    my $res = PVE::HA::Config::read_resources_config();

    my $vmlist = PVE::Cluster::get_vmlist();
    my $conf = {};

    foreach my $sid (keys %{$res->{ids}}) {
        my $d = $res->{ids}->{$sid};
        my (undef, undef, $name) = PVE::HA::Tools::parse_sid($sid);
        $d->{state} = 'enabled' if !defined($d->{state});
        $d->{max_restart} = 1 if !defined($d->{max_restart});
        $d->{max_relocate} = 1 if !defined($d->{max_relocate});
        if (PVE::HA::Resources->lookup($d->{type})) {
            if (my $vmd = $vmlist->{ids}->{$name}) {
                # guest exists - pin the service to the node currently owning it
                $d->{node} = $vmd->{node};
                $conf->{$sid} = $d;
            } else {
                if (defined($d->{node})) {
                    $conf->{$sid} = $d;
                } else {
                    warn "service '$sid' without node\n";
                }
            }
        }
    }

    return $conf;
}

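# On Proxmox VE the node owning a guest is determined by which per-node
# directory holds its config file, so a plain rename() on the cluster
# filesystem is enough to hand a service over to another node.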
sub change_service_location {
    my ($self, $sid, $current_node, $new_node) = @_;

    my (undef, $type, $name) = PVE::HA::Tools::parse_sid($sid);

    if (my $plugin = PVE::HA::Resources->lookup($type)) {
        my $old = $plugin->config_file($name, $current_node);
        my $new = $plugin->config_file($name, $new_node);
        rename($old, $new) ||
            die "rename '$old' to '$new' failed - $!\n";
    } else {
        die "implement me";
    }
}

sub read_group_config {
    my ($self) = @_;

    return PVE::HA::Config::read_group_config();
}

# Return a hash describing which nodes are cluster members and whether they
# are online, together with the local quorum state.
sub get_node_info {
    my ($self) = @_;

    my ($node_info, $quorate) = ({}, 0);

    my $nodename = $self->{nodename};

    $quorate = PVE::Cluster::check_cfs_quorum(1) || 0; # noerr=1: return false instead of dying

    my $members = PVE::Cluster::get_members();

    foreach my $node (keys %$members) {
        my $d = $members->{$node};
        $node_info->{$node}->{online} = $d->{online};
    }

    $node_info->{$nodename}->{online} = 1; # local node is always up

    return ($node_info, $quorate);
}

sub log {
    my ($self, $level, $msg) = @_;

    chomp $msg;

    syslog($level, $msg);
}

my $last_lock_status = {}; # per lock ID: timestamp of the last successful acquisition

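# Cluster-wide locking via pmxcfs: a directory under /etc/pve/priv/lock acts
# as the lock. mkdir acquires it, updating its mtime (utime) renews the
# lease while we still hold it, and utime(0, 0) asks the cluster filesystem
# to release it. $retry_timeout bounds how long we keep sending renew
# requests before falling back to a fresh acquisition attempt.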
sub get_pve_lock {
    my ($self, $lockid) = @_;

    my $got_lock = 0;

    my $filename = "$lockdir/$lockid";

    my $last = $last_lock_status->{$lockid} || 0;

    my $ctime = time();

    my $retry = 0;
    my $retry_timeout = 100; # fixme: what timeout

    eval {

        mkdir $lockdir;

        # pve cluster filesystem not online
        die "can't create '$lockdir' (pmxcfs not mounted?)\n" if ! -d $lockdir;

        if ($last && (($ctime - $last) < $retry_timeout)) {
            # send cfs lock update request (utime)
            if (!utime(0, $ctime, $filename)) {
                $retry = 1;
                die "cfs lock update failed - $!\n";
            }
        } else {

            # fixme: wait some time?
            if (!(mkdir $filename)) {
                utime 0, 0, $filename; # cfs unlock request
                die "can't get cfs lock\n";
            }
        }

        $got_lock = 1;
    };

    my $err = $@;

    if ($retry) {
        # $self->log('err', $err) if $err; # for debugging
        return 0;
    }

    $last_lock_status->{$lockid} = $got_lock ? $ctime : 0;

    if (!!$got_lock != !!$last) {
        if ($got_lock) {
            $self->log('info', "successfully acquired lock '$lockid'");
        } else {
            my $msg = "lost lock '$lockid'";
            $msg .= " - $err" if $err;
            $self->log('err', $msg);
        }
    } else {
        # $self->log('err', $err) if $err; # for debugging
    }

    return $got_lock;
}

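# The manager lock elects the active CRM (master); the per-node agent lock
# protects a node's LRM and is what fencing decisions revolve around.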
sub get_ha_manager_lock {
    my ($self) = @_;

    return $self->get_pve_lock("ha_manager_lock");
}

# release the cluster-wide manager lock.
# when released another CRM may step up and get the lock, thus this should only
# get called when shutting down/deactivating the current master
sub release_ha_manager_lock {
    my ($self) = @_;

    return rmdir("$lockdir/ha_manager_lock");
}

sub get_ha_agent_lock {
    my ($self, $node) = @_;

    $node = $self->nodename() if !defined($node);

    return $self->get_pve_lock("ha_agent_${node}_lock");
}

# release the respective node agent lock.
# this should only get called if the node's LRM gracefully shuts down with
# all services already cleanly stopped!
sub release_ha_agent_lock {
    my ($self) = @_;

    my $node = $self->nodename();

    return rmdir("$lockdir/ha_agent_${node}_lock");
}

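# check_cfs_quorum() raises an error when pmxcfs is unreachable or the node
# is not part of the quorate partition; wrapping it in eval turns any such
# failure into a plain "not quorate" answer.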
sub quorate {
    my ($self) = @_;

    my $quorate = 0;
    eval {
        $quorate = PVE::Cluster::check_cfs_quorum();
    };

    return $quorate;
}

sub get_time {
    my ($self) = @_;

    return time();
}

sub sleep {
    my ($self, $delay) = @_;

    CORE::sleep($delay);
}

sub sleep_until {
    my ($self, $end_time) = @_;

    for (;;) {
        my $cur_time = time();

        last if $cur_time >= $end_time;

        $self->sleep(1);
    }
}

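# The CRM/LRM daemons call these hooks around every work-loop iteration:
# cfs_update() refreshes the locally cached cluster state before the loop
# body runs, and the end hook flags iterations that took suspiciously long.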
sub loop_start_hook {
    my ($self) = @_;

    PVE::Cluster::cfs_update();

    $self->{loop_start} = $self->get_time();
}

sub loop_end_hook {
    my ($self) = @_;

    my $delay = $self->get_time() - $self->{loop_start};

    warn "loop took too long ($delay seconds)\n" if $delay > 30;
}

my $watchdog_fh;

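# Watchdog handling goes through the watchdog-mux service: the open
# connection itself appears to arm the watchdog for this client (the code
# logs "watchdog active" on connect), each written byte acts as a
# keep-alive, and the magic byte 'V' before close disarms it. Only one
# connection per process is kept, in the module-level $watchdog_fh.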
sub watchdog_open {
    my ($self) = @_;

    die "watchdog already open\n" if defined($watchdog_fh);

    $watchdog_fh = IO::Socket::UNIX->new(
        Type => SOCK_STREAM(),
        Peer => "/run/watchdog-mux.sock") ||
        die "unable to open watchdog socket - $!\n";

    $self->log('info', "watchdog active");
}

sub watchdog_update {
    my ($self, $wfh) = @_;

    # only one watchdog connection per process is supported, so the passed
    # handle is ignored in favor of the module-level $watchdog_fh
    my $res = $watchdog_fh->syswrite("\0", 1);
    if (!defined($res)) {
        $self->log('err', "watchdog update failed - $!\n");
        return 0;
    }
    if ($res != 1) {
        $self->log('err', "watchdog update failed - wrote $res bytes\n");
        return 0;
    }

    return 1;
}

sub watchdog_close {
    my ($self, $wfh) = @_;

    $watchdog_fh->syswrite("V", 1); # magic watchdog close
    if (!$watchdog_fh->close()) {
        $self->log('err', "watchdog close failed - $!");
    } else {
        $watchdog_fh = undef;
        $self->log('info', "watchdog closed (disabled)");
    }
}

sub after_fork {
    my ($self) = @_;

    # close inherited inotify FD from parent and reopen our own
    PVE::INotify::inotify_close();
    PVE::INotify::inotify_init();

    PVE::Cluster::cfs_update();
}

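# Upper bound for concurrently started worker processes; configurable via
# the max_workers option in datacenter.cfg, defaulting to 4.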
sub get_max_workers {
    my ($self) = @_;

    my $datacenterconfig = cfs_read_file('datacenter.cfg');

    return $datacenterconfig->{max_workers} || 4;
}

1;