package PVE::HA::Env::PVE2;

use strict;
use warnings;

use POSIX qw(:errno_h :fcntl_h);
use IO::File;
use IO::Socket::UNIX;

use PVE::SafeSyslog;
use PVE::Tools;
use PVE::Cluster qw(cfs_register_file cfs_read_file cfs_write_file cfs_lock_file);
use PVE::INotify;
use PVE::RPCEnvironment;

use PVE::HA::Tools ':exit_codes';
use PVE::HA::Env;
use PVE::HA::Config;
use PVE::HA::Resources;
use PVE::HA::Resources::PVEVM;
use PVE::HA::Resources::PVECT;

PVE::HA::Resources::PVEVM->register();
PVE::HA::Resources::PVECT->register();

PVE::HA::Resources->init();

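# all cluster-wide HA locks are directories below this path on the
# cluster filesystem (pmxcfs)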
my $lockdir = "/etc/pve/priv/lock";

sub new {
    my ($this, $nodename) = @_;

    die "missing nodename" if !$nodename;

    my $class = ref($this) || $this;

    my $self = bless {}, $class;

    $self->{nodename} = $nodename;

    return $self;
}

sub nodename {
    my ($self) = @_;

    return $self->{nodename};
}

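# the hardware abstraction is only implemented by the test and
# simulation environments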
sub hardware {
    my ($self) = @_;

    die "hardware is for testing and simulation only";
}

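# manager and LRM status are stored on the cluster filesystem; the
# following helpers just delegate to PVE::HA::Config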
sub read_manager_status {
    my ($self) = @_;

    return PVE::HA::Config::read_manager_status();
}

sub write_manager_status {
    my ($self, $status_obj) = @_;

    PVE::HA::Config::write_manager_status($status_obj);
}

sub read_lrm_status {
    my ($self, $node) = @_;

    $node = $self->{nodename} if !defined($node);

    return PVE::HA::Config::read_lrm_status($node);
}

sub write_lrm_status {
    my ($self, $status_obj) = @_;

    my $node = $self->{nodename};

    PVE::HA::Config::write_lrm_status($node, $status_obj);
}

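# detect an ongoing shutdown of the local node by checking the queued
# systemd jobs for shutdown.target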
sub is_node_shutdown {
    my ($self) = @_;

    my $shutdown = 0;

    my $code = sub {
        my $line = shift;

        $shutdown = 1 if ($line =~ m/shutdown\.target/);
    };

    my $cmd = ['/bin/systemctl', 'list-jobs'];
    eval { PVE::Tools::run_command($cmd, outfunc => $code, noerr => 1); };

    return $shutdown;
}

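# CRM commands are queued on the cluster filesystem and consumed by the
# current master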
sub queue_crm_commands {
    my ($self, $cmd) = @_;

    return PVE::HA::Config::queue_crm_commands($cmd);
}

sub read_crm_commands {
    my ($self) = @_;

    return PVE::HA::Config::read_crm_commands();
}

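# merge the HA resource configuration with the cluster-wide guest list,
# filling in defaults and the node each service currently resides on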
sub read_service_config {
    my ($self) = @_;

    my $res = PVE::HA::Config::read_resources_config();

    my $vmlist = PVE::Cluster::get_vmlist();
    my $conf = {};

    foreach my $sid (keys %{$res->{ids}}) {
        my $d = $res->{ids}->{$sid};
        my (undef, undef, $name) = PVE::HA::Tools::parse_sid($sid);
        $d->{state} = 'enabled' if !defined($d->{state});
        $d->{max_restart} = 1 if !defined($d->{max_restart});
        $d->{max_relocate} = 1 if !defined($d->{max_relocate});
        if (PVE::HA::Resources->lookup($d->{type})) {
            if (my $vmd = $vmlist->{ids}->{$name}) {
                $d->{node} = $vmd->{node};
                $conf->{$sid} = $d;
            } else {
                if (defined($d->{node})) {
                    $conf->{$sid} = $d;
                } else {
                    warn "service '$sid' without node\n";
                }
            }
        }
    }

    return $conf;
}

# this is only allowed by the master to recover a _fenced_ service;
# moving the config file on the cluster filesystem is what reassigns
# the guest to the new node
sub steal_service {
    my ($self, $sid, $current_node, $new_node) = @_;

    my (undef, $type, $name) = PVE::HA::Tools::parse_sid($sid);

    if (my $plugin = PVE::HA::Resources->lookup($type)) {
        my $old = $plugin->config_file($name, $current_node);
        my $new = $plugin->config_file($name, $new_node);
        rename($old, $new) ||
            die "rename '$old' to '$new' failed - $!\n";
    } else {
        die "implement me";
    }
}

sub read_group_config {
    my ($self) = @_;

    return PVE::HA::Config::read_group_config();
}

# returns a hash describing which nodes are cluster members and whether
# they are online, together with the quorum state
sub get_node_info {
    my ($self) = @_;

    my ($node_info, $quorate) = ({}, 0);

    my $nodename = $self->{nodename};

    $quorate = PVE::Cluster::check_cfs_quorum(1) || 0;

    my $members = PVE::Cluster::get_members();

    foreach my $node (keys %$members) {
        my $d = $members->{$node};
        $node_info->{$node}->{online} = $d->{online};
    }

    $node_info->{$nodename}->{online} = 1; # local node is always up

    return ($node_info, $quorate);
}

sub log {
    my ($self, $level, $msg) = @_;

    chomp $msg;

    syslog($level, $msg);
}

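# cluster-wide locks backed by the cluster filesystem: a lock is a
# directory below $lockdir; mkdir acquires it, utime() refreshes it, and
# pmxcfs itself expires locks whose holder stops updating them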
my $last_lock_status = {};

sub get_pve_lock {
    my ($self, $lockid) = @_;

    my $got_lock = 0;

    my $filename = "$lockdir/$lockid";

    my $last = $last_lock_status->{$lockid} || 0;

    my $ctime = time();

    my $retry = 0;
    my $retry_timeout = 100; # fixme: what timeout

    eval {

        mkdir $lockdir;

        # pve cluster filesystem not online
        die "can't create '$lockdir' (pmxcfs not mounted?)\n" if ! -d $lockdir;

        if ($last && (($ctime - $last) < $retry_timeout)) {
            # send cfs lock update request (utime)
            if (!utime(0, $ctime, $filename)) {
                $retry = 1;
                die "cfs lock update failed - $!\n";
            }
        } else {

            # fixme: wait some time?
            if (!(mkdir $filename)) {
                utime 0, 0, $filename; # cfs unlock request
                die "can't get cfs lock\n";
            }
        }

        $got_lock = 1;
    };

    my $err = $@;

    if ($retry) {
        # $self->log('err', $err) if $err; # for debugging
        return 0;
    }

    $last_lock_status->{$lockid} = $got_lock ? $ctime : 0;

    if (!!$got_lock != !!$last) {
        if ($got_lock) {
            $self->log('info', "successfully acquired lock '$lockid'");
        } else {
            my $msg = "lost lock '$lockid'";
            $msg .= " - $err" if $err;
            $self->log('err', $msg);
        }
    } else {
        # $self->log('err', $err) if $err; # for debugging
    }

    return $got_lock;
}

sub get_ha_manager_lock {
    my ($self) = @_;

    return $self->get_pve_lock("ha_manager_lock");
}

# release the cluster-wide manager lock.
# when released, another CRM may step up and get the lock, thus this should
# only get called when shutting down/deactivating the current master
sub release_ha_manager_lock {
    my ($self) = @_;

    return rmdir("$lockdir/ha_manager_lock");
}

sub get_ha_agent_lock {
    my ($self, $node) = @_;

    $node = $self->nodename() if !defined($node);

    return $self->get_pve_lock("ha_agent_${node}_lock");
}

# release the respective node agent lock.
# this should only get called if the node's LRM gracefully shuts down with
# all services already cleanly stopped!
sub release_ha_agent_lock {
    my ($self) = @_;

    my $node = $self->nodename();

    return rmdir("$lockdir/ha_agent_${node}_lock");
}

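# wrapper around the cfs quorum check; returns 0 instead of dying when
# the cluster filesystem is not available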
714a4016
DM
299sub quorate {
300 my ($self) = @_;
301
4d24e7db 302 my $quorate = 0;
63f6a08c
TL
303 eval {
304 $quorate = PVE::Cluster::check_cfs_quorum();
4d24e7db 305 };
63f6a08c 306
4d24e7db 307 return $quorate;
714a4016
DM
308}

sub get_time {
    my ($self) = @_;

    return time();
}

sub sleep {
    my ($self, $delay) = @_;

    CORE::sleep($delay);
}

sub sleep_until {
    my ($self, $end_time) = @_;

    for (;;) {
        my $cur_time = time();

        last if $cur_time >= $end_time;

        $self->sleep(1);
    }
}
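
# hooks around each CRM/LRM work cycle: refresh the cached cluster
# filesystem state before a cycle, warn if a cycle took too long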
sub loop_start_hook {
    my ($self) = @_;

    PVE::Cluster::cfs_update();

    $self->{loop_start} = $self->get_time();
}

sub loop_end_hook {
    my ($self) = @_;

    my $delay = $self->get_time() - $self->{loop_start};

    warn "loop took too long ($delay seconds)\n" if $delay > 30;
}
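
# the watchdog is accessed through the watchdog-mux service over a unix
# socket; regular updates keep it from expiring, and writing the magic
# 'V' disarms it again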
my $watchdog_fh;

sub watchdog_open {
    my ($self) = @_;

    die "watchdog already open\n" if defined($watchdog_fh);

    $watchdog_fh = IO::Socket::UNIX->new(
        Type => SOCK_STREAM(),
        Peer => "/run/watchdog-mux.sock") ||
        die "unable to open watchdog socket - $!\n";

    $self->log('info', "watchdog active");
}

sub watchdog_update {
    my ($self, $wfh) = @_;

    my $res = $watchdog_fh->syswrite("\0", 1);
    if (!defined($res)) {
        $self->log('err', "watchdog update failed - $!\n");
        return 0;
    }
    if ($res != 1) {
        $self->log('err', "watchdog update failed - wrote $res bytes\n");
        return 0;
    }

    return 1;
}

sub watchdog_close {
    my ($self, $wfh) = @_;

    $watchdog_fh->syswrite("V", 1); # magic watchdog close
    if (!$watchdog_fh->close()) {
        $self->log('err', "watchdog close failed - $!");
    } else {
        $watchdog_fh = undef;
        $self->log('info', "watchdog closed (disabled)");
    }
}

sub after_fork {
    my ($self) = @_;

    # close inherited inotify FD from parent and reopen our own
    PVE::INotify::inotify_close();
    PVE::INotify::inotify_init();

    PVE::Cluster::cfs_update();
}
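
# maximum number of parallel resource agent workers, configurable via
# datacenter.cfg (defaults to 4)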
sub get_max_workers {
    my ($self) = @_;

    my $datacenterconfig = cfs_read_file('datacenter.cfg');

    return $datacenterconfig->{max_workers} || 4;
}

1;