# src/PVE/HA/Env/PVE2.pm (pve-ha-manager)
package PVE::HA::Env::PVE2;

use strict;
use warnings;
use POSIX qw(:errno_h :fcntl_h);
use IO::Socket::UNIX;

use PVE::SafeSyslog;
use PVE::Tools;
use PVE::Cluster qw(cfs_register_file cfs_read_file cfs_write_file cfs_lock_file);
use PVE::INotify;
use PVE::RPCEnvironment;

use PVE::HA::Tools ':exit_codes';
use PVE::HA::Config;
use PVE::HA::FenceConfig;
use PVE::HA::Resources;
use PVE::HA::Resources::PVEVM;
use PVE::HA::Resources::PVECT;
PVE::HA::Resources::PVEVM->register();
PVE::HA::Resources::PVECT->register();

PVE::HA::Resources->init();
my $lockdir = "/etc/pve/priv/lock";
sub new {
    my ($this, $nodename) = @_;

    die "missing nodename" if !$nodename;

    my $class = ref($this) || $this;

    my $self = bless {}, $class;

    $self->{nodename} = $nodename;

    return $self;
}

sub nodename {
    my ($self) = @_;

    return $self->{nodename};
}

sub hardware {
    my ($self) = @_;

    die "hardware is for testing and simulation only";
}
sub read_manager_status {
    my ($self) = @_;

    return PVE::HA::Config::read_manager_status();
}
sub write_manager_status {
    my ($self, $status_obj) = @_;

    PVE::HA::Config::write_manager_status($status_obj);
}
sub read_lrm_status {
    my ($self, $node) = @_;

    $node = $self->{nodename} if !defined($node);

    return PVE::HA::Config::read_lrm_status($node);
}
sub write_lrm_status {
    my ($self, $status_obj) = @_;

    my $node = $self->{nodename};

    PVE::HA::Config::write_lrm_status($node, $status_obj);
}
sub is_node_shutdown {
    my ($self) = @_;

    my $shutdown = 0;

    my $code = sub {
        my $line = shift;

        $shutdown = 1 if ($line =~ m/shutdown\.target/);
    };

    my $cmd = ['/bin/systemctl', 'list-jobs'];
    eval { PVE::Tools::run_command($cmd, outfunc => $code, noerr => 1); };

    return $shutdown;
}
sub queue_crm_commands {
    my ($self, $cmd) = @_;

    return PVE::HA::Config::queue_crm_commands($cmd);
}
sub read_crm_commands {
    my ($self) = @_;

    return PVE::HA::Config::read_crm_commands();
}
sub read_service_config {
    my ($self) = @_;

    my $res = PVE::HA::Config::read_resources_config();

    my $vmlist = PVE::Cluster::get_vmlist();
    my $conf = {};

    foreach my $sid (keys %{$res->{ids}}) {
        my $d = $res->{ids}->{$sid};
        my (undef, undef, $name) = PVE::HA::Tools::parse_sid($sid);
        $d->{state} = 'enabled' if !defined($d->{state});
        $d->{max_restart} = 1 if !defined($d->{max_restart});
        $d->{max_relocate} = 1 if !defined($d->{max_relocate});
        if (PVE::HA::Resources->lookup($d->{type})) {
            if (my $vmd = $vmlist->{ids}->{$name}) {
                if (!$vmd->{node}) {
                    warn "no such VM '$name'\n";
                } else {
                    $d->{node} = $vmd->{node};
                    $conf->{$sid} = $d;
                }
            } else {
                if (defined($d->{node})) {
                    $conf->{$sid} = $d;
                } else {
                    warn "service '$sid' without node\n";
                }
            }
        }
    }

    return $conf;
}
sub read_fence_config {
    my ($self) = @_;

    return PVE::HA::Config::read_fence_config();
}
sub fencing_mode {
    my ($self) = @_;

    my $datacenterconfig = cfs_read_file('datacenter.cfg');

    return 'watchdog' if !$datacenterconfig->{fencing};

    return $datacenterconfig->{fencing};
}
sub exec_fence_agent {
    my ($self, $agent, $node, @param) = @_;

    # setup execution environment
    $ENV{'PATH'} = '/sbin:/bin:/usr/sbin:/usr/bin';

    my $cmd = "$agent " . PVE::HA::FenceConfig::gen_arg_str(@param);

    exec($cmd);
    exit -1;
}
# this is only allowed by the master to recover a _fenced_ service
sub steal_service {
    my ($self, $sid, $current_node, $new_node) = @_;

    my (undef, $type, $name) = PVE::HA::Tools::parse_sid($sid);

    if (my $plugin = PVE::HA::Resources->lookup($type)) {
        my $old = $plugin->config_file($name, $current_node);
        my $new = $plugin->config_file($name, $new_node);
        rename($old, $new) ||
            die "rename '$old' to '$new' failed - $!\n";
    }
}
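# Hedged usage sketch, not called anywhere in this file: only the current
# master (CRM) is supposed to do this, and only for a service whose old node
# was already fenced. The service ID and node names below are hypothetical.
#
#   my $env = PVE::HA::Env::PVE2->new('survivor');
#   $env->steal_service('vm:100', 'fenced-node', 'survivor');
#
# Because guest configs live on the shared cluster filesystem, the recovery is
# just a rename of the config file from the old node's directory to the new one.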
sub read_group_config {
    my ($self) = @_;

    return PVE::HA::Config::read_group_config();
}
# this should return a hash containing info about
# which nodes are members and online.
sub get_node_info {
    my ($self) = @_;

    my ($node_info, $quorate) = ({}, 0);

    my $nodename = $self->{nodename};

    $quorate = PVE::Cluster::check_cfs_quorum(1) || 0;

    my $members = PVE::Cluster::get_members();

    foreach my $node (keys %$members) {
        my $d = $members->{$node};
        $node_info->{$node}->{online} = $d->{online};
    }

    $node_info->{$nodename}->{online} = 1; # local node is always up

    return ($node_info, $quorate);
}
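# Illustrative return value (node names invented), matching the comment above:
#
#   my ($node_info, $quorate) = $env->get_node_info();
#   # $node_info = {
#   #     node1 => { online => 1 },
#   #     node2 => { online => 0 },
#   # };
#   # $quorate = 1;   # true if this node is part of the quorate partition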
sub log {
    my ($self, $level, $msg) = @_;

    chomp $msg;

    syslog($level, $msg);
}
my $last_lock_status = {};

sub get_pve_lock {
    my ($self, $lockid) = @_;

    my $got_lock = 0;

    my $filename = "$lockdir/$lockid";

    my $last = $last_lock_status->{$lockid} || 0;

    my $ctime = time();

    my $retry_timeout = 100; # fixme: what timeout

    eval {
        mkdir $lockdir;

        # pve cluster filesystem not online
        die "can't create '$lockdir' (pmxcfs not mounted?)\n" if ! -d $lockdir;

        if ($last && (($ctime - $last) < $retry_timeout)) {
            # send cfs lock update request (utime)
            if (!utime(0, $ctime, $filename)) {
                die "cfs lock update failed - $!\n";
            }
        } else {
            # fixme: wait some time?
            if (!(mkdir $filename)) {
                utime 0, 0, $filename; # cfs unlock request
                die "can't get cfs lock\n";
            }
        }

        $got_lock = 1;
    };

    my $err = $@;

    # $self->log('err', $err) if $err; # for debugging

    $last_lock_status->{$lockid} = $got_lock ? $ctime : 0;

    if (!!$got_lock != !!$last) {
        if ($got_lock) {
            $self->log('info', "successfully acquired lock '$lockid'");
        } else {
            my $msg = "lost lock '$lockid'";
            $msg .= " - $err" if $err;
            $self->log('err', $msg);
        }
    } else {
        # $self->log('err', $err) if $err; # for debugging
    }

    return $got_lock;
}
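# Hedged sketch of the expected calling pattern (loop and delay are
# illustrative, not taken from this file): the caller re-requests the lock on
# every iteration, so while it is held the existing lock directory only gets
# its mtime refreshed via utime(), and a crashed holder loses the lock once
# the pmxcfs lock expires on the server side.
#
#   while ($running) {
#       my $is_master = $env->get_ha_manager_lock();
#       # ... perform master work only if $is_master ...
#       $env->sleep(10);
#   }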
sub get_ha_manager_lock {
    my ($self) = @_;

    return $self->get_pve_lock("ha_manager_lock");
}
# release the cluster wide manager lock.
# when released another CRM may step up and get the lock, thus this should only
# get called when shutting down/deactivating the current master
sub release_ha_manager_lock {
    my ($self) = @_;

    return rmdir("$lockdir/ha_manager_lock");
}
sub get_ha_agent_lock {
    my ($self, $node) = @_;

    $node = $self->nodename() if !defined($node);

    return $self->get_pve_lock("ha_agent_${node}_lock");
}
# release the respective node agent lock.
# this should only get called if the node's LRM gracefully shuts down with
# all services already cleanly stopped!
sub release_ha_agent_lock {
    my ($self) = @_;

    my $node = $self->nodename();

    return rmdir("$lockdir/ha_agent_${node}_lock");
}
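# Hedged example of the shutdown order implied by the comment above (the
# caller is hypothetical): stop every local service first, only then drop the
# agent lock, otherwise the master could acquire it and recover services that
# are in fact still running here.
#
#   # ... all resources on this node stopped cleanly ...
#   $env->release_ha_agent_lock();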
sub quorate {
    my ($self) = @_;

    my $quorate = 0;

    eval {
        $quorate = PVE::Cluster::check_cfs_quorum();
    };

    return $quorate;
}
sub get_time {
    my ($self) = @_;

    return time();
}

sub sleep {
    my ($self, $delay) = @_;

    CORE::sleep($delay);
}

sub sleep_until {
    my ($self, $end_time) = @_;

    for (;;) {
        my $cur_time = time();

        last if $cur_time >= $end_time;

        $self->sleep(1);
    }
}
sub loop_start_hook {
    my ($self) = @_;

    PVE::Cluster::cfs_update();

    $self->{loop_start} = $self->get_time();
}
sub loop_end_hook {
    my ($self) = @_;

    my $delay = $self->get_time() - $self->{loop_start};

    warn "loop takes too long ($delay seconds)\n" if $delay > 30;
}
my $watchdog_fh;

sub watchdog_open {
    my ($self) = @_;

    die "watchdog already open\n" if defined($watchdog_fh);

    $watchdog_fh = IO::Socket::UNIX->new(
        Type => SOCK_STREAM(),
        Peer => "/run/watchdog-mux.sock") ||
        die "unable to open watchdog socket - $!\n";

    $self->log('info', "watchdog active");
}
sub watchdog_update {
    my ($self, $wfh) = @_;

    my $res = $watchdog_fh->syswrite("\0", 1);
    if (!defined($res)) {
        $self->log('err', "watchdog update failed - $!\n");
        return 0;
    }
    if ($res != 1) {
        $self->log('err', "watchdog update failed - write $res bytes\n");
        return 0;
    }

    return 1;
}
sub watchdog_close {
    my ($self, $wfh) = @_;

    $watchdog_fh->syswrite("V", 1); # magic watchdog close
    if (!$watchdog_fh->close()) {
        $self->log('err', "watchdog close failed - $!");
    } else {
        $watchdog_fh = undef;
        $self->log('info', "watchdog closed (disabled)");
    }
}
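# Hedged sketch of the watchdog lifecycle against the watchdog-mux socket
# (the real callers are the CRM/LRM daemons, not this file; $wfh is the
# handle argument as in the method signatures above):
#
#   $env->watchdog_open();          # connect to /run/watchdog-mux.sock
#   $env->watchdog_update($wfh);    # repeat regularly: a "\0" byte keeps the watchdog fed
#   $env->watchdog_close($wfh);     # send the magic "V" byte, then disarm and close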
sub after_fork {
    my ($self) = @_;

    # close inherited inotify FD from parent and reopen our own
    PVE::INotify::inotify_close();
    PVE::INotify::inotify_init();

    PVE::Cluster::cfs_update();
}
sub get_max_workers {
    my ($self) = @_;

    my $datacenterconfig = cfs_read_file('datacenter.cfg');

    return $datacenterconfig->{max_workers} || 4;
}

1;