package PVE::Cluster;
use strict;
-use POSIX;
+use warnings;
+use POSIX qw(EEXIST);
use File::stat qw();
use Socket;
use Storable qw(dclone);
use IO::File;
use MIME::Base64;
+use Digest::SHA;
use Digest::HMAC_SHA1;
+use Net::SSLeay;
use PVE::Tools;
use PVE::INotify;
use PVE::IPCC;
use PVE::SafeSyslog;
+use PVE::JSONSchema;
+use PVE::Network;
use JSON;
use RRDs;
use Encode;
+use UUID;
use base 'Exporter';
our @EXPORT_OK = qw(
my $sshglobalknownhosts = "/etc/ssh/ssh_known_hosts";
my $sshknownhosts = "/etc/pve/priv/known_hosts";
my $sshauthkeys = "/etc/pve/priv/authorized_keys";
+my $sshd_config_fn = "/etc/ssh/sshd_config";
my $rootsshauthkeys = "/root/.ssh/authorized_keys";
+my $rootsshauthkeysbackup = "${rootsshauthkeys}.org";
+my $rootsshconfig = "/root/.ssh/config";
my $observed = {
- 'vzdump' => 1,
+ 'vzdump.cron' => 1,
'storage.cfg' => 1,
'datacenter.cfg' => 1,
- 'cluster.cfg' => 1,
+ 'corosync.conf' => 1,
+ 'corosync.conf.new' => 1,
'user.cfg' => 1,
'domains.cfg' => 1,
'priv/shadow.cfg' => 1,
'/qemu-server/' => 1,
'/openvz/' => 1,
+ '/lxc/' => 1,
+ 'ha/crm_commands' => 1,
+ 'ha/manager_status' => 1,
+ 'ha/resources.cfg' => 1,
+ 'ha/groups.cfg' => 1,
+ 'ha/fence.cfg' => 1,
+ 'status.cfg' => 1,
};
# only write output if something fails
};
eval {
- PVE::Tools::run_command($cmd, outfunc => $record_output,
+ PVE::Tools::run_command($cmd, outfunc => $record_output,
errfunc => $record_output);
};
my @required_dirs = (
"$basedir/priv",
- "$basedir/nodes",
+ "$basedir/nodes",
"$basedir/nodes/$nodename",
+ "$basedir/nodes/$nodename/lxc",
"$basedir/nodes/$nodename/qemu-server",
"$basedir/nodes/$nodename/openvz",
"$basedir/nodes/$nodename/priv");
-
+
foreach my $dir (@required_dirs) {
if (! -d $dir) {
- mkdir($dir) || die "unable to create directory '$dir' - $!\n";
+ mkdir($dir) || $! == EEXIST || die "unable to create directory '$dir' - $!\n";
}
}
}
check_cfs_is_mounted();
- mkdir $authdir || die "unable to create dir '$authdir' - $!\n";
+ mkdir $authdir || $! == EEXIST || die "unable to create dir '$authdir' - $!\n";
- my $cmd = "openssl genrsa -out '$authprivkeyfn' 2048";
- run_silent_cmd($cmd);
+ run_silent_cmd(['openssl', 'genrsa', '-out', $authprivkeyfn, '2048']);
- $cmd = "openssl rsa -in '$authprivkeyfn' -pubout -out '$authpubkeyfn'";
- run_silent_cmd($cmd)
+ run_silent_cmd(['openssl', 'rsa', '-in', $authprivkeyfn, '-pubout', '-out', $authpubkeyfn]);
}
sub gen_pveca_key {
return if -f $pveca_key_fn;
eval {
- run_silent_cmd(['openssl', 'genrsa', '-out', $pveca_key_fn, '2048']);
+ run_silent_cmd(['openssl', 'genrsa', '-out', $pveca_key_fn, '4096']);
};
die "unable to generate pve ca key:\n$@" if $@;
# we try to generate an unique 'subject' to avoid browser problems
# (reused serial numbers, ..)
- my $nid = (split (/\s/, `md5sum '$pveca_key_fn'`))[0] || time();
+ my $uuid;
+ UUID::generate($uuid);
+ my $uuid_str;
+ UUID::unparse($uuid, $uuid_str);
eval {
- run_silent_cmd(['openssl', 'req', '-batch', '-days', '3650', '-new',
- '-x509', '-nodes', '-key',
+ # wrap openssl with faketime to prevent bug #904
+ run_silent_cmd(['faketime', 'yesterday', 'openssl', 'req', '-batch',
+ '-days', '3650', '-new', '-x509', '-nodes', '-key',
$pveca_key_fn, '-out', $pveca_cert_fn, '-subj',
- "/CN=Proxmox Virtual Environment/OU=$nid/O=PVE Cluster Manager CA/"]);
+ "/CN=Proxmox Virtual Environment/OU=$uuid_str/O=PVE Cluster Manager CA/"]);
};
die "generating pve root certificate failed:\n$@" if $@;
return if !$force && -f $pvessl_cert_fn;
- my $names = "IP:127.0.0.1,DNS:localhost";
+ my $names = "IP:127.0.0.1,IP:::1,DNS:localhost";
my $rc = PVE::INotify::read_file('resolvconf');
$names .= ",IP:$ip";
-
+
my $fqdn = $nodename;
$names .= ",DNS:$nodename";
[ v3_req ]
basicConstraints = CA:FALSE
-nsCertType = server
-keyUsage = nonRepudiation, digitalSignature, keyEncipherment
+extendedKeyUsage = serverAuth
subjectAltName = $names
__EOD
update_serial("0000000000000000") if ! -f $pveca_srl_fn;
eval {
- run_silent_cmd(['openssl', 'x509', '-req', '-in', $reqfn, '-days', '3650',
- '-out', $pvessl_cert_fn, '-CAkey', $pveca_key_fn,
- '-CA', $pveca_cert_fn, '-CAserial', $pveca_srl_fn,
- '-extfile', $cfgfn]);
+ # wrap openssl with faketime to prevent bug #904
+ run_silent_cmd(['faketime', 'yesterday', 'openssl', 'x509', '-req',
+ '-in', $reqfn, '-days', '3650', '-out', $pvessl_cert_fn,
+ '-CAkey', $pveca_key_fn, '-CA', $pveca_cert_fn,
+ '-CAserial', $pveca_srl_fn, '-extfile', $cfgfn]);
};
if (my $err = $@) {
sub gen_pve_vzdump_symlink {
- my $filename = "/etc/pve/vzdump";
+ my $filename = "/etc/pve/vzdump.cron";
my $link_fn = "/etc/cron.d/vzdump";
sub gen_pve_vzdump_files {
- my $filename = "/etc/pve/vzdump";
+ my $filename = "/etc/pve/vzdump.cron";
PVE::Tools::file_set_contents($filename, $vzdump_cron_dummy)
if ! -f $filename;
eval {
my $ver = $kvstore->{$node}->{tasklist} if $kvstore->{$node};
my $cd = $tasklistcache->{$node};
- if (!$cd || !$ver || ($cd->{version} != $ver)) {
+ if (!$cd || !$ver || !$cd->{version} ||
+ ($cd->{version} != $ver)) {
my $raw = &$ipcc_get_status("tasklist", $node) || '[]';
my $data = decode_json($raw);
push @$res, @$data;
my $res = {};
- while ($raw =~ s/^(.*)\n//) {
- my ($key, @ela) = split(/:/, $1);
- next if !$key;
- next if !(scalar(@ela) > 1);
- $res->{$key} = \@ela;
+ if ($raw) {
+ while ($raw =~ s/^(.*)\n//) {
+ my ($key, @ela) = split(/:/, $1);
+ next if !$key;
+ next if !(scalar(@ela) > 1);
+ $res->{$key} = [ map { $_ eq 'U' ? undef : $_ } @ela ];
+ }
}
$last_rrd_dump = $ctime;
my $err = RRDs::error;
die "RRD error: $err\n" if $err;
-
- die "got wrong time resolution ($step != $reso)\n"
+
+ die "got wrong time resolution ($step != $reso)\n"
if $step != $reso;
my $res = [];
for my $line (@$data) {
my $entry = { 'time' => $start };
$start += $step;
- my $found_undefs;
for (my $i = 0; $i < $fields; $i++) {
my $name = $names->[$i];
if (defined(my $val = $line->[$i])) {
$entry->{$name} = $val;
} else {
- # we only add entryies with all data defined
- # extjs chart has problems with undefined values
- $found_undefs = 1;
+ # leave empty fields undefined
+ # maybe make this configurable?
}
}
- push @$res, $entry if !$found_undefs;
+ push @$res, $entry;
}
return $res;
# Using RRD graph is clumsy - maybe it
# is better to simply fetch the data, and do all display
# related things with javascript (new extjs html5 graph library).
-
+
my $rrddir = "/var/lib/rrdcached/db";
my $rrd = "$rrddir/$rrdname";
- my $filename = "$rrd.png";
+ my @ids = PVE::Tools::split_list($ds);
+
+ my $ds_txt = join('_', @ids);
+
+ my $filename = "${rrd}_${ds_txt}.png";
my $setup = {
hour => [ 60, 60 ],
"--width" => 800,
"--start" => - $reso*$count,
"--end" => 'now' ,
+ "--lower-limit" => 0,
);
my $socket = "/var/run/rrdcached.sock";
push @args, "--daemon" => "unix:$socket" if -S $socket;
- my @ids = PVE::Tools::split_list($ds);
-
my @coldef = ('#00ddff', '#ff0000');
$cf = "AVERAGE" if !$cf;
push @args, "LINE2:${dataid}${col}:${id}";
}
- RRDs::graph($filename, @args);
+ push @args, '--full-size-mode';
+
+ # we do not really store data into the file
+ my $res = RRDs::graphv('', @args);
my $err = RRDs::error;
die "RRD error: $err\n" if $err;
- return { filename => $filename };
+ return { filename => $filename, image => $res->{image} };
}
# a fast way to read files (avoid fuse overhead)
sub get_config {
my ($path) = @_;
- return &$ipcc_get_config($path);
+ return &$ipcc_get_config($path);
}
sub get_cluster_log {
my $ci = $ccache->{$filename};
- if (!$ci->{version} || $ci->{version} != $version) {
-
+ if (!$ci->{version} || !$version || $ci->{version} != $version) {
+ # we always call the parser, even when the file does not exist
+ # (in that case $data is undef)
my $data = get_config($filename);
$ci->{data} = &$parser("/etc/pve/$filename", $data);
$ci->{version} = $version;
my $version;
my $infotag;
- if ($filename =~ m!^nodes/[^/]+/(openvz|qemu-server)/(\d+)\.conf$!) {
+ if ($filename =~ m!^nodes/[^/]+/(openvz|lxc|qemu-server)/(\d+)\.conf$!) {
my ($type, $vmid) = ($1, $2);
if ($vmlist && $vmlist->{ids} && $vmlist->{ids}->{$vmid}) {
$version = $vmlist->{ids}->{$vmid}->{version};
sub cfs_read_file {
my ($filename) = @_;
- my ($version, $info) = cfs_file_version($filename);
+ my ($version, $info) = cfs_file_version($filename);
my $parser = $info->{parser};
return &$ccache_read($filename, $parser, $version);
sub cfs_write_file {
my ($filename, $data) = @_;
- my $info = $file_info->{$filename} || die "unknown file '$filename'";
+ my ($version, $info) = cfs_file_version($filename);
my $writer = $info->{writer} || die "no writer defined";
alarm(60);
local $SIG{ALRM} = sub { die "got lock timeout - aborting command\n"; };
+ cfs_update(); # make sure we read latest versions inside code()
+
$res = &$code(@param);
alarm(0);
if ($err && ($err eq "got lock request timeout\n") &&
!check_cfs_quorum()){
$err = "$msg: no quorum!\n";
- }
+ }
if (!$err || $err !~ /^got lock timeout -/) {
rmdir $filename; # cfs unlock
&$cfs_lock($lockid, $timeout, $code, @param);
}
+# Run $code (with @param) while holding the cluster-wide lock
+# "domain-<domainname>", serializing changes to one auth domain.
+# Timeout handling and error propagation are delegated to $cfs_lock.
+sub cfs_lock_domain {
+ my ($domainname, $timeout, $code, @param) = @_;
+
+ my $lockid = "domain-$domainname";
+
+ &$cfs_lock($lockid, $timeout, $code, @param);
+}
+
my $log_levels = {
"emerg" => 0,
"alert" => 1,
$msg = "empty message" if !$msg;
$ident = "" if !$ident;
- $ident = encode("ascii", decode_utf8($ident),
+ $ident = encode("ascii", $ident,
sub { sprintf "\\u%04x", shift });
- my $utf8 = decode_utf8($msg);
-
- my $ascii = encode("ascii", $utf8, sub { sprintf "\\u%04x", shift });
+ my $ascii = encode("ascii", $msg, sub { sprintf "\\u%04x", shift });
if ($ident) {
syslog($priority, "<%s> %s", $ident, $ascii);
syslog("err", "writing cluster log failed: $@") if $@;
}
+# Check that $vmid is not yet used anywhere in the cluster.
+# Returns 1 when the ID is free; when it is taken, dies with a
+# "VM/CT <id> already exists" message, or returns undef if $noerr is set.
+sub check_vmid_unused {
+ my ($vmid, $noerr) = @_;
+
+ my $vmlist = get_vmlist();
+
+ my $d = $vmlist->{ids}->{$vmid};
+ return 1 if !defined($d);
+
+ return undef if $noerr;
+
+ # entry type 'qemu' maps to "VM", anything else is reported as "CT"
+ my $vmtypestr = $d->{type} eq 'qemu' ? 'VM' : 'CT';
+ die "$vmtypestr $vmid already exists on node '$d->{node}'\n";
+}
+
sub check_node_exists {
my ($nodename, $noerr) = @_;
my $nodelist = $clinfo->{nodelist};
if ($nodelist && $nodelist->{$nodename}) {
if (my $ip = $nodelist->{$nodename}->{ip}) {
- return $ip;
+ return $ip if !wantarray;
+ my $family = $nodelist->{$nodename}->{address_family};
+ if (!$family) {
+ $nodelist->{$nodename}->{address_family} =
+ $family =
+ PVE::Tools::get_host_address_family($ip);
+ }
+ return wantarray ? ($ip, $family) : $ip;
}
}
# fallback: try to get IP by other means
- my $packed_ip = gethostbyname($nodename);
- if (defined $packed_ip) {
- my $ip = inet_ntoa($packed_ip);
+ return PVE::Network::get_ip_from_hostname($nodename, $noerr);
+}
- if ($ip =~ m/^127\./) {
- die "hostname lookup failed - got local IP address ($nodename = $ip)\n" if !$noerr;
- return undef;
- }
+# Determine the local IP address to use for VM migration traffic.
+# Uses $migration_network (a CIDR) when given, else falls back to the
+# datacenter.cfg 'migration.network' setting; returns undef when neither
+# is configured. Dies (unless $noerr) when the CIDR matches zero or
+# multiple local addresses.
+sub get_local_migration_ip {
+ my ($migration_network, $noerr) = @_;
+
+ my $cidr = $migration_network;
- return $ip;
+ if (!defined($cidr)) {
+ my $dc_conf = cfs_read_file('datacenter.cfg');
+ $cidr = $dc_conf->{migration}->{network}
+ if defined($dc_conf->{migration}->{network});
 }
- die "unable to get IP for node '$nodename' - node offline?\n" if !$noerr;
+ if (defined($cidr)) {
+ my $ips = PVE::Network::get_local_ip_from_cidr($cidr);
+
+ die "could not get migration ip: no IP address configured on local " .
+ "node for network '$cidr'\n" if !$noerr && (scalar(@$ips) == 0);
+
+ die "could not get migration ip: multiple IP address configured for " .
+ "network '$cidr'\n" if !$noerr && (scalar(@$ips) > 1);
+
+ return @$ips[0];
+ }
 return undef;
-}
+};
# ssh related utility functions
chomp($data);
}
+ my $found_backup;
+ if (-f $rootsshauthkeysbackup) {
+ $data .= "\n";
+ $data .= PVE::Tools::file_get_contents($rootsshauthkeysbackup, 128*1024);
+ chomp($data);
+ $found_backup = 1;
+ }
+
# always add ourself
if (-f $ssh_rsa_id) {
my $pub = PVE::Tools::file_get_contents($ssh_rsa_id);
my $newdata = "";
my $vhash = {};
- while ($data && $data =~ s/^((.*?)(\n|$))//) {
- my $line = "$2\n";
- if ($line =~ m/^ssh-rsa\s+\S+\s+(\S+)$/) {
- $vhash->{$1} = $line;
- } else {
- $newdata .= $line;
+ my @lines = split(/\n/, $data);
+ foreach my $line (@lines) {
+ if ($line !~ /^#/ && $line =~ m/(^|\s)ssh-(rsa|dsa)\s+(\S+)\s+\S+$/) {
+ next if $vhash->{$3}++;
}
+ $newdata .= "$line\n";
}
-
- $newdata .= join("", values(%$vhash));
PVE::Tools::file_set_contents($sshauthkeys, $newdata, 0600);
+
+ if ($found_backup && -l $rootsshauthkeys) {
+ # everything went well, so we can remove the backup
+ unlink $rootsshauthkeysbackup;
+ }
}
-sub setup_ssh_keys {
+# Ensure /etc/ssh/sshd_config permits root logins (required for
+# intra-cluster ssh), rewriting an existing PermitRootLogin line or
+# appending one, then reload sshd via systemctl. With $start_sshd set,
+# 'reload-or-restart' also starts a stopped daemon; otherwise
+# 'reload-or-try-restart' leaves a stopped daemon alone.
+sub setup_sshd_config {
+ my ($start_sshd) = @_;
+
+ my $conf = PVE::Tools::file_get_contents($sshd_config_fn);
+
+ # nothing to do when root login is already enabled
+ return if $conf =~ m/^PermitRootLogin\s+yes\s*$/m;
+
+ # replace a (possibly commented) PermitRootLogin line, else append one
+ if ($conf !~ s/^#?PermitRootLogin.*$/PermitRootLogin yes/m) {
+ chomp $conf;
+ $conf .= "\nPermitRootLogin yes\n";
+ }
+
+ PVE::Tools::file_set_contents($sshd_config_fn, $conf);
+
+ my $cmd = $start_sshd ? 'reload-or-restart' : 'reload-or-try-restart';
+ PVE::Tools::run_command(['systemctl', $cmd, 'sshd']);
+}
+
+# Create root's ssh key pair and a /root/.ssh/config with a cipher
+# preference list, but only when those files do not exist yet
+# (existing user configuration is never overwritten).
+sub setup_rootsshconfig {
     # create ssh key if it does not exist
     if (! -f $ssh_rsa_id) {
 	system ("echo|ssh-keygen -t rsa -N '' -b 2048 -f ${ssh_rsa_id_priv}");
     }
+ # create ssh config if it does not exist
+ if (! -f $rootsshconfig) {
+ mkdir '/root/.ssh';
+ # O_EXCL guarantees we never clobber an existing config
+ if (my $fh = IO::File->new($rootsshconfig, O_CREAT|O_WRONLY|O_EXCL, 0640)) {
+ # this is the default ciphers list from debian openssl0.9.8 except blowfish is added as preferred
+ print $fh "Ciphers blowfish-cbc,aes128-ctr,aes192-ctr,aes256-ctr,arcfour256,arcfour128,aes128-cbc,3des-cbc\n";
+ close($fh);
+ }
+ }
+}
+
+# Initialize the shared cluster-wide authorized_keys database
+# (/etc/pve/priv/authorized_keys), importing any pre-existing
+# /root/.ssh/authorized_keys, then replace the latter with a symlink.
+# The original root file is kept as a backup and only removed once the
+# import ($import_ok) and the symlink both succeeded.
+sub setup_ssh_keys {
+
 mkdir $authdir;
+ my $import_ok;
+
 if (! -f $sshauthkeys) {
+ my $old;
+ if (-f $rootsshauthkeys) {
+ $old = PVE::Tools::file_get_contents($rootsshauthkeys, 128*1024);
+ }
 if (my $fh = IO::File->new ($sshauthkeys, O_CREAT|O_WRONLY|O_EXCL, 0400)) {
+ PVE::Tools::safe_print($sshauthkeys, $fh, $old) if $old;
 close($fh);
+ $import_ok = 1;
 }
 }
- warn "can't create shared ssh key database '$sshauthkeys'\n"
+ warn "can't create shared ssh key database '$sshauthkeys'\n"
 if ! -f $sshauthkeys;
- if (-f $rootsshauthkeys) {
- system("mv '$rootsshauthkeys' '$rootsshauthkeys.org'");
+ # move the real file aside (but never a symlink we created earlier)
+ if (-f $rootsshauthkeys && ! -l $rootsshauthkeys) {
+ if (!rename($rootsshauthkeys , $rootsshauthkeysbackup)) {
+ warn "rename $rootsshauthkeys failed - $!\n";
+ }
 }
 if (! -l $rootsshauthkeys) {
 symlink $sshauthkeys, $rootsshauthkeys;
 }
- warn "can't create symlink for ssh keys '$rootsshauthkeys' -> '$sshauthkeys'\n"
- if ! -l $rootsshauthkeys;
+ if (! -l $rootsshauthkeys) {
+ warn "can't create symlink for ssh keys '$rootsshauthkeys' -> '$sshauthkeys'\n";
+ } else {
+ unlink $rootsshauthkeysbackup if $import_ok;
+ }
 }
sub ssh_unmerge_known_hosts {
die "no node name specified" if !$nodename;
die "no ip address specified" if !$ip_address;
-
+
mkdir $authdir;
if (! -f $sshknownhosts) {
}
}
- my $old = PVE::Tools::file_get_contents($sshknownhosts, 128*1024);
-
+ my $old = PVE::Tools::file_get_contents($sshknownhosts, 128*1024);
+
my $new = '';
-
+
if ((! -l $sshglobalknownhosts) && (-f $sshglobalknownhosts)) {
$new = PVE::Tools::file_get_contents($sshglobalknownhosts, 128*1024);
}
my $hostkey = PVE::Tools::file_get_contents($ssh_host_rsa_id);
- die "can't parse $ssh_rsa_id" if $hostkey !~ m/^(ssh-rsa\s\S+)(\s.*)?$/;
+ # Note: the file sometimes contains empty lines at the start, so we use a multiline match
+ die "can't parse $ssh_host_rsa_id" if $hostkey !~ m/^(ssh-rsa\s\S+)(\s.*)?$/m;
$hostkey = $1;
my $data = '';
unlink $sshglobalknownhosts;
symlink $sshknownhosts, $sshglobalknownhosts;
-
- warn "can't create symlink for ssh known hosts '$sshglobalknownhosts' -> '$sshknownhosts'\n"
+
+ warn "can't create symlink for ssh known hosts '$sshglobalknownhosts' -> '$sshknownhosts'\n"
if ! -l $sshglobalknownhosts;
}
-my $keymaphash = PVE::Tools::kvmkeymaps();
+# Property-string format for the datacenter.cfg 'migration' option:
+# "<type>[,network=<CIDR>]" where type is the default key.
+my $migration_format = {
+ type => {
+ default_key => 1,
+ type => 'string',
+ enum => ['secure', 'insecure'],
+ description => "Migration traffic is encrypted using an SSH tunnel by " .
+ "default. On secure, completely private networks this can be " .
+ "disabled to increase performance.",
+ default => 'secure',
+ },
+ network => {
+ optional => 1,
+ type => 'string', format => 'CIDR',
+ format_description => 'CIDR',
+ description => "CIDR of the (sub) network that is used for migration."
+ },
+};
+
my $datacenter_schema = {
type => "object",
additionalProperties => 0,
optional => 1,
type => 'string',
description => "Default keybord layout for vnc server.",
- enum => [ keys %$keymaphash ],
+ enum => PVE::Tools::kvmkeymaplist(),
},
language => {
optional => 1,
description => "Specify external http proxy which is used for downloads (example: 'http://username:password\@host:port/')",
pattern => "http://.*",
},
+ migration_unsecure => {
+ optional => 1,
+ type => 'boolean',
+ description => "Migration is secure using SSH tunnel by default. " .
+ "For secure private networks you can disable it to speed up " .
+ "migration. Deprecated, use the 'migration' property instead!",
+ },
+ migration => {
+ optional => 1,
+ type => 'string', format => $migration_format,
+ description => "For cluster wide migration settings.",
+ },
+ storage_replication_network => {
+ optional => 1,
+ type => 'string', format => 'CIDR',
+ description => "For cluster wide storage replication network.",
+ },
+ console => {
+ optional => 1,
+ type => 'string',
+ description => "Select the default Console viewer. You can either use the builtin java applet (VNC), an external virt-viewer compatible application (SPICE), or an HTML5 based viewer (noVNC).",
+ enum => ['applet', 'vv', 'html5'],
+ },
+ email_from => {
+ optional => 1,
+ type => 'string',
+ format => 'email-opt',
+ description => "Specify email address to send notification from (default is root@\$hostname)",
+ },
+ max_workers => {
+ optional => 1,
+ type => 'integer',
+ minimum => 1,
+ description => "Defines how many workers (per node) are maximal started ".
+ " on actions like 'stopall VMs' or task from the ha-manager.",
+ },
+ fencing => {
+ optional => 1,
+ type => 'string',
+ default => 'watchdog',
+ enum => [ 'watchdog', 'hardware', 'both' ],
+ description => "Set the fencing mode of the HA cluster. Hardware mode " .
+ "needs a valid configuration of fence devices in /etc/pve/ha/fence.cfg." .
+ " With both all two modes are used." .
+ "\n\nWARNING: 'hardware' and 'both' are EXPERIMENTAL & WIP",
+ },
+ mac_prefix => {
+ optional => 1,
+ type => 'string',
+ pattern => qr/[a-f0-9]{2}(?::[a-f0-9]{2}){0,2}:?/i,
+ description => 'Prefix for autogenerated MAC addresses.',
+ },
},
};
 sub parse_datacenter_config {
     my ($filename, $raw) = @_;
- return PVE::JSONSchema::parse_config($datacenter_schema, $filename, $raw);
+ # $raw may be undef when the file does not exist; parse an empty config then
+ my $res = PVE::JSONSchema::parse_config($datacenter_schema, $filename, $raw // '');
+
+ # expand the 'migration' property string into a hash (type/network)
+ if (my $migration = $res->{migration}) {
+ $res->{migration} = PVE::JSONSchema::parse_property_string($migration_format, $migration);
+ }
+
+ # for backwards compatibility only, new migration property has precedence
+ if (defined($res->{migration_unsecure})) {
+ if (defined($res->{migration}->{type})) {
+ warn "deprecated setting 'migration_unsecure' and new 'migration: type' " .
+ "set at same time! Ignore 'migration_unsecure'\n";
+ } else {
+ $res->{migration}->{type} = ($res->{migration_unsecure}) ? 'insecure' : 'secure';
+ }
+ }
+
+ return $res;
 }
 sub write_datacenter_config {
     my ($filename, $cfg) = @_;
-
+
+ # map deprecated setting to new one; the old key is removed so only
+ # the 'migration' property ends up in the written file
+ if (defined($cfg->{migration_unsecure}) && !defined($cfg->{migration})) {
+ my $migration_unsecure = delete $cfg->{migration_unsecure};
+ $cfg->{migration}->{type} = ($migration_unsecure) ? 'insecure' : 'secure';
+ }
+
 return PVE::JSONSchema::dump_config($datacenter_schema, $filename, $cfg);
 }
-cfs_register_file('datacenter.cfg',
- \&parse_datacenter_config,
+cfs_register_file('datacenter.cfg',
+ \&parse_datacenter_config,
\&write_datacenter_config);
+
+# A very simple hand-rolled parser for the corosync.conf syntax: nested
+# "name { key: value ... }" sections. Returns a tree rooted at a synthetic
+# 'main' section, where sections are { section => NAME, children => [...] }
+# and scalar entries are { key => K, value => V }. The SHA1 digest of the
+# unmodified input is stored in the result so callers can detect concurrent
+# modification before writing back. Note: values may not contain whitespace
+# (tokenization splits on it). Dies on '}' without a matching '{', a key
+# without a trailing ':', or a key without a value.
+sub parse_corosync_conf {
+    my ($filename, $raw) = @_;
+
+    return {} if !$raw;
+
+    # digest the raw input before any normalization ($raw is defined here)
+    my $digest = Digest::SHA::sha1_hex($raw);
+
+    # strip comments, then flatten all whitespace so we can tokenize on it
+    $raw =~ s/#.*$//mg;
+    $raw =~ s/\r?\n/ /g;
+    $raw =~ s/\s+/ /g;
+    $raw =~ s/^\s+//;
+    $raw =~ s/\s*$//;
+
+    my @tokens = split(/\s/, $raw);
+
+    my $conf = { section => 'main', children => [] };
+
+    # explicit stack of enclosing sections while descending into '{ }'
+    my $stack = [];
+    my $section = $conf;
+
+    while (defined(my $token = shift @tokens)) {
+        my $nexttok = $tokens[0];
+
+        if ($nexttok && ($nexttok eq '{')) {
+            shift @tokens; # skip '{'
+            my $new_section = {
+                section => $token,
+                children => [],
+            };
+            push @{$section->{children}}, $new_section;
+            push @$stack, $section;
+            $section = $new_section;
+            next;
+        }
+
+        if ($token eq '}') {
+            $section = pop @$stack;
+            die "parse error - unexpected '}'\n" if !$section;
+            next;
+        }
+
+        # anything else must be a "key:" followed by a value token
+        my $key = $token;
+        die "missing ':' after key '$key'\n" if ! ($key =~ s/:$//);
+
+        die "parse error - no value for '$key'\n" if !defined($nexttok);
+        my $value = shift @tokens;
+
+        push @{$section->{children}}, { key => $key, value => $value };
+    }
+
+    $conf->{digest} = $digest;
+
+    return $conf;
+}
+
+# Recursive dumper for one parsed corosync section: emits the section
+# header, all scalar keys (sorted), then all subsections (sorted), each
+# nesting level indented by four extra spaces via $prefix. Declared
+# before assignment so the closure can call itself by name.
+my $dump_corosync_section;
+$dump_corosync_section = sub {
+ my ($section, $prefix) = @_;
+
+ my $raw = $prefix . $section->{section} . " {\n";
+
+ # scalar entries first, in stable (sorted) order
+ my @list = grep { defined($_->{key}) } @{$section->{children}};
+ foreach my $child (sort {$a->{key} cmp $b->{key}} @list) {
+ $raw .= $prefix . " $child->{key}: $child->{value}\n";
+ }
+
+ # then nested sections, also sorted for deterministic output
+ @list = grep { defined($_->{section}) } @{$section->{children}};
+ foreach my $child (sort {$a->{section} cmp $b->{section}} @list) {
+ $raw .= &$dump_corosync_section($child, "$prefix ");
+ }
+
+ $raw .= $prefix . "}\n\n";
+
+ return $raw;
+
+};
+
+# Serialize a parsed corosync config tree (as produced by
+# parse_corosync_conf) back to its text form and return it. Top-level
+# keys are written first, then each top-level section is dumped via
+# $dump_corosync_section. Dies when the root is not the synthetic
+# 'main' section.
+sub write_corosync_conf {
+ my ($filename, $conf) = @_;
+
+ my $raw = '';
+
+ my $prefix = '';
+
+ die "no main section" if $conf->{section} ne 'main';
+
+ my @list = grep { defined($_->{key}) } @{$conf->{children}};
+ foreach my $child (sort {$a->{key} cmp $b->{key}} @list) {
+ $raw .= "$child->{key}: $child->{value}\n";
+ }
+
+ @list = grep { defined($_->{section}) } @{$conf->{children}};
+ foreach my $child (sort {$a->{section} cmp $b->{section}} @list) {
+ $raw .= &$dump_corosync_section($child, $prefix);
+ }
+
+ return $raw;
+}
+
+# Get or set totem.config_version in a parsed corosync config.
+# With $new_value set: store it and return it. Without: return the
+# current version as an integer. When no readable version exists,
+# returns undef if $noerr, else dies.
+# NOTE(review): both checks use plain truthiness, so a $new_value of 0
+# is ignored and a stored version of 0 reads as "missing" — confirm
+# that 0 is never a legal config_version.
+sub corosync_conf_version {
+ my ($conf, $noerr, $new_value) = @_;
+
+ foreach my $child (@{$conf->{children}}) {
+ next if !defined($child->{section});
+ if ($child->{section} eq 'totem') {
+ foreach my $e (@{$child->{children}}) {
+ next if !defined($e->{key});
+ if ($e->{key} eq 'config_version') {
+ if ($new_value) {
+ $e->{value} = $new_value;
+ return $new_value;
+ } elsif (my $version = int($e->{value})) {
+ return $version;
+ }
+ last;
+ }
+ }
+ }
+ }
+
+ return undef if $noerr;
+
+ die "invalid corosync config - unable to read version\n";
+}
+
+# register corosync.conf with the config-file cache:
+# read only - use "rename corosync.conf.new corosync.conf" to write
+PVE::Cluster::cfs_register_file('corosync.conf', \&parse_corosync_conf);
+# this is read/write (has a writer, see corosync_update_nodelist)
+PVE::Cluster::cfs_register_file('corosync.conf.new', \&parse_corosync_conf,
+ \&write_corosync_conf);
+
+# Test whether this node has a corosync.conf (i.e. is part of a
+# cluster). Returns the -f result; unless $silent is set, also warns
+# when the file is missing.
+sub check_corosync_conf_exists {
+ my ($silent) = @_;
+
+ $silent = $silent // 0;
+
+ my $exists = -f "$basedir/corosync.conf";
+
+ warn "Corosync config '$basedir/corosync.conf' does not exist - is this node part of a cluster?\n"
+ if !$silent && !$exists;
+
+ return $exists;
+}
+
+# Replace the 'nodelist' section of a parsed corosync config with the
+# entries from $nodelist (hash of per-node key/value hashes), bump
+# totem.config_version, write the result to corosync.conf.new and
+# atomically activate it by renaming over corosync.conf.
+sub corosync_update_nodelist {
+ my ($conf, $nodelist) = @_;
+
+ # the digest is metadata from parsing, not part of the config itself
+ delete $conf->{digest};
+
+ my $version = corosync_conf_version($conf);
+ corosync_conf_version($conf, undef, $version + 1);
+
+ my $children = [];
+ foreach my $v (values %$nodelist) {
+ # skip entries without any usable node identifier
+ next if !($v->{ring0_addr} || $v->{name});
+ my $kv = [];
+ foreach my $k (keys %$v) {
+ push @$kv, { key => $k, value => $v->{$k} };
+ }
+ my $ns = { section => 'node', children => $kv };
+ push @$children, $ns;
+ }
+
+ foreach my $main (@{$conf->{children}}) {
+ next if !defined($main->{section});
+ if ($main->{section} eq 'nodelist') {
+ $main->{children} = $children;
+ last;
+ }
+ }
+
+
+ cfs_write_file("corosync.conf.new", $conf);
+
+ rename("/etc/pve/corosync.conf.new", "/etc/pve/corosync.conf")
+ || die "activate corosync.conf.new failed - $!\n";
+}
+
+# Extract the nodelist from a parsed corosync config as a hash keyed
+# by node name ('name' entry preferred, 'ring0_addr' as fallback);
+# each value is the node's key/value pairs with quorum_votes
+# defaulting to 1.
+sub corosync_nodelist {
+ my ($conf) = @_;
+
+ my $nodelist = {};
+
+ foreach my $main (@{$conf->{children}}) {
+ next if !defined($main->{section});
+ if ($main->{section} eq 'nodelist') {
+ foreach my $ne (@{$main->{children}}) {
+ next if !defined($ne->{section}) || ($ne->{section} ne 'node');
+ my $node = { quorum_votes => 1 };
+ my $name;
+ foreach my $child (@{$ne->{children}}) {
+ next if !defined($child->{key});
+ $node->{$child->{key}} = $child->{value};
+ # use 'name' over 'ring0_addr' if set
+ if ($child->{key} eq 'name') {
+ # re-key the node if it was registered under ring0_addr first
+ delete $nodelist->{$name} if $name;
+ $name = $child->{value};
+ $nodelist->{$name} = $node;
+ } elsif(!$name && $child->{key} eq 'ring0_addr') {
+ $name = $child->{value};
+ $nodelist->{$name} = $node;
+ }
+ }
+ }
+ }
+ }
+
+ return $nodelist;
+}
+
+# get a hash representation of the corosync config totem section:
+# scalar totem keys become top-level entries; 'interface' subsections
+# are collected under $res->{interface}, keyed by their ringnumber.
+# NOTE(review): $res->{interface} is reset to {} for every interface
+# subsection encountered, which would drop earlier interfaces when
+# more than one exists — confirm whether multi-ring configs are expected.
+sub corosync_totem_config {
+ my ($conf) = @_;
+
+ my $res = {};
+
+ foreach my $main (@{$conf->{children}}) {
+ next if !defined($main->{section}) ||
+ $main->{section} ne 'totem';
+
+ foreach my $e (@{$main->{children}}) {
+
+ if ($e->{section} && $e->{section} eq 'interface') {
+ my $entry = {};
+
+ $res->{interface} = {};
+
+ foreach my $child (@{$e->{children}}) {
+ next if !defined($child->{key});
+ $entry->{$child->{key}} = $child->{value};
+ if($child->{key} eq 'ringnumber') {
+ $res->{interface}->{$child->{value}} = $entry;
+ }
+ }
+
+ } elsif ($e->{key}) {
+ $res->{$e->{key}} = $e->{value};
+ }
+ }
+ }
+
+ return $res;
+}
+
+# X509 Certificate cache helper
+
+# node name -> pinned sha256 fingerprint of that node's certificate
+my $cert_cache_nodes = {};
+# time of the last full cache (re)build, used for periodic expiry
+my $cert_cache_timestamp = time();
+# set of trusted fingerprints (reverse index over $cert_cache_nodes)
+my $cert_cache_fingerprints = {};
+
+# Refresh the certificate fingerprint cache. With $update_node, only
+# that node is (re)loaded; otherwise all cached nodes are refreshed and
+# the cache timestamp is reset. With $clear set, nodes whose certificate
+# can no longer be read or fingerprinted are evicted from the cache.
+sub update_cert_cache {
+ my ($update_node, $clear) = @_;
+
+ syslog('info', "Clearing outdated entries from certificate cache")
+ if $clear;
+
+ $cert_cache_timestamp = time() if !defined($update_node);
+
+ my $node_list = defined($update_node) ?
+ [ $update_node ] : [ keys %$cert_cache_nodes ];
+
+ foreach my $node (@$node_list) {
+ my $clear_old = sub {
+ if (my $old_fp = $cert_cache_nodes->{$node}) {
+ # distrust old fingerprint
+ delete $cert_cache_fingerprints->{$old_fp};
+ # ensure reload on next proxied request
+ delete $cert_cache_nodes->{$node};
+ }
+ };
+
+ # a custom pveproxy certificate takes precedence over the pve-ssl one
+ my $cert_path = "/etc/pve/nodes/$node/pve-ssl.pem";
+ my $custom_cert_path = "/etc/pve/nodes/$node/pveproxy-ssl.pem";
+
+ $cert_path = $custom_cert_path if -f $custom_cert_path;
+
+ # NOTE(review): the X509 object (and, if PEM_read_bio_X509 dies, the
+ # BIO) is never freed here — check for a Net::SSLeay resource leak.
+ my $cert;
+ eval {
+ my $bio = Net::SSLeay::BIO_new_file($cert_path, 'r');
+ $cert = Net::SSLeay::PEM_read_bio_X509($bio);
+ Net::SSLeay::BIO_free($bio);
+ };
+ my $err = $@;
+ if ($err || !defined($cert)) {
+ &$clear_old() if $clear;
+ next;
+ }
+
+ my $fp;
+ eval {
+ $fp = Net::SSLeay::X509_get_fingerprint($cert, 'sha256');
+ };
+ $err = $@;
+ if ($err || !defined($fp) || $fp eq '') {
+ &$clear_old() if $clear;
+ next;
+ }
+
+ # register the new fingerprint and drop a superseded one
+ my $old_fp = $cert_cache_nodes->{$node};
+ $cert_cache_fingerprints->{$fp} = 1;
+ $cert_cache_nodes->{$node} = $fp;
+
+ if (defined($old_fp) && $fp ne $old_fp) {
+ delete $cert_cache_fingerprints->{$old_fp};
+ }
+ }
+}
+
+# load and cache cert fingerprint once
+# (no-op when $node is undef or its fingerprint is already cached)
+sub initialize_cert_cache {
+ my ($node) = @_;
+
+ update_cert_cache($node)
+ if defined($node) && !defined($cert_cache_nodes->{$node});
+}
+
+# Verify a peer certificate against the pinned fingerprints of cluster
+# nodes. Returns 1 when its sha256 fingerprint is known, 0 otherwise
+# (including on any fingerprinting error). On a miss, the cache is
+# refreshed and checked once more, rate-limited to once per minute.
+sub check_cert_fingerprint {
+ my ($cert) = @_;
+
+ # clear cache every 30 minutes at least
+ update_cert_cache(undef, 1) if time() - $cert_cache_timestamp >= 60*30;
+
+ # get fingerprint of server certificate
+ my $fp;
+ eval {
+ $fp = Net::SSLeay::X509_get_fingerprint($cert, 'sha256');
+ };
+ return 0 if $@ || !defined($fp) || $fp eq ''; # error
+
+ my $check = sub {
+ for my $expected (keys %$cert_cache_fingerprints) {
+ return 1 if $fp eq $expected;
+ }
+ return 0;
+ };
+
+ return 1 if &$check();
+
+ # clear cache and retry at most once every minute
+ if (time() - $cert_cache_timestamp >= 60) {
+ syslog ('info', "Could not verify remote node certificate '$fp' with list of pinned certificates, refreshing cache");
+ update_cert_cache();
+ return &$check();
+ }
+
+ return 0;
+}
+
+# bash completion helpers
+
+# Suggest the lowest free VMID in the conventional 100..9999 range;
+# returns a one-element list, or an empty list when the range is full.
+sub complete_next_vmid {
+
+ my $vmlist = get_vmlist() || {};
+ my $idlist = $vmlist->{ids} || {};
+
+ for (my $i = 100; $i < 10000; $i++) {
+ return [$i] if !defined($idlist->{$i});
+ }
+
+ return [];
+}
+
+# Completion helper: all VMIDs known cluster-wide.
+sub complete_vmid {
+
+ my $vmlist = get_vmlist();
+ my $ids = $vmlist->{ids} || {};
+
+ return [ keys %$ids ];
+}
+
+# Completion helper: only the VMIDs whose guest resides on this node.
+sub complete_local_vmid {
+
+ my $vmlist = get_vmlist();
+ my $ids = $vmlist->{ids} || {};
+
+ my $nodename = PVE::INotify::nodename();
+
+ my $res = [];
+ foreach my $vmid (keys %$ids) {
+ my $d = $ids->{$vmid};
+ next if !$d->{node} || $d->{node} ne $nodename;
+ push @$res, $vmid;
+ }
+
+ return $res;
+}
+
+# Completion helper: all cluster nodes except the local one
+# (valid targets for a migration).
+sub complete_migration_target {
+
+ my $res = [];
+
+ my $nodename = PVE::INotify::nodename();
+
+ my $nodelist = get_nodelist();
+ foreach my $node (@$nodelist) {
+ next if $node eq $nodename;
+ push @$res, $node;
+ }
+
+ return $res;
+}
+
+1;