api: replica: safer config update
diff --git a/PVE/API2/Qemu.pm b/PVE/API2/Qemu.pm
index d0070a67651ae1cdf0a8225345356b8725fe2a11..80bc141b5817bcc0339bb9f60dfc5633231559a5 100644 (file)
@@ -5,6 +5,8 @@ use warnings;
 use Cwd 'abs_path';
 use Net::SSLeay;
 use UUID;
+use POSIX;
+use IO::Socket::IP;
 
 use PVE::Cluster qw (cfs_read_file cfs_write_file);
 use PVE::SafeSyslog;
@@ -22,8 +24,16 @@ use PVE::INotify;
 use PVE::Network;
 use PVE::Firewall;
 use PVE::API2::Firewall::VM;
-use PVE::HA::Env::PVE2;
-use PVE::HA::Config;
+use PVE::ReplicationTools;
+
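+# load the HA modules at runtime only: generating the API documentation
+# must work without the HA stack being loadable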
+BEGIN {
+    if (!$ENV{PVE_GENERATING_DOCS}) {
+       require PVE::HA::Env::PVE2;
+       import PVE::HA::Env::PVE2;
+       require PVE::HA::Config;
+       import PVE::HA::Config;
+    }
+}
 
 use Data::Dumper; # fixme: remove
 
@@ -60,7 +70,7 @@ my $check_storage_access = sub {
            die "no storage ID specified (and no default storage)\n" if !$storeid;
            $rpcenv->check($authuser, "/storage/$storeid", ['Datastore.AllocateSpace']);
        } else {
-           $rpcenv->check_volume_access($authuser, $storecfg, $vmid, $volid);
+           PVE::Storage::check_volume_access($rpcenv, $authuser, $storecfg, $vmid, $volid);
        }
     });
 };
@@ -157,7 +167,7 @@ my $create_disks = sub {
            $res->{$ds} = PVE::QemuServer::print_drive($vmid, $disk);
        } else {
 
-           $rpcenv->check_volume_access($authuser, $storecfg, $vmid, $volid);
+           PVE::Storage::check_volume_access($rpcenv, $authuser, $storecfg, $vmid, $volid);
 
            my $volid_is_new = 1;
 
@@ -474,7 +484,7 @@ __PACKAGE__->register_method({
                die "pipe requires cli environment\n"
                    if $rpcenv->{type} ne 'cli';
            } else {
-               $rpcenv->check_volume_access($authuser, $storecfg, $vmid, $archive);
+               PVE::Storage::check_volume_access($rpcenv, $authuser, $storecfg, $vmid, $archive);
                $archive = PVE::Storage::abs_filesystem_path($storecfg, $archive);
            }
        }
@@ -998,6 +1008,12 @@ my $update_vm_api  = sub {
            foreach my $opt (@delete) {
                $modified->{$opt} = 1;
                $conf = PVE::QemuConfig->load_config($vmid); # update/reload
+               if (!defined($conf->{$opt})) {
+                   warn "cannot delete '$opt' - not set in current configuration!\n";
+                   $modified->{$opt} = 0;
+                   next;
+               }
+
                if ($opt =~ m/^unused/) {
                    my $drive = PVE::QemuServer::parse_drive($opt, $conf->{$opt});
                    PVE::QemuConfig->check_protection($conf, "can't remove unused disk '$drive->{file}'");
@@ -1013,6 +1029,16 @@ my $update_vm_api  = sub {
                        if defined($conf->{pending}->{$opt});
                    PVE::QemuServer::vmconfig_delete_pending_option($conf, $opt, $force);
                    PVE::QemuConfig->write_config($vmid, $conf);
+               } elsif ($opt eq "replica" || $opt eq "replica_target") {
+                   delete $conf->{$opt};
+                   delete $conf->{replica} if $opt eq "replica_target";
+
+                   PVE::ReplicationTools::job_remove($vmid);
+                   PVE::QemuConfig->write_config($vmid, $conf);
+               } elsif ($opt eq "replica_interval" || $opt eq "replica_rate_limit") {
+                   delete $conf->{$opt};
+                   PVE::ReplicationTools::update_conf($vmid, $opt, $param->{$opt});
+                   PVE::QemuConfig->write_config($vmid, $conf);
                } else {
                    PVE::QemuServer::vmconfig_delete_pending_option($conf, $opt, $force);
                    PVE::QemuConfig->write_config($vmid, $conf);
@@ -1035,6 +1061,27 @@ my $update_vm_api  = sub {
                        if defined($conf->{pending}->{$opt});
 
                    &$create_disks($rpcenv, $authuser, $conf->{pending}, $storecfg, $vmid, undef, {$opt => $param->{$opt}});
+               } elsif ($opt eq "replica") {
+                   die "Not all volumes are syncable, please check your config\n"
+                       if !PVE::ReplicationTools::check_guest_volumes_syncable($conf, 'qemu');
+                   die "replica_target is required\n"
+                       if !$conf->{replica_target} && !$param->{replica_target};
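+                   # 'replica' acts as an on/off switch for the sync job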
+                   my $value = $param->{$opt};
+                   if ($value) {
+                       PVE::ReplicationTools::job_enable($vmid);
+                   } else {
+                       PVE::ReplicationTools::job_disable($vmid);
+                   }
+                   $conf->{$opt} = $param->{$opt};
+               } elsif ($opt eq "replica_interval" || $opt eq "replica_rate_limit") {
+                   $conf->{$opt} = $param->{$opt};
+                   PVE::ReplicationTools::update_conf($vmid, $opt, $param->{$opt});
+               } elsif ($opt eq "replica_target" ) {
+                   die "Node: $param->{$opt} does not exists in Cluster.\n"
+                       if !PVE::Cluster::check_node_exists($param->{$opt});
+                   PVE::ReplicationTools::update_conf($vmid, $opt, $param->{$opt})
+                       if defined($conf->{$opt});
+                   $conf->{$opt} = $param->{$opt};
                } else {
                    $conf->{pending}->{$opt} = $param->{$opt};
                }
@@ -1270,6 +1317,9 @@ __PACKAGE__->register_method({
 
            syslog('info', "destroy VM $vmid: $upid\n");
 
+           # return without error if vm has no replica job
+           PVE::ReplicationTools::destroy_replica($vmid);
+
            PVE::QemuServer::vm_destroy($storecfg, $vmid, $skiplock);
 
            PVE::AccessControl::remove_vm_access($vmid);
@@ -1401,20 +1451,36 @@ __PACKAGE__->register_method({
                $cmd = ['/usr/bin/vncterm', '-rfbport', $port,
                        '-timeout', $timeout, '-authpath', $authpath,
                        '-perm', 'Sys.Console', '-c', @$remcmd, @$termcmd];
+               PVE::Tools::run_command($cmd);
            } else {
 
                $ENV{LC_PVE_TICKET} = $ticket if $websocket; # set ticket with "qm vncproxy"
 
-               my $qmcmd = [@$remcmd, "/usr/sbin/qm", 'vncproxy', $vmid];
-
-               my $qmstr = join(' ', @$qmcmd);
-
-               # also redirect stderr (else we get RFB protocol errors)
-               $cmd = ['/bin/nc6', '-l', '-p', $port, '-w', $timeout, '-e', "$qmstr 2>/dev/null"];
+               $cmd = [@$remcmd, "/usr/sbin/qm", 'vncproxy', $vmid];
+
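+               # accept the VNC client connection ourselves (instead of
+               # spawning nc6) and attach 'qm vncproxy' to it as stdin/stdout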
+               my $sock = IO::Socket::IP->new(
+                   Listen => 1,
+                   LocalPort => $port,
+                   Proto => 'tcp',
+                   GetAddrInfoFlags => 0,
+                   ) or die "failed to create socket: $!\n";
+               # Inside the worker we shouldn't have any previous alarms
+               # running anyway...:
+               alarm(0);
+               local $SIG{ALRM} = sub { die "connection timed out\n" };
+               alarm $timeout;
+               accept(my $cli, $sock) or die "connection failed: $!\n";
+               alarm(0);
+               close($sock);
+               if (PVE::Tools::run_command($cmd,
+                   output => '>&'.fileno($cli),
+                   input => '<&'.fileno($cli),
+                   noerr => 1) != 0)
+               {
+                   die "Failed to run vncproxy.\n";
+               }
            }
 
-           PVE::Tools::run_command($cmd);
-
            return;
        };
 
@@ -1642,6 +1708,11 @@ __PACKAGE__->register_method({
                optional => 1,
            },
            machine => get_standard_option('pve-qm-machine'),
+           targetstorage => {
+               description => "Target storage for the migration. (Can be '1' to use the same storage id as on the source node.)",
+               type => 'string',
+               optional => 1
+           }
        },
     },
     returns => {
@@ -1680,6 +1751,13 @@ __PACKAGE__->register_method({
        raise_param_exc({ migration_network => "Only root may use this option." })
            if $migration_network && $authuser ne 'root@pam';
 
+       my $targetstorage = extract_param($param, 'targetstorage');
+       raise_param_exc({ targetstorage => "Only root may use this option." })
+           if $targetstorage && $authuser ne 'root@pam';
+
+       raise_param_exc({ targetstorage => "targetstorage can only be used with migratedfrom." })
+           if $targetstorage && !$migratedfrom;
+
        # read spice ticket from STDIN
        my $spice_ticket;
        if ($stateuri && ($stateuri eq 'tcp') && $migratedfrom && ($rpcenv->{type} eq 'cli')) {
@@ -1720,7 +1798,7 @@ __PACKAGE__->register_method({
                syslog('info', "start VM $vmid: $upid\n");
 
                PVE::QemuServer::vm_start($storecfg, $vmid, $stateuri, $skiplock, $migratedfrom, undef,
-                                         $machine, $spice_ticket, $migration_network, $migration_type);
+                                         $machine, $spice_ticket, $migration_network, $migration_type, $targetstorage);
 
                return;
            };
@@ -2443,21 +2521,28 @@ __PACKAGE__->register_method({
                my $upid = shift;
 
                my $newvollist = [];
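+               # collect the running block (mirror) jobs so they can be
+               # cancelled if cloning fails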
+               my $jobs = {};
 
                eval {
                    local $SIG{INT} = $SIG{TERM} = $SIG{QUIT} = $SIG{HUP} = sub { die "interrupted by signal\n"; };
 
                    PVE::Storage::activate_volumes($storecfg, $vollist, $snapname);
 
+                   my $total_jobs = scalar(keys %{$drives});
+                   my $i = 1;
+
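+                   # keep every mirror job running (skipcomplete) until the
+                   # last drive, so all disks can be switched over together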
                    foreach my $opt (keys %$drives) {
                        my $drive = $drives->{$opt};
+                       my $skipcomplete = ($total_jobs != $i); # finish after last drive
 
                        my $newdrive = PVE::QemuServer::clone_disk($storecfg, $vmid, $running, $opt, $drive, $snapname,
-                                                                  $newid, $storage, $format, $fullclone->{$opt}, $newvollist);
+                                                                  $newid, $storage, $format, $fullclone->{$opt}, $newvollist,
+                                                                  $jobs, $skipcomplete, $oldconf->{agent});
 
                        $newconf->{$opt} = PVE::QemuServer::print_drive($vmid, $newdrive);
 
                        PVE::QemuConfig->write_config($newid, $newconf);
+                       $i++;
                    }
 
                    delete $newconf->{lock};
@@ -2478,6 +2563,8 @@ __PACKAGE__->register_method({
                if (my $err = $@) {
                    unlink $conffile;
 
+                   eval { PVE::QemuServer::qemu_blockjobs_cancel($vmid, $jobs) };
+
                    sleep 1; # some storage like rbd need to wait before release volume - really?
 
                    foreach my $volid (@$newvollist) {
@@ -2629,6 +2716,10 @@ __PACKAGE__->register_method({
 
                    PVE::QemuConfig->add_unused_volume($conf, $old_volid) if !$param->{delete};
 
+                   # convert moved disk to base if part of template
+                   PVE::QemuServer::template_create($vmid, $conf, $disk)
+                       if PVE::QemuConfig->is_template($conf);
+
                    PVE::QemuConfig->write_config($vmid, $conf);
 
                    eval {
@@ -2702,6 +2793,16 @@ __PACKAGE__->register_method({
                description => "CIDR of the (sub) network that is used for migration.",
                optional => 1,
            },
+           "with-local-disks" => {
+               type => 'boolean',
+               description => "Enable live storage migration for local disk",
+               optional => 1,
+           },
+           targetstorage => get_standard_option('pve-storage-id', {
+               description => "Default target storage.",
+               optional => 1,
+               completion => \&PVE::QemuServer::complete_storage,
+           }),
        },
     },
     returns => {
@@ -2728,6 +2829,9 @@ __PACKAGE__->register_method({
 
        my $vmid = extract_param($param, 'vmid');
 
+       raise_param_exc({ targetstorage => "Live storage migration can only be done online." })
+           if !$param->{online} && $param->{targetstorage};
+
        raise_param_exc({ force => "Only root may use this option." })
            if $param->{force} && $authuser ne 'root@pam';
 
@@ -2751,7 +2855,12 @@ __PACKAGE__->register_method({
        }
 
        my $storecfg = PVE::Storage::config();
-       PVE::QemuServer::check_storage_availability($storecfg, $conf, $target);
+
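+       # with an explicit target storage only that storage needs to exist on
+       # the target node; otherwise all storages used by the VM must be available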
+       if ($param->{targetstorage}) {
+           PVE::Storage::storage_check_node($storecfg, $param->{targetstorage}, $target);
+       } else {
+           PVE::QemuServer::check_storage_availability($storecfg, $conf, $target);
+       }
 
        if (PVE::HA::Config::vm_is_ha_managed($vmid) && $rpcenv->{type} ne 'ha') {
 
@@ -2923,7 +3032,7 @@ __PACKAGE__->register_method({
            size => {
                type => 'string',
                pattern => '\+?\d+(\.\d+)?[KMGT]?',
-               description => "The new size. With the '+' sign the value is added to the actual size of the volume and without it, the value is taken as an absolute one. Shrinking disk size is not supported.",
+               description => "The new size. With the `+` sign the value is added to the actual size of the volume and without it, the value is taken as an absolute one. Shrinking disk size is not supported.",
            },
            digest => {
                type => 'string',
@@ -3004,7 +3113,7 @@ __PACKAGE__->register_method({
            $newsize += $size if $ext;
            $newsize = int($newsize);
 
-           die "unable to skrink disk size\n" if $newsize < $size;
+           die "shrinking disks is not supported\n" if $newsize < $size;
 
            return if $size == $newsize;