git.proxmox.com Git - qemu-server.git/blobdiff - PVE/API2/Qemu.pm
Remove guest states to ensure no old states exist
diff --git a/PVE/API2/Qemu.pm b/PVE/API2/Qemu.pm
index d0070a67651ae1cdf0a8225345356b8725fe2a11..40e68ddd702aeafb7a17363c95d5b1a1bc5468cb 100644
--- a/PVE/API2/Qemu.pm
+++ b/PVE/API2/Qemu.pm
@@ -5,6 +5,8 @@ use warnings;
 use Cwd 'abs_path';
 use Net::SSLeay;
 use UUID;
+use POSIX;
+use IO::Socket::IP;
 
 use PVE::Cluster qw (cfs_read_file cfs_write_file);;
 use PVE::SafeSyslog;
@@ -13,6 +15,8 @@ use PVE::Exception qw(raise raise_param_exc raise_perm_exc);
 use PVE::Storage;
 use PVE::JSONSchema qw(get_standard_option);
 use PVE::RESTHandler;
+use PVE::ReplicationConfig;
+use PVE::GuestHelpers;
 use PVE::QemuConfig;
 use PVE::QemuServer;
 use PVE::QemuMigrate;
@@ -22,8 +26,15 @@ use PVE::INotify;
 use PVE::Network;
 use PVE::Firewall;
 use PVE::API2::Firewall::VM;
-use PVE::HA::Env::PVE2;
-use PVE::HA::Config;
+
+BEGIN {
+    if (!$ENV{PVE_GENERATING_DOCS}) {
+       require PVE::HA::Env::PVE2;
+       import PVE::HA::Env::PVE2;
+       require PVE::HA::Config;
+       import PVE::HA::Config;
+    }
+}
 
 use Data::Dumper; # fixme: remove
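
Loading the HA modules is now deferred to run time and skipped entirely while the API documentation is generated. The require/import-inside-BEGIN idiom can be tried in isolation; a minimal sketch, using POSIX as a stand-in for the HA modules so it runs anywhere:

    use strict;
    use warnings;

    # Pull in a heavy dependency only when we are not merely generating docs.
    # require() defers loading to run time, so the guarded modules do not have
    # to be installed in the docs-build environment.
    BEGIN {
        if (!$ENV{PVE_GENERATING_DOCS}) {
            require POSIX;      # stand-in for PVE::HA::Env::PVE2 / PVE::HA::Config
            POSIX->import();
        }
    }

    print defined &floor ? "HA-style modules loaded\n" : "docs-only mode\n";
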
 
@@ -60,7 +71,7 @@ my $check_storage_access = sub {
            die "no storage ID specified (and no default storage)\n" if !$storeid;
            $rpcenv->check($authuser, "/storage/$storeid", ['Datastore.AllocateSpace']);
        } else {
-           $rpcenv->check_volume_access($authuser, $storecfg, $vmid, $volid);
+           PVE::Storage::check_volume_access($rpcenv, $authuser, $storecfg, $vmid, $volid);
        }
     });
 };
@@ -104,13 +115,15 @@ my $check_storage_access_clone = sub {
 
 # Note: $pool is only needed when creating a VM, because pool permissions
 # are automatically inherited if VM already exists inside a pool.
+my $NEW_DISK_RE = qr!^(([^/:\s]+):)?(\d+(\.\d+)?)$!;
 my $create_disks = sub {
     my ($rpcenv, $authuser, $conf, $storecfg, $vmid, $pool, $settings, $default_storage) = @_;
 
     my $vollist = [];
 
     my $res = {};
-    PVE::QemuServer::foreach_drive($settings, sub {
+
+    my $code = sub {
        my ($ds, $disk) = @_;
 
        my $volid = $disk->{file};
@@ -118,7 +131,7 @@ my $create_disks = sub {
        if (!$volid || $volid eq 'none' || $volid eq 'cdrom') {
            delete $disk->{size};
            $res->{$ds} = PVE::QemuServer::print_drive($vmid, $disk);
-       } elsif ($volid =~ m/^(([^:\s]+):)?(\d+(\.\d+)?)$/) {
+       } elsif ($volid =~ $NEW_DISK_RE) {
            my ($storeid, $size) = ($2 || $default_storage, $3);
            die "no storage ID specified (and no default storage)\n" if !$storeid;
            my $defformat = PVE::Storage::storage_default_format($storecfg, $storeid);
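
The new-disk specification accepted here is either a plain size in gigabytes or `storage:size`; anything else falls through to the existing-volume branch further down. A standalone check against the same $NEW_DISK_RE pattern (the example specs are illustrative):

    use strict;
    use warnings;

    my $NEW_DISK_RE = qr!^(([^/:\s]+):)?(\d+(\.\d+)?)$!;

    for my $spec ('local-lvm:32', '8.5', 'local:107/vm-107-disk-1.qcow2') {
        if ($spec =~ $NEW_DISK_RE) {
            my ($storeid, $size) = ($2 // '<default storage>', $3);
            print "$spec => allocate ${size}G on $storeid\n";
        } else {
            print "$spec => treated as an existing volume ID\n";
        }
    }
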
@@ -157,7 +170,7 @@ my $create_disks = sub {
            $res->{$ds} = PVE::QemuServer::print_drive($vmid, $disk);
        } else {
 
-           $rpcenv->check_volume_access($authuser, $storecfg, $vmid, $volid);
+           PVE::Storage::check_volume_access($rpcenv, $authuser, $storecfg, $vmid, $volid);
 
            my $volid_is_new = 1;
 
@@ -181,7 +194,9 @@ my $create_disks = sub {
 
            $res->{$ds} = PVE::QemuServer::print_drive($vmid, $disk);
        }
-    });
+    };
+
+    eval { PVE::QemuServer::foreach_drive($settings, $code); };
 
     # free allocated images on error
     if (my $err = $@) {
@@ -474,7 +489,7 @@ __PACKAGE__->register_method({
                die "pipe requires cli environment\n"
                    if $rpcenv->{type} ne 'cli';
            } else {
-               $rpcenv->check_volume_access($authuser, $storecfg, $vmid, $archive);
+               PVE::Storage::check_volume_access($rpcenv, $authuser, $storecfg, $vmid, $archive);
                $archive = PVE::Storage::abs_filesystem_path($storecfg, $archive);
            }
        }
@@ -511,6 +526,9 @@ __PACKAGE__->register_method({
                PVE::AccessControl::add_vm_to_pool($vmid, $pool) if $pool;
            };
 
+           # ensure no old replication state exists
+           PVE::ReplicationState::delete_guest_states($vmid);
+
            return $rpcenv->fork_worker('qmrestore', $vmid, $authuser, $realcmd);
        };
 
@@ -519,6 +537,9 @@ __PACKAGE__->register_method({
            # test after locking
            PVE::Cluster::check_vmid_unused($vmid);
 
+           # ensure no old replication state exists
+           PVE::ReplicationState::delete_guest_states($vmid);
+
            my $realcmd = sub {
 
                my $vollist = [];
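
Both the restore path and the plain create path now clear any replication state left over from a previously destroyed guest with the same VMID, so a recycled VMID does not start out with stale job state. A hedged sketch of the call-site pattern (run on a PVE node; the VMID is an example and the surrounding config locking is omitted):

    use strict;
    use warnings;
    use PVE::Cluster;
    use PVE::ReplicationState;

    my $vmid = 9001;    # example VMID

    # after confirming the VMID is unused, but before writing the new config,
    # drop any leftover per-guest replication state for that VMID
    PVE::Cluster::check_vmid_unused($vmid);
    PVE::ReplicationState::delete_guest_states($vmid);
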
@@ -940,12 +961,35 @@ my $update_vm_api  = sub {
        push @delete, $opt;
     }
 
+    my $repl_conf = PVE::ReplicationConfig->new();
+    my $is_replicated = $repl_conf->check_for_existing_jobs($vmid, 1);
+    my $check_replication = sub {
+       my ($drive) = @_;
+       return if !$is_replicated;
+       my $volid = $drive->{file};
+       return if !$volid || !($drive->{replicate}//1);
+       return if PVE::QemuServer::drive_is_cdrom($drive);
+       my ($storeid, $format);
+       if ($volid =~ $NEW_DISK_RE) {
+           $storeid = $2;
+           $format = $drive->{format} || PVE::Storage::storage_default_format($storecfg, $storeid);
+       } else {
+           ($storeid, undef) = PVE::Storage::parse_volume_id($volid, 1);
+           $format = (PVE::Storage::parse_volname($storecfg, $volid))[6];
+       }
+       return if PVE::Storage::storage_can_replicate($storecfg, $storeid, $format);
+       my $scfg = PVE::Storage::storage_config($storecfg, $storeid);
+       return if $scfg->{shared};
+       die "cannot add non-replicatable volume to a replicated VM\n";
+    };
+
     foreach my $opt (keys %$param) {
        if (PVE::QemuServer::is_valid_drivename($opt)) {
            # cleanup drive path
            my $drive = PVE::QemuServer::parse_drive($opt, $param->{$opt});
            raise_param_exc({ $opt => "unable to parse drive options" }) if !$drive;
            PVE::QemuServer::cleanup_drive_path($opt, $storecfg, $drive);
+           $check_replication->($drive);
            $param->{$opt} = PVE::QemuServer::print_drive($vmid, $drive);
        } elsif ($opt =~ m/^net(\d+)$/) {
            # add macaddr
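
For an existing volume the check derives the storage ID via parse_volume_id and the format via parse_volname, whose seventh list element is the format; for a new-disk spec it reuses $NEW_DISK_RE. A standalone sketch of the existing-volume branch (the volume ID is illustrative and must exist in the local storage configuration):

    use strict;
    use warnings;
    use PVE::Storage;

    my $storecfg = PVE::Storage::config();
    my $volid    = 'local-lvm:vm-100-disk-1';    # example volume ID

    my ($storeid) = PVE::Storage::parse_volume_id($volid, 1);
    my $format    = (PVE::Storage::parse_volname($storecfg, $volid))[6];

    if (PVE::Storage::storage_can_replicate($storecfg, $storeid, $format)) {
        print "$volid ($format on $storeid) can be replicated\n";
    } else {
        my $scfg = PVE::Storage::storage_config($storecfg, $storeid);
        print $scfg->{shared}
            ? "$volid is shared, no replication needed\n"
            : "$volid cannot be replicated\n";
    }
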
@@ -998,6 +1042,12 @@ my $update_vm_api  = sub {
            foreach my $opt (@delete) {
                $modified->{$opt} = 1;
                $conf = PVE::QemuConfig->load_config($vmid); # update/reload
+               if (!defined($conf->{$opt}) && !defined($conf->{pending}->{$opt})) {
+                   warn "cannot delete '$opt' - not set in current configuration!\n";
+                   $modified->{$opt} = 0;
+                   next;
+               }
+
                if ($opt =~ m/^unused/) {
                    my $drive = PVE::QemuServer::parse_drive($opt, $conf->{$opt});
                    PVE::QemuConfig->check_protection($conf, "can't remove unused disk '$drive->{file}'");
@@ -1261,6 +1311,10 @@ __PACKAGE__->register_method({
        die "unable to remove VM $vmid - used in HA resources\n"
            if PVE::HA::Config::vm_is_ha_managed($vmid);
 
+       # do not allow destroy if there are replication jobs
+       my $repl_conf = PVE::ReplicationConfig->new();
+       $repl_conf->check_for_existing_jobs($vmid);
+
        # early tests (repeat after locking)
        die "VM $vmid is running - destroy failed\n"
            if PVE::QemuServer::check_running($vmid);
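
check_for_existing_jobs appears in two forms in this patch: with the $noerr flag it merely reports whether the VM is referenced by a replication job (as in $is_replicated above), without it the call dies, which is what blocks destroying a replicated VM here. A short sketch of both call styles (the VMID is an example):

    use strict;
    use warnings;
    use PVE::ReplicationConfig;

    my $vmid = 100;    # example VMID
    my $repl_conf = PVE::ReplicationConfig->new();

    # boolean form: the second argument ($noerr) suppresses the exception
    my $is_replicated = $repl_conf->check_for_existing_jobs($vmid, 1);
    print "VM $vmid is replicated\n" if $is_replicated;

    # guard form: dies with a descriptive message if any job references the VM
    eval { $repl_conf->check_for_existing_jobs($vmid) };
    die "refusing to destroy VM $vmid: $@" if $@;
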
@@ -1401,24 +1455,41 @@ __PACKAGE__->register_method({
                $cmd = ['/usr/bin/vncterm', '-rfbport', $port,
                        '-timeout', $timeout, '-authpath', $authpath,
                        '-perm', 'Sys.Console', '-c', @$remcmd, @$termcmd];
+               PVE::Tools::run_command($cmd);
            } else {
 
                $ENV{LC_PVE_TICKET} = $ticket if $websocket; # set ticket with "qm vncproxy"
 
-               my $qmcmd = [@$remcmd, "/usr/sbin/qm", 'vncproxy', $vmid];
-
-               my $qmstr = join(' ', @$qmcmd);
-
-               # also redirect stderr (else we get RFB protocol errors)
-               $cmd = ['/bin/nc6', '-l', '-p', $port, '-w', $timeout, '-e', "$qmstr 2>/dev/null"];
+               $cmd = [@$remcmd, "/usr/sbin/qm", 'vncproxy', $vmid];
+
+               my $sock = IO::Socket::IP->new(
+                   ReuseAddr => 1,
+                   Listen => 1,
+                   LocalPort => $port,
+                   Proto => 'tcp',
+                   GetAddrInfoFlags => 0,
+                   ) or die "failed to create socket: $!\n";
+               # Inside the worker we shouldn't have any previous alarms
+               # running anyway...:
+               alarm(0);
+               local $SIG{ALRM} = sub { die "connection timed out\n" };
+               alarm $timeout;
+               accept(my $cli, $sock) or die "connection failed: $!\n";
+               alarm(0);
+               close($sock);
+               if (PVE::Tools::run_command($cmd,
+                   output => '>&'.fileno($cli),
+                   input => '<&'.fileno($cli),
+                   noerr => 1) != 0)
+               {
+                   die "Failed to run vncproxy.\n";
+               }
            }
 
-           PVE::Tools::run_command($cmd);
-
            return;
        };
 
-       my $upid = $rpcenv->fork_worker('vncproxy', $vmid, $authuser, $realcmd);
+       my $upid = $rpcenv->fork_worker('vncproxy', $vmid, $authuser, $realcmd, 1);
 
        PVE::Tools::wait_for_vnc_port($port);
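
The external nc6 listener is replaced by an in-process one: bind the proxy port with IO::Socket::IP, wait for exactly one client under a SIGALRM timeout, then hand the accepted connection to the vncproxy child as stdin and stdout. A reduced standalone sketch of that accept-with-timeout pattern ('cat' stands in for the real "qm vncproxy <vmid>" command; port and timeout are examples):

    use strict;
    use warnings;
    use IO::Socket::IP;
    use PVE::Tools;

    my $port    = 5900;    # example port
    my $timeout = 10;      # seconds to wait for the VNC client

    my $sock = IO::Socket::IP->new(
        ReuseAddr => 1,
        Listen    => 1,
        LocalPort => $port,
        Proto     => 'tcp',
        GetAddrInfoFlags => 0,
    ) or die "failed to create socket: $!\n";

    # one-shot accept with a hard timeout
    local $SIG{ALRM} = sub { die "connection timed out\n" };
    alarm $timeout;
    accept(my $cli, $sock) or die "connection failed: $!\n";
    alarm 0;
    close($sock);

    # wire the accepted connection to the child's stdin/stdout
    PVE::Tools::run_command(['cat'],
        input  => '<&' . fileno($cli),
        output => '>&' . fileno($cli),
        noerr  => 1) == 0 or die "proxy command failed\n";
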
 
@@ -1642,6 +1713,11 @@ __PACKAGE__->register_method({
                optional => 1,
            },
            machine => get_standard_option('pve-qm-machine'),
+           targetstorage => {
+               description => "Target storage for the migration. (Can be '1' to use the same storage id as on the source node.)",
+               type => 'string',
+               optional => 1
+           }
        },
     },
     returns => {
@@ -1680,6 +1756,13 @@ __PACKAGE__->register_method({
        raise_param_exc({ migration_network => "Only root may use this option." })
            if $migration_network && $authuser ne 'root@pam';
 
+       my $targetstorage = extract_param($param, 'targetstorage');
+       raise_param_exc({ targetstorage => "Only root may use this option." })
+           if $targetstorage && $authuser ne 'root@pam';
+
+       raise_param_exc({ targetstorage => "targetstorage can only be used with migratedfrom." })
+           if $targetstorage && !$migratedfrom;
+
        # read spice ticket from STDIN
        my $spice_ticket;
        if ($stateuri && ($stateuri eq 'tcp') && $migratedfrom && ($rpcenv->{type} eq 'cli')) {
@@ -1703,7 +1786,7 @@ __PACKAGE__->register_method({
 
                my $cmd = ['ha-manager', 'set', $service, '--state', 'started'];
 
-               print "Executing HA start for VM $vmid\n";
+               print "Requesting HA start for VM $vmid\n";
 
                PVE::Tools::run_command($cmd);
 
@@ -1720,7 +1803,7 @@ __PACKAGE__->register_method({
                syslog('info', "start VM $vmid: $upid\n");
 
                PVE::QemuServer::vm_start($storecfg, $vmid, $stateuri, $skiplock, $migratedfrom, undef,
-                                         $machine, $spice_ticket, $migration_network, $migration_type);
+                                         $machine, $spice_ticket, $migration_network, $migration_type, $targetstorage);
 
                return;
            };
@@ -1800,7 +1883,7 @@ __PACKAGE__->register_method({
 
                my $cmd = ['ha-manager', 'set', $service, '--state', 'stopped'];
 
-               print "Executing HA stop for VM $vmid\n";
+               print "Requesting HA stop for VM $vmid\n";
 
                PVE::Tools::run_command($cmd);
 
@@ -1970,7 +2053,7 @@ __PACKAGE__->register_method({
 
                my $cmd = ['ha-manager', 'set', $service, '--state', 'stopped'];
 
-               print "Executing HA stop for VM $vmid\n";
+               print "Requesting HA stop for VM $vmid\n";
 
                PVE::Tools::run_command($cmd);
 
@@ -2395,12 +2478,12 @@ __PACKAGE__->register_method({
                        $newconf->{$opt} = $value; # simply copy configuration
                    } else {
                        if ($param->{full}) {
-                           die "Full clone feature is not available"
+                           die "Full clone feature is not supported for drive '$opt'\n"
                                if !PVE::Storage::volume_has_feature($storecfg, 'copy', $drive->{file}, $snapname, $running);
                            $fullclone->{$opt} = 1;
                        } else {
                            # not full means clone instead of copy
-                           die "Linked clone feature is not available"
+                           die "Linked clone feature is not supported for drive '$opt'\n"
                                if !PVE::Storage::volume_has_feature($storecfg, 'clone', $drive->{file}, $snapname, $running);
                        }
                        $drives->{$opt} = $drive;
@@ -2443,21 +2526,28 @@ __PACKAGE__->register_method({
                my $upid = shift;
 
                my $newvollist = [];
+               my $jobs = {};
 
                eval {
                    local $SIG{INT} = $SIG{TERM} = $SIG{QUIT} = $SIG{HUP} = sub { die "interrupted by signal\n"; };
 
                    PVE::Storage::activate_volumes($storecfg, $vollist, $snapname);
 
+                   my $total_jobs = scalar(keys %{$drives});
+                   my $i = 1;
+
                    foreach my $opt (keys %$drives) {
                        my $drive = $drives->{$opt};
+                       my $skipcomplete = ($total_jobs != $i); # request block-job completion only for the last drive
 
                        my $newdrive = PVE::QemuServer::clone_disk($storecfg, $vmid, $running, $opt, $drive, $snapname,
-                                                                  $newid, $storage, $format, $fullclone->{$opt}, $newvollist);
+                                                                  $newid, $storage, $format, $fullclone->{$opt}, $newvollist,
+                                                                  $jobs, $skipcomplete, $oldconf->{agent});
 
                        $newconf->{$opt} = PVE::QemuServer::print_drive($vmid, $newdrive);
 
                        PVE::QemuConfig->write_config($newid, $newconf);
+                       $i++;
                    }
 
                    delete $newconf->{lock};
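
Because all drive-mirror jobs of a clone are now collected in one $jobs hash, completion is requested only once, together with the last drive; $skipcomplete stays true for every earlier iteration. The counting idiom in isolation (drive names are examples, the print stands in for clone_disk):

    use strict;
    use warnings;

    my %drives = (scsi0 => 'disk-a', scsi1 => 'disk-b', ide0 => 'disk-c');

    my $total_jobs = scalar(keys %drives);
    my $i = 1;

    foreach my $opt (sort keys %drives) {
        my $skipcomplete = ($total_jobs != $i);    # complete only with the last drive
        printf "cloning %s (job %d/%d, skipcomplete=%d)\n",
            $opt, $i, $total_jobs, $skipcomplete ? 1 : 0;
        $i++;
    }
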
@@ -2478,6 +2568,8 @@ __PACKAGE__->register_method({
                if (my $err = $@) {
                    unlink $conffile;
 
+                   eval { PVE::QemuServer::qemu_blockjobs_cancel($vmid, $jobs) };
+
                    sleep 1; # some storage like rbd need to wait before release volume - really?
 
                    foreach my $volid (@$newvollist) {
@@ -2629,6 +2721,10 @@ __PACKAGE__->register_method({
 
                    PVE::QemuConfig->add_unused_volume($conf, $old_volid) if !$param->{delete};
 
+                   # convert moved disk to base if part of template
+                   PVE::QemuServer::template_create($vmid, $conf, $disk)
+                       if PVE::QemuConfig->is_template($conf);
+
                    PVE::QemuConfig->write_config($vmid, $conf);
 
                    eval {
@@ -2702,6 +2798,16 @@ __PACKAGE__->register_method({
                description => "CIDR of the (sub) network that is used for migration.",
                optional => 1,
            },
+           "with-local-disks" => {
+               type => 'boolean',
+               description => "Enable live storage migration for local disk",
+               optional => 1,
+           },
+            targetstorage => get_standard_option('pve-storage-id', {
+               description => "Default target storage.",
+               optional => 1,
+               completion => \&PVE::QemuServer::complete_storage,
+            }),
        },
     },
     returns => {
@@ -2728,6 +2834,9 @@ __PACKAGE__->register_method({
 
        my $vmid = extract_param($param, 'vmid');
 
+       raise_param_exc({ targetstorage => "Live storage migration can only be done online." })
+           if !$param->{online} && $param->{targetstorage};
+
        raise_param_exc({ force => "Only root may use this option." })
            if $param->{force} && $authuser ne 'root@pam';
 
@@ -2751,7 +2860,12 @@ __PACKAGE__->register_method({
        }
 
        my $storecfg = PVE::Storage::config();
-       PVE::QemuServer::check_storage_availability($storecfg, $conf, $target);
+
+       if ($param->{targetstorage}) {
+           PVE::Storage::storage_check_node($storecfg, $param->{targetstorage}, $target);
+       } else {
+           PVE::QemuServer::check_storage_availability($storecfg, $conf, $target);
+       }
 
        if (PVE::HA::Config::vm_is_ha_managed($vmid) && $rpcenv->{type} ne 'ha') {
 
@@ -2762,7 +2876,7 @@ __PACKAGE__->register_method({
 
                my $cmd = ['ha-manager', 'migrate', $service, $target];
 
-               print "Executing HA migrate for VM $vmid to node $target\n";
+               print "Requesting HA migration for VM $vmid to node $target\n";
 
                PVE::Tools::run_command($cmd);
 
@@ -2774,12 +2888,14 @@ __PACKAGE__->register_method({
        } else {
 
            my $realcmd = sub {
-               my $upid = shift;
-
                PVE::QemuMigrate->migrate($target, $targetip, $vmid, $param);
            };
 
-           return $rpcenv->fork_worker('qmigrate', $vmid, $authuser, $realcmd);
+           my $worker = sub {
+               return PVE::GuestHelpers::guest_migration_lock($vmid, 10, $realcmd);
+           };
+
+           return $rpcenv->fork_worker('qmigrate', $vmid, $authuser, $worker);
        }
 
     }});
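
Running the actual migration under PVE::GuestHelpers::guest_migration_lock serializes it against replication runs (and against the snapshot rollback worker changed in the last hunk); the lock is attempted for up to 10 seconds. A minimal sketch of the wrapper, with a placeholder body instead of PVE::QemuMigrate->migrate:

    use strict;
    use warnings;
    use PVE::GuestHelpers;

    my $vmid = 100;    # example VMID

    my $realcmd = sub {
        # the real worker would call PVE::QemuMigrate->migrate(...) here
        print "migrating VM $vmid while holding the per-guest lock\n";
    };

    # take the per-guest migration lock (waiting up to 10 seconds), then run
    # the code; dies if the lock cannot be obtained in time
    PVE::GuestHelpers::guest_migration_lock($vmid, 10, $realcmd);
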
@@ -2923,7 +3039,7 @@ __PACKAGE__->register_method({
            size => {
                type => 'string',
                pattern => '\+?\d+(\.\d+)?[KMGT]?',
-               description => "The new size. With the '+' sign the value is added to the actual size of the volume and without it, the value is taken as an absolute one. Shrinking disk size is not supported.",
+               description => "The new size. With the `+` sign the value is added to the actual size of the volume and without it, the value is taken as an absolute one. Shrinking disk size is not supported.",
            },
            digest => {
                type => 'string',
@@ -3004,7 +3120,7 @@ __PACKAGE__->register_method({
            $newsize += $size if $ext;
            $newsize = int($newsize);
 
-           die "unable to skrink disk size\n" if $newsize < $size;
+           die "shrinking disks is not supported\n" if $newsize < $size;
 
            return if $size == $newsize;
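
The size argument is either absolute or, with a leading `+`, relative to the current size, with an optional K/M/G/T suffix; values that would shrink the disk are rejected. A sketch of that interpretation outside the API handler (parse_size is a hypothetical helper, the current size is an example):

    use strict;
    use warnings;

    # turn "+2G", "512M" or "32" into (is_relative, bytes)
    sub parse_size {
        my ($str) = @_;
        my ($ext, $num, $unit) = $str =~ /^(\+?)(\d+(?:\.\d+)?)([KMGT]?)$/
            or die "invalid size string '$str'\n";
        my %mult = ('' => 1, K => 1024, M => 1024**2, G => 1024**3, T => 1024**4);
        return ($ext eq '+', int($num * $mult{$unit}));
    }

    my $size = 8 * 1024**3;    # current size: 8 GiB

    for my $req ('+2G', '32G', '4G') {
        my ($relative, $bytes) = parse_size($req);
        my $newsize = $relative ? $size + $bytes : $bytes;
        if ($newsize < $size) {
            print "$req => shrinking disks is not supported\n";
        } else {
            printf "%s => resize from %d to %d bytes\n", $req, $size, $newsize;
        }
    }
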
 
@@ -3306,7 +3422,12 @@ __PACKAGE__->register_method({
            PVE::QemuConfig->snapshot_rollback($vmid, $snapname);
        };
 
-       return $rpcenv->fork_worker('qmrollback', $vmid, $authuser, $realcmd);
+       my $worker = sub {
+           # hold migration lock, this makes sure that nobody creates replication snapshots
+           return PVE::GuestHelpers::guest_migration_lock($vmid, 10, $realcmd);
+       };
+
+       return $rpcenv->fork_worker('qmrollback', $vmid, $authuser, $worker);
     }});
 
 __PACKAGE__->register_method({