git.proxmox.com Git - qemu-server.git/blobdiff - PVE/API2/Qemu.pm
API/clone: do not overwrite global signal handlers
index 07b9aa9ab320dba85069579fcfacf410a33e36e1..747172e21adbb73e23f3d45d7ac4316119755ccc 100644
@@ -5,6 +5,8 @@ use warnings;
 use Cwd 'abs_path';
 use Net::SSLeay;
 use UUID;
+use POSIX;
+use IO::Socket::IP;
 
 use PVE::Cluster qw (cfs_read_file cfs_write_file);;
 use PVE::SafeSyslog;
@@ -13,6 +15,9 @@ use PVE::Exception qw(raise raise_param_exc raise_perm_exc);
 use PVE::Storage;
 use PVE::JSONSchema qw(get_standard_option);
 use PVE::RESTHandler;
+use PVE::ReplicationConfig;
+use PVE::GuestHelpers;
+use PVE::QemuConfig;
 use PVE::QemuServer;
 use PVE::QemuMigrate;
 use PVE::RPCEnvironment;
@@ -21,7 +26,15 @@ use PVE::INotify;
 use PVE::Network;
 use PVE::Firewall;
 use PVE::API2::Firewall::VM;
-use PVE::HA::Config;
+
+BEGIN {
+    if (!$ENV{PVE_GENERATING_DOCS}) {
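+       # skip loading the HA modules when we are only generating documentation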
+       require PVE::HA::Env::PVE2;
+       import PVE::HA::Env::PVE2;
+       require PVE::HA::Config;
+       import PVE::HA::Config;
+    }
+}
 
 use Data::Dumper; # fixme: remove
 
@@ -39,6 +52,7 @@ my $resolve_cdrom_alias = sub {
     }
 };
 
+my $NEW_DISK_RE = qr!^(([^/:\s]+):)?(\d+(\.\d+)?)$!;
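+# matches a request for a new disk: an optional storage id plus a size in GiB, e.g. 'local-lvm:32' or just '32'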
 my $check_storage_access = sub {
    my ($rpcenv, $authuser, $storecfg, $vmid, $settings, $default_storage) = @_;
 
@@ -53,12 +67,15 @@ my $check_storage_access = sub {
            # nothing to check
        } elsif ($isCDROM && ($volid eq 'cdrom')) {
            $rpcenv->check($authuser, "/", ['Sys.Console']);
-       } elsif (!$isCDROM && ($volid =~ m/^(([^:\s]+):)?(\d+(\.\d+)?)$/)) {
+       } elsif (!$isCDROM && ($volid =~ $NEW_DISK_RE)) {
            my ($storeid, $size) = ($2 || $default_storage, $3);
            die "no storage ID specified (and no default storage)\n" if !$storeid;
            $rpcenv->check($authuser, "/storage/$storeid", ['Datastore.AllocateSpace']);
+           my $scfg = PVE::Storage::storage_config($storecfg, $storeid);
+           raise_param_exc({ storage => "storage '$storeid' does not support vm images"})
+               if !$scfg->{content}->{images};
        } else {
-           $rpcenv->check_volume_access($authuser, $storecfg, $vmid, $volid);
+           PVE::Storage::check_volume_access($rpcenv, $authuser, $storecfg, $vmid, $volid);
        }
     });
 };
@@ -108,7 +125,8 @@ my $create_disks = sub {
     my $vollist = [];
 
     my $res = {};
-    PVE::QemuServer::foreach_drive($settings, sub {
+
+    my $code = sub {
        my ($ds, $disk) = @_;
 
        my $volid = $disk->{file};
@@ -116,21 +134,46 @@ my $create_disks = sub {
        if (!$volid || $volid eq 'none' || $volid eq 'cdrom') {
            delete $disk->{size};
            $res->{$ds} = PVE::QemuServer::print_drive($vmid, $disk);
-       } elsif ($volid =~ m/^(([^:\s]+):)?(\d+(\.\d+)?)$/) {
+       } elsif ($volid =~ $NEW_DISK_RE) {
            my ($storeid, $size) = ($2 || $default_storage, $3);
            die "no storage ID specified (and no default storage)\n" if !$storeid;
            my $defformat = PVE::Storage::storage_default_format($storecfg, $storeid);
            my $fmt = $disk->{format} || $defformat;
-           my $volid = PVE::Storage::vdisk_alloc($storecfg, $storeid, $vmid,
-                                                 $fmt, undef, $size*1024*1024);
-           $disk->{file} = $volid;
-           $disk->{size} = $size*1024*1024*1024;
+
+           my $volid;
+           if ($ds eq 'efidisk0') {
+               # handle efidisk
+               my $ovmfvars = '/usr/share/kvm/OVMF_VARS-pure-efi.fd';
+               die "uefi vars image not found\n" if ! -f $ovmfvars;
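+               # vdisk_alloc takes the size in KiB; 128 KiB is sized to hold a copy of the OVMF vars template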
+               $volid = PVE::Storage::vdisk_alloc($storecfg, $storeid, $vmid,
+                                                     $fmt, undef, 128);
+               $disk->{file} = $volid;
+               $disk->{size} = 128*1024;
+               my ($storeid, $volname) = PVE::Storage::parse_volume_id($volid);
+               my $scfg = PVE::Storage::storage_config($storecfg, $storeid);
+               my $qemufmt = PVE::QemuServer::qemu_img_format($scfg, $volname);
+               my $path = PVE::Storage::path($storecfg, $volid);
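+               # copy the vars template into the just-allocated volume; '-n' tells qemu-img not to create the target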
+               my $efidiskcmd = ['/usr/bin/qemu-img', 'convert', '-n', '-f', 'raw', '-O', $qemufmt];
+               push @$efidiskcmd, $ovmfvars;
+               push @$efidiskcmd, $path;
+
+               PVE::Storage::activate_volumes($storecfg, [$volid]);
+
+               eval { PVE::Tools::run_command($efidiskcmd); };
+               my $err = $@;
+               die "Copying of EFI Vars image failed: $err" if $err;
+           } else {
+               $volid = PVE::Storage::vdisk_alloc($storecfg, $storeid, $vmid,
+                                                     $fmt, undef, $size*1024*1024);
+               $disk->{file} = $volid;
+               $disk->{size} = $size*1024*1024*1024;
+           }
            push @$vollist, $volid;
            delete $disk->{format}; # no longer needed
            $res->{$ds} = PVE::QemuServer::print_drive($vmid, $disk);
        } else {
 
-           $rpcenv->check_volume_access($authuser, $storecfg, $vmid, $volid);
+           PVE::Storage::check_volume_access($rpcenv, $authuser, $storecfg, $vmid, $volid);
 
            my $volid_is_new = 1;
 
@@ -154,7 +197,9 @@ my $create_disks = sub {
 
            $res->{$ds} = PVE::QemuServer::print_drive($vmid, $disk);
        }
-    });
+    };
+
+    eval { PVE::QemuServer::foreach_drive($settings, $code); };
 
     # free allocated images on error
     if (my $err = $@) {
@@ -174,6 +219,64 @@ my $create_disks = sub {
     return $vollist;
 };
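+# the hashes below group config options by the permission required to modify them (used by $check_vm_modify_config_perm)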
 
+my $cpuoptions = {
+    'cores' => 1,
+    'cpu' => 1,
+    'cpulimit' => 1,
+    'cpuunits' => 1,
+    'numa' => 1,
+    'smp' => 1,
+    'sockets' => 1,
+    'vcpus' => 1,
+};
+
+my $memoryoptions = {
+    'memory' => 1,
+    'balloon' => 1,
+    'shares' => 1,
+};
+
+my $hwtypeoptions = {
+    'acpi' => 1,
+    'hotplug' => 1,
+    'kvm' => 1,
+    'machine' => 1,
+    'scsihw' => 1,
+    'smbios1' => 1,
+    'tablet' => 1,
+    'vga' => 1,
+    'watchdog' => 1,
+};
+
+my $generaloptions = {
+    'agent' => 1,
+    'autostart' => 1,
+    'bios' => 1,
+    'description' => 1,
+    'keyboard' => 1,
+    'localtime' => 1,
+    'migrate_downtime' => 1,
+    'migrate_speed' => 1,
+    'name' => 1,
+    'onboot' => 1,
+    'ostype' => 1,
+    'protection' => 1,
+    'reboot' => 1,
+    'startdate' => 1,
+    'startup' => 1,
+    'tdf' => 1,
+    'template' => 1,
+};
+
+my $vmpoweroptions = {
+    'freeze' => 1,
+};
+
+my $diskoptions = {
+    'boot' => 1,
+    'bootdisk' => 1,
+};
+
 my $check_vm_modify_config_perm = sub {
     my ($rpcenv, $authuser, $vmid, $pool, $key_list) = @_;
 
@@ -181,37 +284,38 @@ my $check_vm_modify_config_perm = sub {
 
     foreach my $opt (@$key_list) {
        # disk checks need to be done somewhere else
-       next if PVE::QemuServer::valid_drivename($opt);
+       next if PVE::QemuServer::is_valid_drivename($opt);
+       next if $opt eq 'cdrom';
+       next if $opt =~ m/^unused\d+$/;
 
-       if ($opt eq 'sockets' || $opt eq 'cores' ||
-           $opt eq 'cpu' || $opt eq 'smp' || $opt eq 'vcpus' ||
-           $opt eq 'cpulimit' || $opt eq 'cpuunits') {
+       if ($cpuoptions->{$opt} || $opt =~ m/^numa\d+$/) {
            $rpcenv->check_vm_perm($authuser, $vmid, $pool, ['VM.Config.CPU']);
-       } elsif ($opt eq 'memory' || $opt eq 'balloon' || $opt eq 'shares') {
+       } elsif ($memoryoptions->{$opt}) {
            $rpcenv->check_vm_perm($authuser, $vmid, $pool, ['VM.Config.Memory']);
-       } elsif ($opt eq 'args' || $opt eq 'lock') {
-           die "only root can set '$opt' config\n";
-       } elsif ($opt eq 'cpu' || $opt eq 'kvm' || $opt eq 'acpi' || $opt eq 'machine' ||
-                $opt eq 'vga' || $opt eq 'watchdog' || $opt eq 'tablet' || $opt eq 'smbios1') {
+       } elsif ($hwtypeoptions->{$opt}) {
            $rpcenv->check_vm_perm($authuser, $vmid, $pool, ['VM.Config.HWType']);
+       } elsif ($generaloptions->{$opt}) {
+           $rpcenv->check_vm_perm($authuser, $vmid, $pool, ['VM.Config.Options']);
+           # special case for startup since it changes host behaviour
+           if ($opt eq 'startup') {
+               $rpcenv->check_full($authuser, "/", ['Sys.Modify']);
+           }
+       } elsif ($vmpoweroptions->{$opt}) {
+           $rpcenv->check_vm_perm($authuser, $vmid, $pool, ['VM.PowerMgmt']);
+       } elsif ($diskoptions->{$opt}) {
+           $rpcenv->check_vm_perm($authuser, $vmid, $pool, ['VM.Config.Disk']);
        } elsif ($opt =~ m/^net\d+$/) {
            $rpcenv->check_vm_perm($authuser, $vmid, $pool, ['VM.Config.Network']);
        } else {
-           $rpcenv->check_vm_perm($authuser, $vmid, $pool, ['VM.Config.Options']);
+           # catches usb\d+, hostpci\d+, args, lock, etc.
+           # new options will be checked here
+           die "only root can set '$opt' config\n";
        }
     }
 
     return 1;
 };
 
-my $check_protection = sub {
-    my ($vm_conf, $err_msg) = @_;
-
-    if ($vm_conf->{protection}) {
-       die "$err_msg - protection mode enabled\n";
-    }
-};
-
 __PACKAGE__->register_method({
     name => 'vmlist',
     path => '',
@@ -227,6 +331,11 @@ __PACKAGE__->register_method({
        additionalProperties => 0,
        properties => {
            node => get_standard_option('pve-node'),
+           full => {
+               type => 'boolean',
+               optional => 1,
+               description => "Determine the full status of active VMs.",
+           },
        },
     },
     returns => {
@@ -243,7 +352,7 @@ __PACKAGE__->register_method({
        my $rpcenv = PVE::RPCEnvironment::get();
        my $authuser = $rpcenv->get_user();
 
-       my $vmstatus = PVE::QemuServer::vmstatus();
+       my $vmstatus = PVE::QemuServer::vmstatus(undef, $param->{full});
 
        my $res = [];
        foreach my $vmid (keys %$vmstatus) {
@@ -333,7 +442,7 @@ __PACKAGE__->register_method({
 
        my $pool = extract_param($param, 'pool');
 
-       my $filename = PVE::QemuServer::config_file($vmid);
+       my $filename = PVE::QemuConfig->config_file($vmid);
 
        my $storecfg = PVE::Storage::config();
 
@@ -365,7 +474,7 @@ __PACKAGE__->register_method({
            &$check_vm_modify_config_perm($rpcenv, $authuser, $vmid, $pool, [ keys %$param]);
 
            foreach my $opt (keys %$param) {
-               if (PVE::QemuServer::valid_drivename($opt)) {
+               if (PVE::QemuServer::is_valid_drivename($opt)) {
                    my $drive = PVE::QemuServer::parse_drive($opt, $param->{$opt});
                    raise_param_exc({ $opt => "unable to parse drive options" }) if !$drive;
 
@@ -383,7 +492,7 @@ __PACKAGE__->register_method({
                die "pipe requires cli environment\n"
                    if $rpcenv->{type} ne 'cli';
            } else {
-               $rpcenv->check_volume_access($authuser, $storecfg, $vmid, $archive);
+               PVE::Storage::check_volume_access($rpcenv, $authuser, $storecfg, $vmid, $archive);
                $archive = PVE::Storage::abs_filesystem_path($storecfg, $archive);
            }
        }
@@ -393,15 +502,19 @@ __PACKAGE__->register_method({
            if ($vmlist->{ids}->{$vmid}) {
                my $current_node = $vmlist->{ids}->{$vmid}->{node};
                if ($current_node eq $node) {
-                   my $conf = PVE::QemuServer::load_config($vmid);
+                   my $conf = PVE::QemuConfig->load_config($vmid);
 
-                   &$check_protection($conf, "unable to restore VM $vmid");
+                   PVE::QemuConfig->check_protection($conf, "unable to restore VM $vmid");
 
                    die "unable to restore vm $vmid - config file already exists\n"
                        if !$force;
 
                    die "unable to restore vm $vmid - vm is running\n"
                        if PVE::QemuServer::check_running($vmid);
+
+                   die "unable to restore vm $vmid - vm is a template\n"
+                       if PVE::QemuConfig->is_template($conf);
+
                } else {
                    die "unable to restore vm $vmid - already existing on cluster node '$current_node'\n";
                }
@@ -416,6 +529,9 @@ __PACKAGE__->register_method({
                PVE::AccessControl::add_vm_to_pool($vmid, $pool) if $pool;
            };
 
+           # ensure no old replication state exists
+           PVE::ReplicationState::delete_guest_states($vmid);
+
            return $rpcenv->fork_worker('qmrestore', $vmid, $authuser, $realcmd);
        };
 
@@ -424,6 +540,9 @@ __PACKAGE__->register_method({
            # test after locking
            PVE::Cluster::check_vmid_unused($vmid);
 
+           # ensure no old replication state exists
+           PVE::ReplicationState::delete_guest_states($vmid);
+
            my $realcmd = sub {
 
                my $vollist = [];
@@ -435,7 +554,7 @@ __PACKAGE__->register_method({
                    $vollist = &$create_disks($rpcenv, $authuser, $conf, $storecfg, $vmid, $pool, $param, $storage);
 
                    # try to be smart about bootdisk
-                   my @disks = PVE::QemuServer::disknames();
+                   my @disks = PVE::QemuServer::valid_drive_names();
                    my $firstdisk;
                    foreach my $ds (reverse @disks) {
                        next if !$conf->{$ds};
@@ -456,7 +575,7 @@ __PACKAGE__->register_method({
                        $conf->{smbios1} = "uuid=$uuid_str";
                    }
 
-                   PVE::QemuServer::update_config_nolock($vmid, $conf);
+                   PVE::QemuConfig->write_config($vmid, $conf);
 
                };
                my $err = $@;
@@ -475,7 +594,7 @@ __PACKAGE__->register_method({
            return $rpcenv->fork_worker('qmcreate', $vmid, $authuser, $realcmd);
        };
 
-       return PVE::QemuServer::lock_config_full($vmid, 1, $archive ? $restorefn : $createfn);
+       return PVE::QemuConfig->lock_config_full($vmid, 1, $archive ? $restorefn : $createfn);
     }});
 
 __PACKAGE__->register_method({
@@ -519,6 +638,7 @@ __PACKAGE__->register_method({
            { subdir => 'rrd' },
            { subdir => 'rrddata' },
            { subdir => 'monitor' },
+           { subdir => 'agent' },
            { subdir => 'snapshot' },
            { subdir => 'spiceproxy' },
            { subdir => 'sendkey' },
@@ -655,7 +775,7 @@ __PACKAGE__->register_method({
     code => sub {
        my ($param) = @_;
 
-       my $conf = PVE::QemuServer::load_config($param->{vmid});
+       my $conf = PVE::QemuConfig->load_config($param->{vmid});
 
        delete $conf->{snapshots};
 
@@ -726,7 +846,7 @@ __PACKAGE__->register_method({
     code => sub {
        my ($param) = @_;
 
-       my $conf = PVE::QemuServer::load_config($param->{vmid});
+       my $conf = PVE::QemuConfig->load_config($param->{vmid});
 
        my $pending_delete_hash = PVE::QemuServer::split_flagged_list($conf->{pending}->{delete});
 
@@ -788,7 +908,7 @@ my $update_vm_api  = sub {
     my $background_delay = extract_param($param, 'background_delay');
 
     my @paramarr = (); # used for log message
-    foreach my $key (keys %$param) {
+    foreach my $key (sort keys %$param) {
        push @paramarr, "-$key", $param->{$key};
     }
 
@@ -844,11 +964,35 @@ my $update_vm_api  = sub {
        push @delete, $opt;
     }
 
+    my $repl_conf = PVE::ReplicationConfig->new();
+    my $is_replicated = $repl_conf->check_for_existing_jobs($vmid, 1);
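+    # a drive added to a replicated VM must be on a storage that can replicate, be on shared storage, or have replicate=0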
+    my $check_replication = sub {
+       my ($drive) = @_;
+       return if !$is_replicated;
+       my $volid = $drive->{file};
+       return if !$volid || !($drive->{replicate}//1);
+       return if PVE::QemuServer::drive_is_cdrom($drive);
+       my ($storeid, $format);
+       if ($volid =~ $NEW_DISK_RE) {
+           $storeid = $2;
+           $format = $drive->{format} || PVE::Storage::storage_default_format($storecfg, $storeid);
+       } else {
+           ($storeid, undef) = PVE::Storage::parse_volume_id($volid, 1);
+           $format = (PVE::Storage::parse_volname($storecfg, $volid))[6];
+       }
+       return if PVE::Storage::storage_can_replicate($storecfg, $storeid, $format);
+       my $scfg = PVE::Storage::storage_config($storecfg, $storeid);
+       return if $scfg->{shared};
+       die "cannot add non-replicatable volume to a replicated VM\n";
+    };
+
     foreach my $opt (keys %$param) {
-       if (PVE::QemuServer::valid_drivename($opt)) {
+       if (PVE::QemuServer::is_valid_drivename($opt)) {
            # cleanup drive path
            my $drive = PVE::QemuServer::parse_drive($opt, $param->{$opt});
+           raise_param_exc({ $opt => "unable to parse drive options" }) if !$drive;
            PVE::QemuServer::cleanup_drive_path($opt, $storecfg, $drive);
+           $check_replication->($drive);
            $param->{$opt} = PVE::QemuServer::print_drive($vmid, $drive);
        } elsif ($opt =~ m/^net(\d+)$/) {
            # add macaddr
@@ -865,12 +1009,12 @@ my $update_vm_api  = sub {
 
     my $updatefn =  sub {
 
-       my $conf = PVE::QemuServer::load_config($vmid);
+       my $conf = PVE::QemuConfig->load_config($vmid);
 
        die "checksum missmatch (file change by other user?)\n"
            if $digest && $digest ne $conf->{digest};
 
-       PVE::QemuServer::check_lock($conf) if !$skiplock;
+       PVE::QemuConfig->check_lock($conf) if !$skiplock;
 
        foreach my $opt (keys %$revert) {
            if (defined($conf->{$opt})) {
@@ -900,34 +1044,40 @@ my $update_vm_api  = sub {
 
            foreach my $opt (@delete) {
                $modified->{$opt} = 1;
-               $conf = PVE::QemuServer::load_config($vmid); # update/reload
+               $conf = PVE::QemuConfig->load_config($vmid); # update/reload
+               if (!defined($conf->{$opt}) && !defined($conf->{pending}->{$opt})) {
+                   warn "cannot delete '$opt' - not set in current configuration!\n";
+                   $modified->{$opt} = 0;
+                   next;
+               }
+
                if ($opt =~ m/^unused/) {
                    my $drive = PVE::QemuServer::parse_drive($opt, $conf->{$opt});
-                   &$check_protection($conf, "can't remove unused disk '$drive->{file}'");
+                   PVE::QemuConfig->check_protection($conf, "can't remove unused disk '$drive->{file}'");
                    $rpcenv->check_vm_perm($authuser, $vmid, undef, ['VM.Config.Disk']);
                    if (PVE::QemuServer::try_deallocate_drive($storecfg, $vmid, $conf, $opt, $drive, $rpcenv, $authuser)) {
                        delete $conf->{$opt};
-                       PVE::QemuServer::update_config_nolock($vmid, $conf, 1);
+                       PVE::QemuConfig->write_config($vmid, $conf);
                    }
-               } elsif (PVE::QemuServer::valid_drivename($opt)) {
-                   &$check_protection($conf, "can't remove drive '$opt'");
+               } elsif (PVE::QemuServer::is_valid_drivename($opt)) {
+                   PVE::QemuConfig->check_protection($conf, "can't remove drive '$opt'");
                    $rpcenv->check_vm_perm($authuser, $vmid, undef, ['VM.Config.Disk']);
                    PVE::QemuServer::vmconfig_register_unused_drive($storecfg, $vmid, $conf, PVE::QemuServer::parse_drive($opt, $conf->{pending}->{$opt}))
                        if defined($conf->{pending}->{$opt});
                    PVE::QemuServer::vmconfig_delete_pending_option($conf, $opt, $force);
-                   PVE::QemuServer::update_config_nolock($vmid, $conf, 1);
+                   PVE::QemuConfig->write_config($vmid, $conf);
                } else {
                    PVE::QemuServer::vmconfig_delete_pending_option($conf, $opt, $force);
-                   PVE::QemuServer::update_config_nolock($vmid, $conf, 1);
+                   PVE::QemuConfig->write_config($vmid, $conf);
                }
            }
 
            foreach my $opt (keys %$param) { # add/change
                $modified->{$opt} = 1;
-               $conf = PVE::QemuServer::load_config($vmid); # update/reload
+               $conf = PVE::QemuConfig->load_config($vmid); # update/reload
                next if defined($conf->{pending}->{$opt}) && ($param->{$opt} eq $conf->{pending}->{$opt}); # skip if nothing changed
 
-               if (PVE::QemuServer::valid_drivename($opt)) {
+               if (PVE::QemuServer::is_valid_drivename($opt)) {
                    my $drive = PVE::QemuServer::parse_drive($opt, $param->{$opt});
                    if (PVE::QemuServer::drive_is_cdrom($drive)) { # CDROM
                        $rpcenv->check_vm_perm($authuser, $vmid, undef, ['VM.Config.CDROM']);
@@ -942,13 +1092,13 @@ my $update_vm_api  = sub {
                    $conf->{pending}->{$opt} = $param->{$opt};
                }
                PVE::QemuServer::vmconfig_undelete_pending_option($conf, $opt);
-               PVE::QemuServer::update_config_nolock($vmid, $conf, 1);
+               PVE::QemuConfig->write_config($vmid, $conf);
            }
 
            # remove pending changes when nothing changed
-           $conf = PVE::QemuServer::load_config($vmid); # update/reload
+           $conf = PVE::QemuConfig->load_config($vmid); # update/reload
            my $changes = PVE::QemuServer::vmconfig_cleanup_pending($conf);
-           PVE::QemuServer::update_config_nolock($vmid, $conf, 1) if $changes;
+           PVE::QemuConfig->write_config($vmid, $conf) if $changes;
 
            return if !scalar(keys %{$conf->{pending}});
 
@@ -956,7 +1106,7 @@ my $update_vm_api  = sub {
 
            # apply pending changes
 
-           $conf = PVE::QemuServer::load_config($vmid); # update/reload
+           $conf = PVE::QemuConfig->load_config($vmid); # update/reload
 
            if ($running) {
                my $errors = {};
@@ -1002,7 +1152,7 @@ my $update_vm_api  = sub {
        }
     };
 
-    return PVE::QemuServer::lock_config($vmid, $updatefn);
+    return PVE::QemuConfig->lock_config($vmid, $updatefn);
 };
 
 my $vm_config_perm_list = [
@@ -1155,15 +1305,19 @@ __PACKAGE__->register_method({
            if $skiplock && $authuser ne 'root@pam';
 
        # test if VM exists
-       my $conf = PVE::QemuServer::load_config($vmid);
+       my $conf = PVE::QemuConfig->load_config($vmid);
 
        my $storecfg = PVE::Storage::config();
 
-       &$check_protection($conf, "can't remove VM $vmid");
+       PVE::QemuConfig->check_protection($conf, "can't remove VM $vmid");
 
        die "unable to remove VM $vmid - used in HA resources\n"
            if PVE::HA::Config::vm_is_ha_managed($vmid);
 
+       # do not allow destroy if there are replication jobs
+       my $repl_conf = PVE::ReplicationConfig->new();
+       $repl_conf->check_for_existing_jobs($vmid);
+
        # early tests (repeat after locking)
        die "VM $vmid is running - destroy failed\n"
            if PVE::QemuServer::check_running($vmid);
@@ -1264,7 +1418,7 @@ __PACKAGE__->register_method({
        my $node = $param->{node};
        my $websocket = $param->{websocket};
 
-       my $conf = PVE::QemuServer::load_config($vmid, $node); # check if VM exists
+       my $conf = PVE::QemuConfig->load_config($vmid, $node); # check if VM exists
 
        my $authpath = "/vms/$vmid";
 
@@ -1304,24 +1458,41 @@ __PACKAGE__->register_method({
                $cmd = ['/usr/bin/vncterm', '-rfbport', $port,
                        '-timeout', $timeout, '-authpath', $authpath,
                        '-perm', 'Sys.Console', '-c', @$remcmd, @$termcmd];
+               PVE::Tools::run_command($cmd);
            } else {
 
                $ENV{LC_PVE_TICKET} = $ticket if $websocket; # set ticket with "qm vncproxy"
 
-               my $qmcmd = [@$remcmd, "/usr/sbin/qm", 'vncproxy', $vmid];
-
-               my $qmstr = join(' ', @$qmcmd);
-
-               # also redirect stderr (else we get RFB protocol errors)
-               $cmd = ['/bin/nc6', '-l', '-p', $port, '-w', $timeout, '-e', "$qmstr 2>/dev/null"];
+               $cmd = [@$remcmd, "/usr/sbin/qm", 'vncproxy', $vmid];
+
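+               # instead of spawning nc6, listen for the VNC client ourselves and connect the accepted socket to 'qm vncproxy'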
+               my $sock = IO::Socket::IP->new(
+                   ReuseAddr => 1,
+                   Listen => 1,
+                   LocalPort => $port,
+                   Proto => 'tcp',
+                   GetAddrInfoFlags => 0,
+                   ) or die "failed to create socket: $!\n";
+               # Inside the worker we shouldn't have any previous alarms
+               # running anyway...:
+               alarm(0);
+               local $SIG{ALRM} = sub { die "connection timed out\n" };
+               alarm $timeout;
+               accept(my $cli, $sock) or die "connection failed: $!\n";
+               alarm(0);
+               close($sock);
+               if (PVE::Tools::run_command($cmd,
+                   output => '>&'.fileno($cli),
+                   input => '<&'.fileno($cli),
+                   noerr => 1) != 0)
+               {
+                   die "Failed to run vncproxy.\n";
+               }
            }
 
-           PVE::Tools::run_command($cmd);
-
            return;
        };
 
-       my $upid = $rpcenv->fork_worker('vncproxy', $vmid, $authuser, $realcmd);
+       my $upid = $rpcenv->fork_worker('vncproxy', $vmid, $authuser, $realcmd, 1);
 
        PVE::Tools::wait_for_vnc_port($port);
 
@@ -1381,7 +1552,7 @@ __PACKAGE__->register_method({
 
        PVE::AccessControl::verify_vnc_ticket($param->{vncticket}, $authuser, $authpath);
 
-       my $conf = PVE::QemuServer::load_config($vmid, $node); # VM exists ?
+       my $conf = PVE::QemuConfig->load_config($vmid, $node); # VM exists ?
 
        # Note: VNC ports are acessible from outside, so we do not gain any
        # security if we verify that $param->{port} belongs to VM $vmid. This
@@ -1422,7 +1593,7 @@ __PACKAGE__->register_method({
        my $node = $param->{node};
        my $proxy = $param->{proxy};
 
-       my $conf = PVE::QemuServer::load_config($vmid, $node);
+       my $conf = PVE::QemuConfig->load_config($vmid, $node);
        my $title = "VM $vmid";
        $title .= " - ". $conf->{name} if $conf->{name};
 
@@ -1467,7 +1638,7 @@ __PACKAGE__->register_method({
        my ($param) = @_;
 
        # test if VM exists
-       my $conf = PVE::QemuServer::load_config($param->{vmid});
+       my $conf = PVE::QemuConfig->load_config($param->{vmid});
 
        my $res = [
            { subdir => 'current' },
@@ -1500,12 +1671,12 @@ __PACKAGE__->register_method({
        my ($param) = @_;
 
        # test if VM exists
-       my $conf = PVE::QemuServer::load_config($param->{vmid});
+       my $conf = PVE::QemuConfig->load_config($param->{vmid});
 
        my $vmstatus = PVE::QemuServer::vmstatus($param->{vmid}, 1);
        my $status = $vmstatus->{$param->{vmid}};
 
-       $status->{ha} = PVE::HA::Config::vm_is_ha_managed($param->{vmid});
+       $status->{ha} = PVE::HA::Config::get_service_status("vm:$param->{vmid}");
 
        $status->{spice} = 1 if PVE::QemuServer::vga_conf_has_spice($conf->{vga});
 
@@ -1531,7 +1702,25 @@ __PACKAGE__->register_method({
            skiplock => get_standard_option('skiplock'),
            stateuri => get_standard_option('pve-qm-stateuri'),
            migratedfrom => get_standard_option('pve-node',{ optional => 1 }),
+           migration_type => {
+               type => 'string',
+               enum => ['secure', 'insecure'],
+               description => "Migration traffic is encrypted using an SSH " .
+                 "tunnel by default. On secure, completely private networks " .
+                 "this can be disabled to increase performance.",
+               optional => 1,
+           },
+           migration_network => {
+               type => 'string', format => 'CIDR',
+               description => "CIDR of the (sub) network that is used for migration.",
+               optional => 1,
+           },
            machine => get_standard_option('pve-qm-machine'),
+           targetstorage => {
+               description => "Target storage for the migration. (Can be '1' to use the same storage id as on the source node.)",
+               type => 'string',
+               optional => 1
+           }
        },
     },
     returns => {
@@ -1562,6 +1751,21 @@ __PACKAGE__->register_method({
        raise_param_exc({ migratedfrom => "Only root may use this option." })
            if $migratedfrom && $authuser ne 'root@pam';
 
+       my $migration_type = extract_param($param, 'migration_type');
+       raise_param_exc({ migration_type => "Only root may use this option." })
+           if $migration_type && $authuser ne 'root@pam';
+
+       my $migration_network = extract_param($param, 'migration_network');
+       raise_param_exc({ migration_network => "Only root may use this option." })
+           if $migration_network && $authuser ne 'root@pam';
+
+       my $targetstorage = extract_param($param, 'targetstorage');
+       raise_param_exc({ targetstorage => "Only root may use this option." })
+           if $targetstorage && $authuser ne 'root@pam';
+
+       raise_param_exc({ targetstorage => "targetstorage can only be used with migratedfrom." })
+           if $targetstorage && !$migratedfrom;
+
        # read spice ticket from STDIN
        my $spice_ticket;
        if ($stateuri && ($stateuri eq 'tcp') && $migratedfrom && ($rpcenv->{type} eq 'cli')) {
@@ -1571,6 +1775,8 @@ __PACKAGE__->register_method({
            }
        }
 
+       PVE::Cluster::check_cfs_quorum();
+
        my $storecfg = PVE::Storage::config();
 
        if (PVE::HA::Config::vm_is_ha_managed($vmid) && !$stateuri &&
@@ -1581,9 +1787,9 @@ __PACKAGE__->register_method({
 
                my $service = "vm:$vmid";
 
-               my $cmd = ['ha-manager', 'enable', $service];
+               my $cmd = ['ha-manager', 'set', $service, '--state', 'started'];
 
-               print "Executing HA start for VM $vmid\n";
+               print "Requesting HA start for VM $vmid\n";
 
                PVE::Tools::run_command($cmd);
 
@@ -1600,7 +1806,7 @@ __PACKAGE__->register_method({
                syslog('info', "start VM $vmid: $upid\n");
 
                PVE::QemuServer::vm_start($storecfg, $vmid, $stateuri, $skiplock, $migratedfrom, undef,
-                                         $machine, $spice_ticket);
+                                         $machine, $spice_ticket, $migration_network, $migration_type, $targetstorage);
 
                return;
            };
@@ -1615,7 +1821,8 @@ __PACKAGE__->register_method({
     method => 'POST',
     protected => 1,
     proxyto => 'node',
-    description => "Stop virtual machine.",
+    description => "Stop virtual machine. The qemu process will exit immediately. This " .
+       "is akin to pulling the power plug of a running computer and may damage the VM data.",
     permissions => {
        check => ['perm', '/vms/{vmid}', [ 'VM.PowerMgmt' ]],
     },
@@ -1634,7 +1841,7 @@ __PACKAGE__->register_method({
                optional => 1,
            },
            keepActive => {
-               description => "Do not decativate storage volumes.",
+               description => "Do not deactivate storage volumes.",
                type => 'boolean',
                optional => 1,
                default => 0,
@@ -1677,9 +1884,9 @@ __PACKAGE__->register_method({
 
                my $service = "vm:$vmid";
 
-               my $cmd = ['ha-manager', 'disable', $service];
+               my $cmd = ['ha-manager', 'set', $service, '--state', 'stopped'];
 
-               print "Executing HA stop for VM $vmid\n";
+               print "Requesting HA stop for VM $vmid\n";
 
                PVE::Tools::run_command($cmd);
 
@@ -1760,7 +1967,8 @@ __PACKAGE__->register_method({
     method => 'POST',
     protected => 1,
     proxyto => 'node',
-    description => "Shutdown virtual machine.",
+    description => "Shutdown virtual machine. This is similar to pressing the power button on a physical machine. " .
+       "This will send an ACPI event for the guest OS, which should then proceed to a clean shutdown.",
     permissions => {
        check => ['perm', '/vms/{vmid}', [ 'VM.PowerMgmt' ]],
     },
@@ -1784,7 +1992,7 @@ __PACKAGE__->register_method({
                default => 0,
            },
            keepActive => {
-               description => "Do not decativate storage volumes.",
+               description => "Do not deactivate storage volumes.",
                type => 'boolean',
                optional => 1,
                default => 0,
@@ -1815,18 +2023,63 @@ __PACKAGE__->register_method({
 
        my $storecfg = PVE::Storage::config();
 
-       my $realcmd = sub {
-           my $upid = shift;
+       my $shutdown = 1;
 
-           syslog('info', "shutdown VM $vmid: $upid\n");
+       # if vm is paused, do not shutdown (but stop if forceStop = 1)
+       # otherwise, we would issue a shutdown command, but run into the timeout,
+       # then when the vm is resumed, it will instantly shutdown
+       #
+       # checking the qmp status here to get feedback to the gui/cli/api
+       # and the status query should not take too long
+       my $qmpstatus;
+       eval {
+           $qmpstatus = PVE::QemuServer::vm_qmp_command($vmid, { execute => "query-status" }, 0);
+       };
+       my $err = $@;
 
-           PVE::QemuServer::vm_stop($storecfg, $vmid, $skiplock, 0, $param->{timeout},
-                                    1, $param->{forceStop}, $keepActive);
+       if (!$err && $qmpstatus->{status} eq "paused") {
+           if ($param->{forceStop}) {
+               warn "VM is paused - stop instead of shutdown\n";
+               $shutdown = 0;
+           } else {
+               die "VM is paused - cannot shutdown\n";
+           }
+       }
 
-           return;
-       };
+       if (PVE::HA::Config::vm_is_ha_managed($vmid) &&
+           ($rpcenv->{type} ne 'ha')) {
+
+           my $hacmd = sub {
+               my $upid = shift;
+
+               my $service = "vm:$vmid";
+
+               my $cmd = ['ha-manager', 'set', $service, '--state', 'stopped'];
+
+               print "Requesting HA stop for VM $vmid\n";
+
+               PVE::Tools::run_command($cmd);
+
+               return;
+           };
+
+           return $rpcenv->fork_worker('hastop', $vmid, $authuser, $hacmd);
+
+       } else {
 
-       return $rpcenv->fork_worker('qmshutdown', $vmid, $authuser, $realcmd);
+           my $realcmd = sub {
+               my $upid = shift;
+
+               syslog('info', "shutdown VM $vmid: $upid\n");
+
+               PVE::QemuServer::vm_stop($storecfg, $vmid, $skiplock, 0, $param->{timeout},
+                                        $shutdown, $param->{forceStop}, $keepActive);
+
+               return;
+           };
+
+           return $rpcenv->fork_worker('qmshutdown', $vmid, $authuser, $realcmd);
+       }
     }});
 
 __PACKAGE__->register_method({
@@ -2029,7 +2282,7 @@ __PACKAGE__->register_method({
 
        my $running = PVE::QemuServer::check_running($vmid);
 
-       my $conf = PVE::QemuServer::load_config($vmid);
+       my $conf = PVE::QemuConfig->load_config($vmid);
 
        if($snapname){
            my $snap = $conf->{snapshots}->{$snapname};
@@ -2039,7 +2292,7 @@ __PACKAGE__->register_method({
        my $storecfg = PVE::Storage::config();
 
        my $nodelist = PVE::QemuServer::shared_nodes($conf, $storecfg);
-       my $hasFeature = PVE::QemuServer::has_feature($feature, $conf, $storecfg, $snapname, $running);
+       my $hasFeature = PVE::QemuConfig->has_feature($feature, $conf, $storecfg, $snapname, $running);
 
        return {
            hasFeature => $hasFeature,
@@ -2178,9 +2431,9 @@ __PACKAGE__->register_method({
            # do all tests after lock
            # we also try to do all tests before we fork the worker
 
-           my $conf = PVE::QemuServer::load_config($vmid);
+           my $conf = PVE::QemuConfig->load_config($vmid);
 
-           PVE::QemuServer::check_lock($conf);
+           PVE::QemuConfig->check_lock($conf);
 
            my $verify_running = PVE::QemuServer::check_running($vmid) || 0;
 
@@ -2195,7 +2448,7 @@ __PACKAGE__->register_method({
 
            die "can't clone VM to node '$target' (VM uses local storage)\n" if $target && !$sharedvm;
 
-           my $conffile = PVE::QemuServer::config_file($newid);
+           my $conffile = PVE::QemuConfig->config_file($newid);
 
            die "unable to create VM $newid: config file already exists\n"
                if -f $conffile;
@@ -2218,21 +2471,22 @@ __PACKAGE__->register_method({
                # always change MAC! address
                if ($opt =~ m/^net(\d+)$/) {
                    my $net = PVE::QemuServer::parse_net($value);
-                   $net->{macaddr} =  PVE::Tools::random_ether_addr();
+                   my $dc = PVE::Cluster::cfs_read_file('datacenter.cfg');
+                   $net->{macaddr} =  PVE::Tools::random_ether_addr($dc->{mac_prefix});
                    $newconf->{$opt} = PVE::QemuServer::print_net($net);
-               } elsif (PVE::QemuServer::valid_drivename($opt)) {
+               } elsif (PVE::QemuServer::is_valid_drivename($opt)) {
                    my $drive = PVE::QemuServer::parse_drive($opt, $value);
                    die "unable to parse drive options for '$opt'\n" if !$drive;
                    if (PVE::QemuServer::drive_is_cdrom($drive)) {
                        $newconf->{$opt} = $value; # simply copy configuration
                    } else {
                        if ($param->{full}) {
-                           die "Full clone feature is not available"
+                           die "Full clone feature is not supported for drive '$opt'\n"
                                if !PVE::Storage::volume_has_feature($storecfg, 'copy', $drive->{file}, $snapname, $running);
                            $fullclone->{$opt} = 1;
                        } else {
                            # not full means clone instead of copy
-                           die "Linked clone feature is not available"
+                           die "Linked clone feature is not supported for drive '$opt'\n"
                                if !PVE::Storage::volume_has_feature($storecfg, 'clone', $drive->{file}, $snapname, $running);
                        }
                        $drives->{$opt} = $drive;
@@ -2275,31 +2529,42 @@ __PACKAGE__->register_method({
                my $upid = shift;
 
                my $newvollist = [];
+               my $jobs = {};
 
                eval {
-                   local $SIG{INT} = $SIG{TERM} = $SIG{QUIT} = $SIG{HUP} = sub { die "interrupted by signal\n"; };
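+                   # 'local' makes sure the worker's global signal handlers are restored afterwards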
+                   local $SIG{INT} =
+                       local $SIG{TERM} =
+                       local $SIG{QUIT} =
+                       local $SIG{HUP} = sub { die "interrupted by signal\n"; };
+
+                   PVE::Storage::activate_volumes($storecfg, $vollist, $snapname);
 
-                   PVE::Storage::activate_volumes($storecfg, $vollist);
+                   my $total_jobs = scalar(keys %{$drives});
+                   my $i = 1;
 
                    foreach my $opt (keys %$drives) {
                        my $drive = $drives->{$opt};
+                       my $skipcomplete = ($total_jobs != $i); # finish after last drive
 
                        my $newdrive = PVE::QemuServer::clone_disk($storecfg, $vmid, $running, $opt, $drive, $snapname,
-                                                                  $newid, $storage, $format, $fullclone->{$opt}, $newvollist);
+                                                                  $newid, $storage, $format, $fullclone->{$opt}, $newvollist,
+                                                                  $jobs, $skipcomplete, $oldconf->{agent});
 
                        $newconf->{$opt} = PVE::QemuServer::print_drive($vmid, $newdrive);
 
-                       PVE::QemuServer::update_config_nolock($newid, $newconf, 1);
+                       PVE::QemuConfig->write_config($newid, $newconf);
+                       $i++;
                    }
 
                    delete $newconf->{lock};
-                   PVE::QemuServer::update_config_nolock($newid, $newconf, 1);
+                   PVE::QemuConfig->write_config($newid, $newconf);
 
                     if ($target) {
                        # always deactivate volumes - avoid lvm LVs to be active on several nodes
-                       PVE::Storage::deactivate_volumes($storecfg, $vollist);
+                       PVE::Storage::deactivate_volumes($storecfg, $vollist, $snapname) if !$running;
+                       PVE::Storage::deactivate_volumes($storecfg, $newvollist);
 
-                       my $newconffile = PVE::QemuServer::config_file($newid, $target);
+                       my $newconffile = PVE::QemuConfig->config_file($newid, $target);
                        die "Failed to move config to node '$target' - rename failed: $!\n"
                            if !rename($conffile, $newconffile);
                    }
@@ -2309,6 +2574,8 @@ __PACKAGE__->register_method({
                if (my $err = $@) {
                    unlink $conffile;
 
+                   eval { PVE::QemuServer::qemu_blockjobs_cancel($vmid, $jobs) };
+
                    sleep 1; # some storage like rbd need to wait before release volume - really?
 
                    foreach my $volid (@$newvollist) {
@@ -2326,9 +2593,9 @@ __PACKAGE__->register_method({
            return $rpcenv->fork_worker('qmclone', $vmid, $authuser, $realcmd);
        };
 
-       return PVE::QemuServer::lock_config_mode($vmid, 1, $shared_lock, sub {
+       return PVE::QemuConfig->lock_config_mode($vmid, 1, $shared_lock, sub {
            # Aquire exclusive lock lock for $newid
-           return PVE::QemuServer::lock_config_full($newid, 1, $clonefn);
+           return PVE::QemuConfig->lock_config_full($newid, 1, $clonefn);
        });
 
     }});
@@ -2341,23 +2608,21 @@ __PACKAGE__->register_method({
     proxyto => 'node',
     description => "Move volume to different storage.",
     permissions => {
-       description => "You need 'VM.Config.Disk' permissions on /vms/{vmid}, " .
-           "and 'Datastore.AllocateSpace' permissions on the storage.",
-       check =>
-       [ 'and',
-         ['perm', '/vms/{vmid}', [ 'VM.Config.Disk' ]],
-         ['perm', '/storage/{storage}', [ 'Datastore.AllocateSpace' ]],
-       ],
+       description => "You need 'VM.Config.Disk' permissions on /vms/{vmid}, and 'Datastore.AllocateSpace' permissions on the storage.",
+       check => [ 'and',
+                  ['perm', '/vms/{vmid}', [ 'VM.Config.Disk' ]],
+                  ['perm', '/storage/{storage}', [ 'Datastore.AllocateSpace' ]],
+           ],
     },
     parameters => {
         additionalProperties => 0,
-        properties => {
+       properties => {
            node => get_standard_option('pve-node'),
            vmid => get_standard_option('pve-vmid', { completion => \&PVE::QemuServer::complete_vmid }),
            disk => {
                type => 'string',
                description => "The disk you want to move.",
-               enum => [ PVE::QemuServer::disknames() ],
+               enum => [ PVE::QemuServer::valid_drive_names() ],
            },
             storage => get_standard_option('pve-storage-id', {
                description => "Target storage.",
@@ -2410,7 +2675,9 @@ __PACKAGE__->register_method({
 
        my $updatefn =  sub {
 
-           my $conf = PVE::QemuServer::load_config($vmid);
+           my $conf = PVE::QemuConfig->load_config($vmid);
+
+           PVE::QemuConfig->check_lock($conf);
 
            die "checksum missmatch (file change by other user?)\n"
                if $digest && $digest ne $conf->{digest};
@@ -2432,6 +2699,11 @@ __PACKAGE__->register_method({
            die "you can't move on the same storage with same format\n" if $oldstoreid eq $storeid &&
                 (!$format || !$oldfmt || $oldfmt eq $format);
 
+           # this only checks snapshots because $disk is passed!
+           my $snapshotted = PVE::QemuServer::is_volume_in_use($storecfg, $conf, $disk, $old_volid);
+           die "you can't move a disk with snapshots and delete the source\n"
+               if $snapshotted && $param->{delete};
+
            PVE::Cluster::log_msg('info', $authuser, "move disk VM $vmid: move --disk $disk --storage $storeid");
 
            my $running = PVE::QemuServer::check_running($vmid);
@@ -2445,14 +2717,21 @@ __PACKAGE__->register_method({
                eval {
                    local $SIG{INT} = $SIG{TERM} = $SIG{QUIT} = $SIG{HUP} = sub { die "interrupted by signal\n"; };
 
+                   warn "moving disk with snapshots, snapshots will not be moved!\n"
+                       if $snapshotted;
+
                    my $newdrive = PVE::QemuServer::clone_disk($storecfg, $vmid, $running, $disk, $drive, undef,
                                                               $vmid, $storeid, $format, 1, $newvollist);
 
                    $conf->{$disk} = PVE::QemuServer::print_drive($vmid, $newdrive);
 
-                   PVE::QemuServer::add_unused_volume($conf, $old_volid) if !$param->{delete};
+                   PVE::QemuConfig->add_unused_volume($conf, $old_volid) if !$param->{delete};
+
+                   # convert moved disk to base if part of template
+                   PVE::QemuServer::template_create($vmid, $conf, $disk)
+                       if PVE::QemuConfig->is_template($conf);
 
-                   PVE::QemuServer::update_config_nolock($vmid, $conf, 1);
+                   PVE::QemuConfig->write_config($vmid, $conf);
 
                    eval {
                        # try to deactivate volumes - avoid lvm LVs to be active on several nodes
@@ -2471,21 +2750,18 @@ __PACKAGE__->register_method({
                 }
 
                if ($param->{delete}) {
-                    if (PVE::QemuServer::is_volume_in_use($storecfg, $conf, undef, $old_volid)) {
-                       warn "volume $old_volid still has snapshots, can't delete it\n";
-                       PVE::QemuServer::add_unused_volume($conf, $old_volid);
-                       PVE::QemuServer::update_config_nolock($vmid, $conf, 1);
-                   } else {
-                       eval { PVE::Storage::vdisk_free($storecfg, $old_volid); };
-                       warn $@ if $@;
-                   }
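+                   # a disk with snapshots was already rejected above, so the old volume can be freed directly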
+                   eval {
+                       PVE::Storage::deactivate_volumes($storecfg, [$old_volid]);
+                       PVE::Storage::vdisk_free($storecfg, $old_volid);
+                   };
+                   warn $@ if $@;
                }
            };
 
             return $rpcenv->fork_worker('qmmove', $vmid, $authuser, $realcmd);
        };
 
-       return PVE::QemuServer::lock_config($vmid, $updatefn);
+       return PVE::QemuConfig->lock_config($vmid, $updatefn);
     }});
 
 __PACKAGE__->register_method({
@@ -2517,6 +2793,27 @@ __PACKAGE__->register_method({
                description => "Allow to migrate VMs which use local devices. Only root may use this option.",
                optional => 1,
            },
+           migration_type => {
+               type => 'string',
+               enum => ['secure', 'insecure'],
+               description => "Migration traffic is encrypted using an SSH tunnel by default. On secure, completely private networks this can be disabled to increase performance.",
+               optional => 1,
+           },
+           migration_network => {
+               type => 'string', format => 'CIDR',
+               description => "CIDR of the (sub) network that is used for migration.",
+               optional => 1,
+           },
+           "with-local-disks" => {
+               type => 'boolean',
+               description => "Enable live storage migration for local disk",
+               optional => 1,
+           },
+            targetstorage => get_standard_option('pve-storage-id', {
+               description => "Default target storage.",
+               optional => 1,
+               completion => \&PVE::QemuServer::complete_storage,
+            }),
        },
     },
     returns => {
@@ -2543,15 +2840,25 @@ __PACKAGE__->register_method({
 
        my $vmid = extract_param($param, 'vmid');
 
+       raise_param_exc({ targetstorage => "Live storage migration can only be done online." })
+           if !$param->{online} && $param->{targetstorage};
+
        raise_param_exc({ force => "Only root may use this option." })
            if $param->{force} && $authuser ne 'root@pam';
 
+       raise_param_exc({ migration_type => "Only root may use this option." })
+           if $param->{migration_type} && $authuser ne 'root@pam';
+
+       # allow root only until better network permissions are available
+       raise_param_exc({ migration_network => "Only root may use this option." })
+           if $param->{migration_network} && $authuser ne 'root@pam';
+
        # test if VM exists
-       my $conf = PVE::QemuServer::load_config($vmid);
+       my $conf = PVE::QemuConfig->load_config($vmid);
 
        # try to detect errors early
 
-       PVE::QemuServer::check_lock($conf);
+       PVE::QemuConfig->check_lock($conf);
 
        if (PVE::QemuServer::check_running($vmid)) {
            die "cant migrate running VM without --online\n"
@@ -2559,7 +2866,12 @@ __PACKAGE__->register_method({
        }
 
        my $storecfg = PVE::Storage::config();
-       PVE::QemuServer::check_storage_availability($storecfg, $conf, $target);
+
+       if ($param->{targetstorage}) {
+           PVE::Storage::storage_check_node($storecfg, $param->{targetstorage}, $target);
+       } else {
+           PVE::QemuServer::check_storage_availability($storecfg, $conf, $target);
+       }
 
        if (PVE::HA::Config::vm_is_ha_managed($vmid) && $rpcenv->{type} ne 'ha') {
 
@@ -2570,7 +2882,7 @@ __PACKAGE__->register_method({
 
                my $cmd = ['ha-manager', 'migrate', $service, $target];
 
-               print "Executing HA migrate for VM $vmid to node $target\n";
+               print "Requesting HA migration for VM $vmid to node $target\n";
 
                PVE::Tools::run_command($cmd);
 
@@ -2582,12 +2894,14 @@ __PACKAGE__->register_method({
        } else {
 
            my $realcmd = sub {
-               my $upid = shift;
-
                PVE::QemuMigrate->migrate($target, $targetip, $vmid, $param);
            };
 
-           return $rpcenv->fork_worker('qmigrate', $vmid, $authuser, $realcmd);
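+           # run the migration under the per-guest migration lock so it cannot race with replication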
+           my $worker = sub {
+               return PVE::GuestHelpers::guest_migration_lock($vmid, 10, $realcmd);
+           };
+
+           return $rpcenv->fork_worker('qmigrate', $vmid, $authuser, $worker);
        }
 
     }});
@@ -2600,7 +2914,8 @@ __PACKAGE__->register_method({
     proxyto => 'node',
     description => "Execute Qemu monitor commands.",
     permissions => {
-       check => ['perm', '/vms/{vmid}', [ 'VM.Monitor' ]],
+       description => "Sys.Modify is required for (sub)commands which are not read-only ('info *' and 'help')",
+        check => ['perm', '/vms/{vmid}', [ 'VM.Monitor' ]],
     },
     parameters => {
        additionalProperties => 0,
@@ -2617,9 +2932,21 @@ __PACKAGE__->register_method({
     code => sub {
        my ($param) = @_;
 
+       my $rpcenv = PVE::RPCEnvironment::get();
+       my $authuser = $rpcenv->get_user();
+
+       my $is_ro = sub {
+           my $command = shift;
+           return $command =~ m/^\s*info(\s+|$)/
+               || $command =~ m/^\s*help\s*$/;
+       };
+
+       $rpcenv->check_full($authuser, "/", ['Sys.Modify'])
+           if !&$is_ro($param->{command});
+
        my $vmid = $param->{vmid};
 
-       my $conf = PVE::QemuServer::load_config ($vmid); # check if VM exists
+       my $conf = PVE::QemuConfig->load_config ($vmid); # check if VM exists
 
        my $res = '';
        eval {
@@ -2630,6 +2957,70 @@ __PACKAGE__->register_method({
        return $res;
     }});
 
+my $guest_agent_commands = [
+    'ping',
+    'get-time',
+    'info',
+    'fsfreeze-status',
+    'fsfreeze-freeze',
+    'fsfreeze-thaw',
+    'fstrim',
+    'network-get-interfaces',
+    'get-vcpus',
+    'get-fsinfo',
+    'get-memory-blocks',
+    'get-memory-block-info',
+    'suspend-hybrid',
+    'suspend-ram',
+    'suspend-disk',
+    'shutdown',
+    ];
+
+__PACKAGE__->register_method({
+    name => 'agent',
+    path => '{vmid}/agent',
+    method => 'POST',
+    protected => 1,
+    proxyto => 'node',
+    description => "Execute Qemu Guest Agent commands.",
+    permissions => {
+       check => ['perm', '/vms/{vmid}', [ 'VM.Monitor' ]],
+    },
+    parameters => {
+       additionalProperties => 0,
+       properties => {
+           node => get_standard_option('pve-node'),
+           vmid => get_standard_option('pve-vmid', {
+                   completion => \&PVE::QemuServer::complete_vmid_running }),
+           command => {
+               type => 'string',
+               description => "The QGA command.",
+               enum => $guest_agent_commands,
+           },
+       },
+    },
+    returns => {
+       type => 'object',
+       description => "Returns an object with a single `result` property. The type of that
+property depends on the executed command.",
+    },
+    code => sub {
+       my ($param) = @_;
+
+       my $vmid = $param->{vmid};
+
+       my $conf = PVE::QemuConfig->load_config ($vmid); # check if VM exists
+
+       die "No Qemu Guest Agent\n" if !defined($conf->{agent});
+       die "VM $vmid is not running\n" if !PVE::QemuServer::check_running($vmid);
+
+       my $cmd = $param->{command};
+
+       my $res = PVE::QemuServer::vm_mon_cmd($vmid, "guest-$cmd");
+
+       return { result => $res };
+    }});
+
 __PACKAGE__->register_method({
     name => 'resize_vm',
     path => '{vmid}/resize',
@@ -2649,12 +3040,12 @@ __PACKAGE__->register_method({
            disk => {
                type => 'string',
                description => "The disk you want to resize.",
-               enum => [PVE::QemuServer::disknames()],
+               enum => [PVE::QemuServer::valid_drive_names()],
            },
            size => {
                type => 'string',
                pattern => '\+?\d+(\.\d+)?[KMGT]?',
-               description => "The new size. With the '+' sign the value is added to the actual size of the volume and without it, the value is taken as an absolute one. Shrinking disk size is not supported.",
+               description => "The new size. With the `+` sign the value is added to the actual size of the volume and without it, the value is taken as an absolute one. Shrinking disk size is not supported.",
            },
            digest => {
                type => 'string',
@@ -2690,11 +3081,11 @@ __PACKAGE__->register_method({
 
         my $updatefn =  sub {
 
-            my $conf = PVE::QemuServer::load_config($vmid);
+            my $conf = PVE::QemuConfig->load_config($vmid);
 
             die "checksum missmatch (file change by other user?)\n"
                 if $digest && $digest ne $conf->{digest};
-            PVE::QemuServer::check_lock($conf) if !$skiplock;
+            PVE::QemuConfig->check_lock($conf) if !$skiplock;
 
            die "disk '$disk' does not exist\n" if !$conf->{$disk};
 
@@ -2716,6 +3107,7 @@ __PACKAGE__->register_method({
 
            $rpcenv->check($authuser, "/storage/$storeid", ['Datastore.AllocateSpace']);
 
+           PVE::Storage::activate_volumes($storecfg, [$volid]);
            my $size = PVE::Storage::volume_size_info($storecfg, $volid, 5);
 
            die "internal error" if $sizestr !~ m/^(\+)?(\d+(\.\d+)?)([KMGT])?$/;
@@ -2734,7 +3126,7 @@ __PACKAGE__->register_method({
            $newsize += $size if $ext;
            $newsize = int($newsize);
 
-           die "unable to skrink disk size\n" if $newsize < $size;
+           die "shrinking disks is not supported\n" if $newsize < $size;
 
            return if $size == $newsize;
 
@@ -2745,10 +3137,10 @@ __PACKAGE__->register_method({
            $drive->{size} = $newsize;
            $conf->{$disk} = PVE::QemuServer::print_drive($vmid, $drive);
 
-           PVE::QemuServer::update_config_nolock($vmid, $conf, 1);
+           PVE::QemuConfig->write_config($vmid, $conf);
        };
 
-        PVE::QemuServer::lock_config($vmid, $updatefn);
+        PVE::QemuConfig->lock_config($vmid, $updatefn);
         return undef;
     }});
 
@@ -2765,7 +3157,7 @@ __PACKAGE__->register_method({
     parameters => {
        additionalProperties => 0,
        properties => {
-           vmid => get_standard_option('pve-vmid'),
+           vmid => get_standard_option('pve-vmid', { completion => \&PVE::QemuServer::complete_vmid }),
            node => get_standard_option('pve-node'),
        },
     },
@@ -2782,7 +3174,7 @@ __PACKAGE__->register_method({
 
        my $vmid = $param->{vmid};
 
-       my $conf = PVE::QemuServer::load_config($vmid);
+       my $conf = PVE::QemuConfig->load_config($vmid);
        my $snaphash = $conf->{snapshots} || {};
 
        my $res = [];
@@ -2859,7 +3251,7 @@ __PACKAGE__->register_method({
 
        my $realcmd = sub {
            PVE::Cluster::log_msg('info', $authuser, "snapshot VM $vmid: $snapname");
-           PVE::QemuServer::snapshot_create($vmid, $snapname, $param->{vmstate}, 
+           PVE::QemuConfig->snapshot_create($vmid, $snapname, $param->{vmstate}, 
                                             $param->{description});
        };
 
@@ -2940,9 +3332,9 @@ __PACKAGE__->register_method({
 
        my $updatefn =  sub {
 
-           my $conf = PVE::QemuServer::load_config($vmid);
+           my $conf = PVE::QemuConfig->load_config($vmid);
 
-           PVE::QemuServer::check_lock($conf);
+           PVE::QemuConfig->check_lock($conf);
 
            my $snap = $conf->{snapshots}->{$snapname};
 
@@ -2950,10 +3342,10 @@ __PACKAGE__->register_method({
 
            $snap->{description} = $param->{description} if defined($param->{description});
 
-            PVE::QemuServer::update_config_nolock($vmid, $conf, 1);
+            PVE::QemuConfig->write_config($vmid, $conf);
        };
 
-       PVE::QemuServer::lock_config($vmid, $updatefn);
+       PVE::QemuConfig->lock_config($vmid, $updatefn);
 
        return undef;
     }});
@@ -2987,7 +3379,7 @@ __PACKAGE__->register_method({
 
        my $snapname = extract_param($param, 'snapname');
 
-       my $conf = PVE::QemuServer::load_config($vmid);
+       my $conf = PVE::QemuConfig->load_config($vmid);
 
        my $snap = $conf->{snapshots}->{$snapname};
 
@@ -3033,10 +3425,15 @@ __PACKAGE__->register_method({
 
        my $realcmd = sub {
            PVE::Cluster::log_msg('info', $authuser, "rollback snapshot VM $vmid: $snapname");
-           PVE::QemuServer::snapshot_rollback($vmid, $snapname);
+           PVE::QemuConfig->snapshot_rollback($vmid, $snapname);
+       };
+
+       my $worker = sub {
+           # hold the migration lock; this makes sure that nobody creates replication snapshots
+           return PVE::GuestHelpers::guest_migration_lock($vmid, 10, $realcmd);
        };
 
-       return $rpcenv->fork_worker('qmrollback', $vmid, $authuser, $realcmd);
+       return $rpcenv->fork_worker('qmrollback', $vmid, $authuser, $worker);
     }});
 
 __PACKAGE__->register_method({
@@ -3081,7 +3478,7 @@ __PACKAGE__->register_method({
 
        my $realcmd = sub {
            PVE::Cluster::log_msg('info', $authuser, "delete snapshot VM $vmid: $snapname");
-           PVE::QemuServer::snapshot_delete($vmid, $snapname, $param->{force});
+           PVE::QemuConfig->snapshot_delete($vmid, $snapname, $param->{force});
        };
 
        return $rpcenv->fork_worker('qmdelsnapshot', $vmid, $authuser, $realcmd);
@@ -3107,7 +3504,7 @@ __PACKAGE__->register_method({
                optional => 1,
                type => 'string',
                description => "If you want to convert only 1 disk to base image.",
-               enum => [PVE::QemuServer::disknames()],
+               enum => [PVE::QemuServer::valid_drive_names()],
            },
 
        },
@@ -3128,15 +3525,15 @@ __PACKAGE__->register_method({
 
        my $updatefn =  sub {
 
-           my $conf = PVE::QemuServer::load_config($vmid);
+           my $conf = PVE::QemuConfig->load_config($vmid);
 
-           PVE::QemuServer::check_lock($conf);
+           PVE::QemuConfig->check_lock($conf);
 
            die "unable to create template, because VM contains snapshots\n"
                if $conf->{snapshots} && scalar(keys %{$conf->{snapshots}});
 
            die "you can't convert a template to a template\n"
-               if PVE::QemuServer::is_template($conf) && !$disk;
+               if PVE::QemuConfig->is_template($conf) && !$disk;
 
            die "you can't convert a VM to template if VM is running\n"
                if PVE::QemuServer::check_running($vmid);
@@ -3146,12 +3543,12 @@ __PACKAGE__->register_method({
            };
 
            $conf->{template} = 1;
-           PVE::QemuServer::update_config_nolock($vmid, $conf, 1);
+           PVE::QemuConfig->write_config($vmid, $conf);
 
            return $rpcenv->fork_worker('qmtemplate', $vmid, $authuser, $realcmd);
        };
 
-       PVE::QemuServer::lock_config($vmid, $updatefn);
+       PVE::QemuConfig->lock_config($vmid, $updatefn);
        return undef;
     }});