git.proxmox.com Git - qemu-server.git/blobdiff - PVE/API2/Qemu.pm
copy_vm: new option to move final VM to other node (option target)
[qemu-server.git] / PVE / API2 / Qemu.pm
index 924af264f47aa3ebd55682b223ccb09d0cc55287..d33af929ffb6ba9e63ad76977198d8408bdee9d3 100644 (file)
@@ -16,6 +16,7 @@ use PVE::QemuMigrate;
 use PVE::RPCEnvironment;
 use PVE::AccessControl;
 use PVE::INotify;
+use PVE::Network;
 
 use Data::Dumper; # fixme: remove
 
@@ -58,6 +59,43 @@ my $check_storage_access = sub {
     });
 };
 
+# Verify that $authuser may allocate space for every drive referenced by
+# $conf (pre-flight check for VM copy). If $storage is set, the permission
+# check is done against that override storage instead of each drive's own.
+# Returns 1 when all referenced storages are shared (so the resulting copy
+# may be moved to another node), 0 if any drive lives on local storage.
+# NOTE(review): physical 'cdrom' passthrough requires Sys.Console; other
+# CD-ROM volumes are allowed without a Datastore permission check.
+my $check_storage_access_copy = sub {
+   my ($rpcenv, $authuser, $storecfg, $conf, $storage) = @_;
+
+   my $sharedvm = 1;
+
+   PVE::QemuServer::foreach_drive($conf, sub {
+       my ($ds, $drive) = @_;
+
+       my $isCDROM = PVE::QemuServer::drive_is_cdrom($drive);
+
+       my $volid = $drive->{file};
+
+       # unreferenced or explicitly empty drive slots need no check
+       return if !$volid || $volid eq 'none';
+
+       if ($isCDROM) {
+           if ($volid eq 'cdrom') {
+               $rpcenv->check($authuser, "/", ['Sys.Console']);
+           } else {
+               # ISO images: access is simply allowed, but still track
+               # whether the backing storage is shared
+               my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
+               my $scfg = PVE::Storage::storage_config($storecfg, $sid);
+               $sharedvm = 0 if !$scfg->{shared};
+
+           }
+       } else {
+           my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
+           my $scfg = PVE::Storage::storage_config($storecfg, $sid);
+           $sharedvm = 0 if !$scfg->{shared};
+
+           # permission is checked on the override storage when given
+           $sid = $storage if $storage;
+           $rpcenv->check($authuser, "/storage/$sid", ['Datastore.AllocateSpace']);
+       }
+    });
+
+   return $sharedvm;
+};
+
 # Note: $pool is only needed when creating a VM, because pool permissions
 # are automatically inherited if VM already exists inside a pool.
 my $create_disks = sub {
@@ -343,6 +381,7 @@ __PACKAGE__->register_method({
 
        my $restorefn = sub {
 
+           # fixme: this test does not work if VM exists on other node!
            if (-f $filename) {
                die "unable to restore vm $vmid: config file already exists\n"
                    if !$force;
@@ -646,8 +685,21 @@ my $vmconfig_delete_option = sub {
            $rpcenv->check($authuser, "/storage/$sid", ['Datastore.Allocate']);
        }
     }
-               
-    die "error hot-unplug $opt" if !PVE::QemuServer::vm_deviceunplug($vmid, $conf, $opt);
+
+    my $unplugwarning = "";
+    if($conf->{ostype} && $conf->{ostype} eq 'l26'){
+       $unplugwarning = "<br>verify that you have acpiphp && pci_hotplug modules loaded in your guest VM";
+    }elsif($conf->{ostype} && $conf->{ostype} eq 'l24'){
+       $unplugwarning = "<br>kernel 2.4 don't support hotplug, please disable hotplug in options";
+    }elsif(!$conf->{ostype} || ($conf->{ostype} && $conf->{ostype} eq 'other')){
+       $unplugwarning = "<br>verify that your guest support acpi hotplug";
+    }
+
+    if($opt eq 'tablet'){
+       PVE::QemuServer::vm_deviceplug(undef, $conf, $vmid, $opt);
+    }else{
+        die "error hot-unplug $opt $unplugwarning" if !PVE::QemuServer::vm_deviceunplug($vmid, $conf, $opt);
+    }
 
     if ($isDisk) {
        my $drive = PVE::QemuServer::parse_drive($opt, $conf->{$opt});
@@ -736,11 +788,34 @@ my $vmconfig_update_disk = sub {
 my $vmconfig_update_net = sub {
     my ($rpcenv, $authuser, $conf, $storecfg, $vmid, $opt, $value) = @_;
 
-    if ($conf->{$opt}) {
-       #if online update, then unplug first
-       die "error hot-unplug $opt for update" if !PVE::QemuServer::vm_deviceunplug($vmid, $conf, $opt);
-    }
+    if ($conf->{$opt} && PVE::QemuServer::check_running($vmid)) {
+       my $oldnet = PVE::QemuServer::parse_net($conf->{$opt});
+       my $newnet = PVE::QemuServer::parse_net($value);
+
+       if($oldnet->{model} ne $newnet->{model}){
+           #if model change, we try to hot-unplug
+            die "error hot-unplug $opt for update" if !PVE::QemuServer::vm_deviceunplug($vmid, $conf, $opt);
+       }else{
+               
+           if($newnet->{bridge} && $oldnet->{bridge}){
+               my $iface = "tap".$vmid."i".$1 if $opt =~ m/net(\d+)/;
+
+               if($newnet->{rate} ne $oldnet->{rate}){
+                   PVE::Network::tap_rate_limit($iface, $newnet->{rate});
+               }
+
+               if(($newnet->{bridge} ne $oldnet->{bridge}) || ($newnet->{tag} ne $oldnet->{tag})){
+                   eval{PVE::Network::tap_unplug($iface, $oldnet->{bridge}, $oldnet->{tag});};
+                   PVE::Network::tap_plug($iface, $newnet->{bridge}, $newnet->{tag});
+               }
 
+           }else{
+               #if bridge/nat mode change, we try to hot-unplug
+               die "error hot-unplug $opt for update" if !PVE::QemuServer::vm_deviceunplug($vmid, $conf, $opt);
+           }
+       }
+       
+    }
     $conf->{$opt} = $value;
     PVE::QemuServer::update_config_nolock($vmid, $conf, 1);
     $conf = PVE::QemuServer::load_config($vmid); # update/reload
@@ -910,6 +985,12 @@ __PACKAGE__->register_method({
 
                } else {
 
+                   if($opt eq 'tablet' && $param->{$opt} == 1){
+                       PVE::QemuServer::vm_deviceplug(undef, $conf, $vmid, $opt);
+                   }elsif($opt eq 'tablet' && $param->{$opt} == 0){
+                       PVE::QemuServer::vm_deviceunplug($vmid, $conf, $opt);
+                   }
+
                    $conf->{$opt} = $param->{$opt};
                    PVE::QemuServer::update_config_nolock($vmid, $conf, 1);
                }
@@ -1671,7 +1752,6 @@ __PACKAGE__->register_method({
                 optional => 1,
             }),
        },
-
     },
     returns => {
         type => 'boolean'
@@ -1703,6 +1783,279 @@ __PACKAGE__->register_method({
        return $res;
     }});
 
+# API method: copy a (stopped) VM or template to a new VMID.
+# Work is done in a forked worker; the method returns the worker UPID string.
+# On failure, the temporary config and any freshly allocated volumes are
+# cleaned up. With 'target' set, the finished config is moved to that node
+# (only allowed when all referenced storages are shared).
+__PACKAGE__->register_method({
+    name => 'copy_vm',
+    path => '{vmid}/copy',
+    method => 'POST',
+    protected => 1,
+    proxyto => 'node',
+    description => "Create a copy of virtual machine/template.",
+    permissions => {
+       description => "You need 'VM.Copy' permissions on /vms/{vmid}, and 'VM.Allocate' permissions " .
+           "on /vms/{newid} (or on the VM pool /pool/{pool}). You also need " .
+           "'Datastore.AllocateSpace' on any used storage.",
+       check => 
+       [ 'and', 
+         ['perm', '/vms/{vmid}', [ 'VM.Copy' ]],
+         [ 'or', 
+           [ 'perm', '/vms/{newid}', ['VM.Allocate']],
+           [ 'perm', '/pool/{pool}', ['VM.Allocate'], require_param => 'pool'],
+         ],
+       ]
+    },
+    parameters => {
+       additionalProperties => 0,
+       properties => {
+           node => get_standard_option('pve-node'),
+           vmid => get_standard_option('pve-vmid'),
+           newid => get_standard_option('pve-vmid', { description => 'VMID for the copy.' }),
+           name => {
+               optional => 1,
+               type => 'string', format => 'dns-name',
+               description => "Set a name for the new VM.",
+           },
+           description => {
+               optional => 1,
+               type => 'string',
+               description => "Description for the new VM.",
+           },
+           pool => { 
+               optional => 1,
+               type => 'string', format => 'pve-poolid',
+               description => "Add the new VM to the specified pool.",
+           },
+            snapname => get_standard_option('pve-snapshot-name', {
+               requires => 'full',
+               optional => 1,
+            }),
+           storage => get_standard_option('pve-storage-id', {
+               description => "Target storage for full copy.",
+               requires => 'full',
+               optional => 1,
+           }),
+           'format' => {
+               description => "Target format for file storage.",
+               requires => 'full',
+               type => 'string',
+               optional => 1,
+               enum => [ 'raw', 'qcow2', 'vmdk'],
+           },
+           full => {
+               optional => 1,
+               type => 'boolean',
+               description => "Create a full copy of all disk. This is always done when " .
+                   "you copy a normal VM. For VM templates, we try to create a linked copy by default.",
+               default => 0,
+           },
+           target => get_standard_option('pve-node', { 
+               description => "Target node. Only allowed if the original VM is on shared storage.",
+               optional => 1,
+           }),
+        },
+    },
+    returns => {
+       type => 'string',
+    },
+    code => sub {
+       my ($param) = @_;
+
+       my $rpcenv = PVE::RPCEnvironment::get();
+
+        my $authuser = $rpcenv->get_user();
+
+       my $node = extract_param($param, 'node');
+
+       my $vmid = extract_param($param, 'vmid');
+
+       my $newid = extract_param($param, 'newid');
+
+       # fixme: update pool after create
+       my $pool = extract_param($param, 'pool');
+
+       if (defined($pool)) {
+           $rpcenv->check_pool_exist($pool);
+       }
+
+        my $snapname = extract_param($param, 'snapname');
+
+       my $storage = extract_param($param, 'storage');
+
+       my $format = extract_param($param, 'format');
+
+       my $target = extract_param($param, 'target');
+
+        my $localnode = PVE::INotify::nodename();
+
+       # copying to the local node is the same as no target at all
+       # NOTE(review): $target may be undef here, so these 'eq' comparisons
+       # emit an 'uninitialized value' warning — a defined() guard would help
+        undef $target if $target eq $localnode || $target eq 'localhost';
+
+       PVE::Cluster::check_node_exists($target) if $target;
+
+       my $storecfg = PVE::Storage::config();
+
+        PVE::Cluster::check_cfs_quorum();
+
+       my $running = PVE::QemuServer::check_running($vmid) || 0;
+
+       die "Copy running VM $vmid not implemented\n" if $running; # fixme: implement this
+
+       # exclusive lock if VM is running - else shared lock is enough;
+       my $shared_lock = $running ? 0 : 1;
+
+       # fixme: do early checks - re-check after lock 
+
+       # fixme: impl. target node parameter (mv VM config if all storages are shared)
+
+       my $copyfn = sub {
+
+           # all tests after lock
+           my $conf = PVE::QemuServer::load_config($vmid);
+
+           PVE::QemuServer::check_lock($conf);
+
+           # VM must not have started/stopped between the early check and the lock
+           my $verify_running = PVE::QemuServer::check_running($vmid) || 0;
+
+           die "unexpected state change\n" if $verify_running != $running;
+
+           die "snapshot '$snapname' does not exist\n" 
+               if $snapname && !defined( $conf->{snapshots}->{$snapname}); 
+
+           # copy from the snapshot config when a snapname was given
+           my $oldconf = $snapname ? $conf->{snapshots}->{$snapname} : $conf; 
+
+           my $sharedvm = &$check_storage_access_copy($rpcenv, $authuser, $storecfg, $oldconf, $storage);
+
+           die "can't copy VM to node '$target' (VM uses local storage)\n" if $target && !$sharedvm;
+           
+           my $conffile = PVE::QemuServer::config_file($newid);
+
+           die "unable to create VM $newid: config file already exists\n"
+               if -f $conffile;
+
+           # create empty/temp config - this fails if VM already exists on other node
+           PVE::Tools::file_set_contents($conffile, "# qmcopy temporary file\nlock: copy\n");
+
+           my $realcmd = sub {
+               my $upid = shift;
+
+               # volumes allocated so far, for rollback on error
+               my $newvollist = [];
+
+               eval {
+                   my $newconf = { lock => 'copy' };
+                   my $drives = {};
+                   my $vollist = [];
+
+                   foreach my $opt (keys %$oldconf) {
+                       my $value = $oldconf->{$opt};
+
+                       # do not copy snapshot related info
+                       next if $opt eq 'snapshots' ||  $opt eq 'parent' || $opt eq 'snaptime' ||
+                           $opt eq 'vmstate' || $opt eq 'snapstate';
+
+                       # always generate a fresh MAC address for the copy
+                       if ($opt =~ m/^net(\d+)$/) {
+                           my $net = PVE::QemuServer::parse_net($value);
+                           $net->{macaddr} =  PVE::Tools::random_ether_addr();
+                           $newconf->{$opt} = PVE::QemuServer::print_net($net);
+                       } elsif (my $drive = PVE::QemuServer::parse_drive($opt, $value)) {
+                           if (PVE::QemuServer::drive_is_cdrom($drive)) {
+                               $newconf->{$opt} = $value; # simply copy configuration
+                           } else {
+                               # real disks are copied below, after activation
+                               $drives->{$opt} = $drive;
+                               push @$vollist, $drive->{file};
+                           }
+                       } else {
+                           # copy everything else
+                           $newconf->{$opt} = $value;  
+                       }
+                   }
+
+                   # the copy of a template is a regular VM
+                   delete $newconf->{template};
+
+                   if ($param->{name}) {
+                       $newconf->{name} = $param->{name};
+                   } else {
+                       $newconf->{name} = "Copy-of-$oldconf->{name}";
+                   }
+
+                   if ($param->{description}) {
+                       $newconf->{description} = $param->{description};
+                   }
+                   
+                   PVE::Storage::activate_volumes($storecfg, $vollist);
+
+                   eval {
+                       local $SIG{INT} = $SIG{TERM} = $SIG{QUIT} = $SIG{HUP} = sub { die "interrupted by signal\n"; };
+
+                       foreach my $opt (keys %$drives) {
+                           my $drive = $drives->{$opt};
+
+                           my $newvolid;
+                           # linked clone only for base volumes without 'full'
+                           if (!$param->{full} && PVE::Storage::volume_is_base($storecfg,  $drive->{file})) {
+                               print "clone drive $opt ($drive->{file})\n";
+                               $newvolid = PVE::Storage::vdisk_clone($storecfg,  $drive->{file}, $newid);
+                           } else {
+                               my ($storeid, $volname) = PVE::Storage::parse_volume_id($drive->{file});
+                               $storeid = $storage if $storage;
+
+                               # explicit format wins, then drive format, then storage default
+                               my $fmt = undef;
+                               if($format){
+                                   $fmt = $format;
+                               }else{
+                                   my $defformat = PVE::Storage::storage_default_format($storecfg, $storeid);
+                                   $fmt = $drive->{format} || $defformat;
+                               }
+
+                               my ($size) = PVE::Storage::volume_size_info($storecfg, $drive->{file}, 3);
+
+                               print "copy drive $opt ($drive->{file})\n";
+                               $newvolid = PVE::Storage::vdisk_alloc($storecfg, $storeid, $newid, $fmt, undef, ($size/1024));
+
+                               PVE::QemuServer::qemu_img_convert($drive->{file}, $newvolid, $size, $snapname);
+                           }
+
+                           my ($size) = PVE::Storage::volume_size_info($storecfg, $newvolid, 3);
+                           my $disk = { file => $newvolid, size => $size };
+                           # NOTE(review): passes the source $vmid, not $newid, to
+                           # print_drive — verify print_drive ignores its first argument
+                           $newconf->{$opt} = PVE::QemuServer::print_drive($vmid, $disk); 
+                           push @$newvollist, $newvolid;
+
+                           # persist after each disk so partial progress is recorded
+                           PVE::QemuServer::update_config_nolock($newid, $newconf, 1);
+                       }
+                   };
+                   die $@ if $@;
+
+                   delete $newconf->{lock};
+                   PVE::QemuServer::update_config_nolock($newid, $newconf, 1);
+
+                    if ($target) {
+                       # cross-cluster-fs rename moves ownership to the target node
+                       my $newconffile = PVE::QemuServer::config_file($newid, $target);
+                       die "Failed to move config to node '$target' - rename failed: $!\n"
+                           if !rename($conffile, $newconffile);
+                   }
+               };
+               if (my $err = $@) { 
+                   # rollback: drop temp config and free any volumes we created
+                   unlink $conffile;
+
+                   sleep 1; # some storage like rbd need to wait before release volume - really?
+
+                   foreach my $volid (@$newvollist) {
+                       eval { PVE::Storage::vdisk_free($storecfg, $volid); };
+                       warn $@ if $@;
+                   }
+                   die "copy failed: $err";
+               }
+
+               return;
+           };
+
+           return $rpcenv->fork_worker('qmcopy', $vmid, $authuser, $realcmd);
+       };
+
+       return PVE::QemuServer::lock_config_mode($vmid, 1, $shared_lock, sub {
+           # acquire exclusive lock for $newid
+           return PVE::QemuServer::lock_config_full($newid, 1, $copyfn);
+       });
+
+    }});
+
 __PACKAGE__->register_method({
     name => 'migrate_vm',
     path => '{vmid}/migrate',
@@ -1918,6 +2271,9 @@ __PACKAGE__->register_method({
 
            die "you can't resize a cdrom\n" if PVE::QemuServer::drive_is_cdrom($drive);
 
+           die "you can't online resize a virtio windows bootdisk\n" 
+               if PVE::QemuServer::check_running($vmid) && $conf->{bootdisk} eq $disk && $conf->{ostype} =~ m/^w/ && $disk =~ m/^virtio/;
+
            my ($storeid, $volname) = PVE::Storage::parse_volume_id($volid);
 
            $rpcenv->check($authuser, "/storage/$storeid", ['Datastore.AllocateSpace']);
@@ -2306,7 +2662,11 @@ __PACKAGE__->register_method({
     proxyto => 'node',
     description => "Create a Template.",
     permissions => {
-       check => ['perm', '/vms/{vmid}', [ 'VM.Template' ]],
+       description => "You need 'VM.Allocate' permissions on /vms/{vmid} or on the VM pool /pool/{pool}.",
+       check => [ 'or', 
+                  [ 'perm', '/vms/{vmid}', ['VM.Allocate']],
+                  [ 'perm', '/pool/{pool}', ['VM.Allocate'], require_param => 'pool'],
+           ],
     },
     parameters => {
        additionalProperties => 0,
@@ -2342,13 +2702,23 @@ __PACKAGE__->register_method({
 
            PVE::QemuServer::check_lock($conf);
 
-           die "you can't convert a template to a template" if PVE::QemuServer::is_template($conf) && !$disk;
+           die "unable to create template, because VM contains snapshots\n" 
+               if $conf->{snapshots} && scalar(keys %{$conf->{snapshots}});
+
+           die "you can't convert a template to a template\n" 
+               if PVE::QemuServer::is_template($conf) && !$disk;
+
+           die "you can't convert a VM to template if VM is running\n" 
+               if PVE::QemuServer::check_running($vmid);
+
            my $realcmd = sub {
                PVE::QemuServer::template_create($vmid, $conf, $disk);
            };
-           return $rpcenv->fork_worker('qmtemplate', $vmid, $authuser, $realcmd);
 
+           $conf->{template} = 1;
            PVE::QemuServer::update_config_nolock($vmid, $conf, 1);
+
+           return $rpcenv->fork_worker('qmtemplate', $vmid, $authuser, $realcmd);
        };
 
        PVE::QemuServer::lock_config($vmid, $updatefn);