don't send qmp balloon commands if vm is started with a state file
[qemu-server.git] / PVE / API2 / Qemu.pm
index f6d4eaec738257025c891aa27ff50c0998809ff3..8a15abcca655be18dfbda1a567e4ab03665f6695 100644 (file)
@@ -59,6 +59,32 @@ my $check_storage_access = sub {
     });
 };
 
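+# Check that $authuser may use every volume referenced by the source config:
+# the physical 'cdrom' device requires Sys.Console, ISO images are always allowed,
+# and each disk volume needs 'Datastore.AllocateSpace' on its storage, because the
+# copy allocates the new disks on the same storage.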
+my $check_storage_access_copy = sub {
+   my ($rpcenv, $authuser, $storecfg, $conf) = @_;
+
+   PVE::QemuServer::foreach_drive($conf, sub {
+       my ($ds, $drive) = @_;
+
+       my $isCDROM = PVE::QemuServer::drive_is_cdrom($drive);
+
+       my $volid = $drive->{file};
+
+       return if !$volid || $volid eq 'none';
+
+       if ($isCDROM) {
+           if ($volid eq 'cdrom') {
+               $rpcenv->check($authuser, "/", ['Sys.Console']);
+           } else {
+               # we simply allow access 
+           }
+       } else {
+           my ($sid, $volname) = PVE::Storage::parse_volume_id($volid, 1);
+           die "unable to copy arbitrary files\n" if !$sid;
+           $rpcenv->check($authuser, "/storage/$sid", ['Datastore.AllocateSpace']);
+       }
+    });
+};
+
 # Note: $pool is only needed when creating a VM, because pool permissions
 # are automatically inherited if VM already exists inside a pool.
 my $create_disks = sub {
@@ -344,6 +370,7 @@ __PACKAGE__->register_method({
 
        my $restorefn = sub {
 
+           # fixme: this test does not work if VM exists on other node!
            if (-f $filename) {
                die "unable to restore vm $vmid: config file already exists\n"
                    if !$force;
@@ -1746,6 +1773,200 @@ __PACKAGE__->register_method({
        return $res;
     }});
 
+__PACKAGE__->register_method({
+    name => 'copy_vm',
+    path => '{vmid}/copy',
+    method => 'POST',
+    protected => 1,
+    proxyto => 'node',
+    description => "Create a copy of virtual machine/template.",
+    permissions => {
+       description => "You need 'VM.Copy' permissions on /vms/{vmid}, and 'VM.Allocate' permissions " .
+           "on /vms/{newid} (or on the VM pool /pool/{pool}). You also need " .
+           "'Datastore.AllocateSpace' on any used storage.",
+       check => 
+       [ 'and', 
+         ['perm', '/vms/{vmid}', [ 'VM.Copy' ]],
+         [ 'or', 
+           [ 'perm', '/vms/{newid}', ['VM.Allocate']],
+           [ 'perm', '/pool/{pool}', ['VM.Allocate'], require_param => 'pool'],
+         ],
+       ]
+    },
+    parameters => {
+       additionalProperties => 0,
+       properties => {
+           # fixme: add other parameters like name and description?
+           node => get_standard_option('pve-node'),
+           vmid => get_standard_option('pve-vmid'),
+           newid => get_standard_option('pve-vmid', { 
+               description => 'VMID for the copy.' }),
+           pool => { 
+               optional => 1,
+               type => 'string', format => 'pve-poolid',
+               description => "Add the new VM to the specified pool.",
+           },
+           full => {
+               optional => 1,
+               type => 'boolean',
+               description => "Create a full copy of all disks. This is always done when " .
+                   "you copy a normal VM. For VM templates, we try to create a linked copy by default.",
+               default => 0,
+           },
+       },
+    },
+    returns => {
+       type => 'string',
+    },
+    code => sub {
+       my ($param) = @_;
+
+       my $rpcenv = PVE::RPCEnvironment::get();
+
+       my $authuser = $rpcenv->get_user();
+
+       my $node = extract_param($param, 'node');
+
+       my $vmid = extract_param($param, 'vmid');
+
+       my $newid = extract_param($param, 'newid');
+
+       # fixme: update pool after create
+       my $pool = extract_param($param, 'pool');
+
+       if (defined($pool)) {
+           $rpcenv->check_pool_exist($pool);
+       }
+
+       my $storecfg = PVE::Storage::config();
+
+       PVE::Cluster::check_cfs_quorum();
+
+       # fixme: do early checks - re-check after lock 
+
+       # fixme: impl. target node parameter (mv VM config if all storages are shared)
+
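+       # $copyfn runs with both config locks held: check the source config,
+       # reserve $newid with a temporary config file, then fork a worker
+       # task that copies the disks.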
+       my $copyfn = sub {
+
+           # all tests after lock
+           my $conf = PVE::QemuServer::load_config($vmid);
+
+           PVE::QemuServer::check_lock($conf);
+
+           my $running = PVE::QemuServer::check_running($vmid);
+
+           die "Copy running VM $vmid not implemented\n" if $running;
+
+           &$check_storage_access_copy($rpcenv, $authuser, $storecfg, $conf);
+
+           # fixme: snapshots??
+
+           my $conffile = PVE::QemuServer::config_file($newid);
+
+           die "unable to create VM $newid: config file already exists\n"
+               if -f $conffile;
+
+           # create empty/temp config - this fails if VM already exists on other node
+           PVE::Tools::file_set_contents($conffile, "# qmcopy temporary file\nlock: copy\n");
+
+           my $realcmd = sub {
+               my $upid = shift;
+
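+               # remember all newly allocated volumes so they can be freed
+               # again if the copy fails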
+               my $newvollist = [];
+
+               eval {
+                   my $newconf = { lock => 'copy' };
+                   my $drives = {};
+                   my $vollist = [];
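+                   # split the source config: hard disks are collected in $drives and copied below,
+                   # network devices get a fresh MAC address, everything else (including CD-ROM
+                   # entries) is taken over as-is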
+                   foreach my $opt (keys %$conf) {
+                       my $value = $conf->{$opt};
+
+                       next if $opt eq 'snapshots'; #  do not copy snapshot info
+
+                       # always generate a new MAC address
+                       if ($opt =~ m/^net(\d+)$/) {
+                           my $net = PVE::QemuServer::parse_net($value);
+                           $net->{macaddr} = PVE::Tools::random_ether_addr();
+                           $newconf->{$opt} = PVE::QemuServer::print_net($net);
+                       } elsif (my $drive = PVE::QemuServer::parse_drive($opt, $value)) {
+                           if (PVE::QemuServer::drive_is_cdrom($drive)) {
+                               $newconf->{$opt} = $value; # simply copy configuration
+                           } else {
+                               $drives->{$opt} = $drive;
+                               push @$vollist, $drive->{file};
+                           }
+                       } else {
+                           # copy everything else
+                           $newconf->{$opt} = $value;  
+                       }
+                   }
+
+                   delete $newconf->{template};
+                   
+                   PVE::Storage::activate_volumes($storecfg, $vollist);
+
+                   eval {
+                       local $SIG{INT} = $SIG{TERM} = $SIG{QUIT} = $SIG{HUP} = sub { die "interrupted by signal\n"; };
+
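+                       # for each disk: create a linked clone if the source is a base
+                       # volume and no full copy was requested, otherwise allocate a
+                       # new volume and copy the contents with qemu-img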
+                       foreach my $opt (keys %$drives) {
+                           my $drive = $drives->{$opt};
+
+                           my $newvolid;
+                           if (!$param->{full} && PVE::Storage::volume_is_base($storecfg, $drive->{file})) {
+                               print "clone drive $opt ($drive->{file})\n";
+                               $newvolid = PVE::Storage::vdisk_clone($storecfg, $drive->{file}, $newid);
+                           } else {
+                               my ($storeid, $volname) = PVE::Storage::parse_volume_id($drive->{file});
+                               my $defformat = PVE::Storage::storage_default_format($storecfg, $storeid);
+                               my $fmt = $drive->{format} || $defformat;
+
+                               my ($size) = PVE::Storage::volume_size_info($storecfg, $drive->{file}, 3);
+
+                               print "copy drive $opt ($drive->{file})\n";
+                               $newvolid = PVE::Storage::vdisk_alloc($storecfg, $storeid, $newid, $fmt, undef, ($size/1024));
+
+                               PVE::QemuServer::qemu_img_convert($drive->{file}, $newvolid, $size);
+                           }
+
+                           my ($size) = PVE::Storage::volume_size_info($storecfg, $newvolid, 3);
+                           my $disk = { file => $newvolid, size => $size };
+                           $newconf->{$opt} = PVE::QemuServer::print_drive($vmid, $disk); 
+                           push @$newvollist, $newvolid;
+
+                           PVE::QemuServer::update_config_nolock($newid, $newconf, 1);
+                       }
+                   };
+                   die $@ if $@;
+
+                   delete $newconf->{lock};
+                   PVE::QemuServer::update_config_nolock($newid, $newconf, 1);
+               };
+               if (my $err = $@) { 
+                   unlink $conffile;
+
+                   sleep 1; # some storages like rbd need to wait before releasing the volume - really?
+
+                   foreach my $volid (@$newvollist) {
+                       eval { PVE::Storage::vdisk_free($storecfg, $volid); };
+                       warn $@ if $@;
+                   }
+                   die "copy failed: $err";
+               }
+
+               return;
+           };
+
+           return $rpcenv->fork_worker('qmcopy', $vmid, $authuser, $realcmd);
+       };
+
+       # Acquire shared lock for $vmid
+       return PVE::QemuServer::lock_config_shared($vmid, 1, sub {
+           # Acquire exclusive lock for $newid
+           return PVE::QemuServer::lock_config_full($newid, 1, $copyfn);
+       });
+
+    }});
+
 __PACKAGE__->register_method({
     name => 'migrate_vm',
     path => '{vmid}/migrate',
@@ -2393,13 +2614,13 @@ __PACKAGE__->register_method({
            PVE::QemuServer::check_lock($conf);
 
            die "unable to create template, because VM contains snapshots\n" 
-               if $conf->{snapshots};
+               if $conf->{snapshots} && scalar(keys %{$conf->{snapshots}});
 
            die "you can't convert a template to a template\n" 
                if PVE::QemuServer::is_template($conf) && !$disk;
 
            die "you can't convert a VM to template if VM is running\n" 
-               if check_running($vmid);
+               if PVE::QemuServer::check_running($vmid);
 
            my $realcmd = sub {
                PVE::QemuServer::template_create($vmid, $conf, $disk);