use strict;
use warnings;
use Cwd 'abs_path';
+use Net::SSLeay;
use PVE::Cluster qw (cfs_read_file cfs_write_file);;
use PVE::SafeSyslog;
use PVE::Tools qw(extract_param);
-use PVE::Exception qw(raise raise_param_exc);
+use PVE::Exception qw(raise raise_param_exc raise_perm_exc);
use PVE::Storage;
use PVE::JSONSchema qw(get_standard_option);
use PVE::RESTHandler;
use PVE::AccessControl;
use PVE::INotify;
use PVE::Network;
+use PVE::API2::Firewall::VM;
use Data::Dumper; # fixme: remove
});
};
+# Verify that $authuser may clone every drive referenced by $conf, and
+# determine whether the clone could run on any cluster node.
+#
+# Parameters:
+#   $rpcenv, $authuser - permission-check environment and acting user
+#   $storecfg          - parsed storage configuration
+#   $conf              - source VM configuration whose drives are inspected
+#   $storage           - optional target storage overriding each disk's
+#                        storage for the permission check
+#
+# Returns 1 if all referenced volumes live on shared storage, 0 otherwise.
+my $check_storage_access_clone = sub {
+   my ($rpcenv, $authuser, $storecfg, $conf, $storage) = @_;
+
+   my $sharedvm = 1;
+
+   PVE::QemuServer::foreach_drive($conf, sub {
+	my ($ds, $drive) = @_;
+
+	my $isCDROM = PVE::QemuServer::drive_is_cdrom($drive);
+
+	my $volid = $drive->{file};
+
+	return if !$volid || $volid eq 'none';
+
+	if ($isCDROM && $volid eq 'cdrom') {
+	    # physical CDROM pass-through needs console rights, no storage involved
+	    $rpcenv->check($authuser, "/", ['Sys.Console']);
+	    return;
+	}
+
+	my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
+	my $scfg = PVE::Storage::storage_config($storecfg, $sid);
+	$sharedvm = 0 if !$scfg->{shared};
+
+	if (!$isCDROM) {
+	    $sid = $storage if $storage;
+	    $rpcenv->check($authuser, "/storage/$sid", ['Datastore.AllocateSpace']);
+	}
+	# else: ISO image on a storage - we simply allow access
+    });
+
+    return $sharedvm;
+};
+
# Note: $pool is only needed when creating a VM, because pool permissions
# are automatically inherited if VM already exists inside a pool.
my $create_disks = sub {
$res->{$ds} = PVE::QemuServer::print_drive($vmid, $disk);
} else {
- my $path = $rpcenv->check_volume_access($authuser, $storecfg, $vmid, $volid);
-
- my ($storeid, $volname) = PVE::Storage::parse_volume_id($volid, 1);
+ $rpcenv->check_volume_access($authuser, $storecfg, $vmid, $volid);
- my $foundvolid = undef;
+ my $volid_is_new = 1;
- if ($storeid) {
- PVE::Storage::activate_volumes($storecfg, [ $volid ]);
- my $dl = PVE::Storage::vdisk_list($storecfg, $storeid, undef);
+ if ($conf->{$ds}) {
+ my $olddrive = PVE::QemuServer::parse_drive($ds, $conf->{$ds});
+ $volid_is_new = undef if $olddrive->{file} && $olddrive->{file} eq $volid;
+ }
- PVE::Storage::foreach_volid($dl, sub {
- my ($volumeid) = @_;
- if($volumeid eq $volid) {
- $foundvolid = 1;
- return;
- }
- });
+ if ($volid_is_new) {
+
+ my ($storeid, $volname) = PVE::Storage::parse_volume_id($volid, 1);
+
+ PVE::Storage::activate_volumes($storecfg, [ $volid ]) if $storeid;
+
+ my $size = PVE::Storage::volume_size_info($storecfg, $volid);
+
+ die "volume $volid does not exists\n" if !$size;
+
+ $disk->{size} = $size;
}
-
- die "image '$path' does not exists\n" if (!(-f $path || -b $path || $foundvolid));
- my ($size) = PVE::Storage::volume_size_info($storecfg, $volid, 1);
- $disk->{size} = $size;
$res->{$ds} = PVE::QemuServer::print_drive($vmid, $disk);
}
});
next if PVE::QemuServer::valid_drivename($opt);
if ($opt eq 'sockets' || $opt eq 'cores' ||
- $opt eq 'cpu' || $opt eq 'smp' ||
+ $opt eq 'cpu' || $opt eq 'smp' ||
$opt eq 'cpulimit' || $opt eq 'cpuunits') {
$rpcenv->check_vm_perm($authuser, $vmid, $pool, ['VM.Config.CPU']);
} elsif ($opt eq 'boot' || $opt eq 'bootdisk') {
$rpcenv->check_vm_perm($authuser, $vmid, $pool, ['VM.Config.Memory']);
} elsif ($opt eq 'args' || $opt eq 'lock') {
die "only root can set '$opt' config\n";
- } elsif ($opt eq 'cpu' || $opt eq 'kvm' || $opt eq 'acpi' ||
+ } elsif ($opt eq 'cpu' || $opt eq 'kvm' || $opt eq 'acpi' || $opt eq 'machine' ||
$opt eq 'vga' || $opt eq 'watchdog' || $opt eq 'tablet') {
$rpcenv->check_vm_perm($authuser, $vmid, $pool, ['VM.Config.HWType']);
} elsif ($opt =~ m/^net\d+$/) {
return $res;
}});
+
+
__PACKAGE__->register_method({
name => 'create_vm',
path => '',
method => 'POST',
description => "Create or restore a virtual machine.",
permissions => {
- description => "You need 'VM.Allocate' permissions on /vms/{vmid} or on the VM pool /pool/{pool}. If you create disks you need 'Datastore.AllocateSpace' on any used storage.",
- check => [ 'or',
- [ 'perm', '/vms/{vmid}', ['VM.Allocate']],
- [ 'perm', '/pool/{pool}', ['VM.Allocate'], require_param => 'pool'],
- ],
+ description => "You need 'VM.Allocate' permissions on /vms/{vmid} or on the VM pool /pool/{pool}. " .
+ "For restore (option 'archive'), it is enough if the user has 'VM.Backup' permission and the VM already exists. " .
+ "If you create disks you need 'Datastore.AllocateSpace' on any used storage.",
+ user => 'all', # check inside
},
protected => 1,
proxyto => 'node',
description => "Assign a unique random ethernet address.",
requires => 'archive',
},
- pool => {
+ pool => {
optional => 1,
type => 'string', format => 'pve-poolid',
description => "Add the VM to the specified pool.",
my $force = extract_param($param, 'force');
my $unique = extract_param($param, 'unique');
-
+
my $pool = extract_param($param, 'pool');
my $filename = PVE::QemuServer::config_file($vmid);
if (defined($pool)) {
$rpcenv->check_pool_exist($pool);
- }
+ }
$rpcenv->check($authuser, "/storage/$storage", ['Datastore.AllocateSpace'])
if defined($storage);
+ if ($rpcenv->check($authuser, "/vms/$vmid", ['VM.Allocate'], 1)) {
+ # OK
+ } elsif ($pool && $rpcenv->check($authuser, "/pool/$pool", ['VM.Allocate'], 1)) {
+ # OK
+ } elsif ($archive && $force && (-f $filename) &&
+ $rpcenv->check($authuser, "/vms/$vmid", ['VM.Backup'], 1)) {
+ # OK: user has VM.Backup permissions, and want to restore an existing VM
+ } else {
+ raise_perm_exc();
+ }
+
if (!$archive) {
&$resolve_cdrom_alias($param);
die "pipe requires cli environment\n"
if $rpcenv->{type} ne 'cli';
} else {
- my $path = $rpcenv->check_volume_access($authuser, $storecfg, $vmid, $archive);
-
- PVE::Storage::activate_volumes($storecfg, [ $archive ])
- if PVE::Storage::parse_volume_id ($archive, 1);
-
- die "can't find archive file '$archive'\n" if !($path && -f $path);
- $archive = $path;
+ $rpcenv->check_volume_access($authuser, $storecfg, $vmid, $archive);
+ $archive = PVE::Storage::abs_filesystem_path($storecfg, $archive);
}
}
- my $addVMtoPoolFn = sub {
- my $usercfg = cfs_read_file("user.cfg");
- if (my $data = $usercfg->{pools}->{$pool}) {
- $data->{vms}->{$vmid} = 1;
- $usercfg->{vms}->{$vmid} = $pool;
- cfs_write_file("user.cfg", $usercfg);
- }
- };
-
my $restorefn = sub {
+ # fixme: this test does not work if VM exists on other node!
if (-f $filename) {
die "unable to restore vm $vmid: config file already exists\n"
if !$force;
pool => $pool,
unique => $unique });
- PVE::AccessControl::lock_user_config($addVMtoPoolFn, "can't add VM to pool") if $pool;
+ PVE::AccessControl::add_vm_to_pool($vmid, $pool) if $pool;
};
return $rpcenv->fork_worker('qmrestore', $vmid, $authuser, $realcmd);
die "create failed - $err";
}
- PVE::AccessControl::lock_user_config($addVMtoPoolFn, "can't add VM to pool") if $pool;
+ PVE::AccessControl::add_vm_to_pool($vmid, $pool) if $pool;
};
return $rpcenv->fork_worker('qmcreate', $vmid, $authuser, $realcmd);
{ subdir => 'vncproxy' },
{ subdir => 'migrate' },
{ subdir => 'resize' },
+ { subdir => 'move' },
{ subdir => 'rrd' },
{ subdir => 'rrddata' },
{ subdir => 'monitor' },
{ subdir => 'snapshot' },
+ { subdir => 'spiceproxy' },
+ { subdir => 'sendkey' },
+ { subdir => 'firewall' },
];
return $res;
}});
+# Mount the per-VM firewall API (PVE::API2::Firewall::VM) below
+# /nodes/{node}/qemu/{vmid}/firewall.
+__PACKAGE__->register_method ({
+    subclass => "PVE::API2::Firewall::VM",
+    path => '{vmid}/firewall',
+});
+
__PACKAGE__->register_method({
name => 'rrd',
path => '{vmid}/rrd',
if (!PVE::QemuServer::drive_is_cdrom($drive)) {
my $volid = $drive->{file};
+
if (&$vm_is_volid_owner($storecfg, $vmid, $volid)) {
if ($force || $key =~ m/^unused/) {
- eval { PVE::Storage::vdisk_free($storecfg, $volid); };
+ eval {
+ # check if the disk is really unused
+ my $used_paths = PVE::QemuServer::get_used_paths($vmid, $storecfg, $conf, 1, $key);
+ my $path = PVE::Storage::path($storecfg, $volid);
+
+ die "unable to delete '$volid' - volume is still in use (snapshot?)\n"
+ if $used_paths->{$path};
+
+ PVE::Storage::vdisk_free($storecfg, $volid);
+ };
die $@ if $@;
} else {
PVE::QemuServer::add_unused_volume($conf, $volid, $vmid);
$rpcenv->check_vm_perm($authuser, $vmid, undef, ['VM.Config.Disk']);
my $drive = PVE::QemuServer::parse_drive($opt, $conf->{$opt});
- if (my $sid = &$test_deallocate_drive($storecfg, $vmid, $opt, $drive, $force)) {
- $rpcenv->check($authuser, "/storage/$sid", ['Datastore.Allocate']);
+ if (my $sid = &$test_deallocate_drive($storecfg, $vmid, $opt, $drive, $force)) {
+ $rpcenv->check($authuser, "/storage/$sid", ['Datastore.AllocateSpace']);
}
}
my $unplugwarning = "";
- if($conf->{ostype} && $conf->{ostype} eq 'l26'){
+ if ($conf->{ostype} && $conf->{ostype} eq 'l26') {
$unplugwarning = "<br>verify that you have acpiphp && pci_hotplug modules loaded in your guest VM";
- }elsif($conf->{ostype} && $conf->{ostype} eq 'l24'){
+ } elsif ($conf->{ostype} && $conf->{ostype} eq 'l24') {
$unplugwarning = "<br>kernel 2.4 don't support hotplug, please disable hotplug in options";
- }elsif(!$conf->{ostype} || ($conf->{ostype} && $conf->{ostype} eq 'other')){
+ } elsif (!$conf->{ostype} || ($conf->{ostype} && $conf->{ostype} eq 'other')) {
$unplugwarning = "<br>verify that your guest support acpi hotplug";
}
- if($opt eq 'tablet'){
+ if ($opt eq 'tablet') {
PVE::QemuServer::vm_deviceplug(undef, $conf, $vmid, $opt);
- }else{
+ } else {
die "error hot-unplug $opt $unplugwarning" if !PVE::QemuServer::vm_deviceunplug($vmid, $conf, $opt);
}
my $safe_num_ne = sub {
    my ($a, $b) = @_;
-    return 0 if !defined($a) && !defined($b);
-    return 1 if !defined($a);
-    return 1 if !defined($b);
+    # Numeric inequality that is safe for undef operands:
+    # two undefs compare equal, undef vs. any defined value compares unequal.
+    return 0 if !defined($a) && !defined($b);
+    return 1 if !defined($a);
+    return 1 if !defined($b);
    return $a != $b;
};
&$safe_num_ne($drive->{mbps_wr}, $old_drive->{mbps_wr}) ||
&$safe_num_ne($drive->{iops}, $old_drive->{iops}) ||
&$safe_num_ne($drive->{iops_rd}, $old_drive->{iops_rd}) ||
- &$safe_num_ne($drive->{iops_wr}, $old_drive->{iops_wr})) {
- PVE::QemuServer::qemu_block_set_io_throttle($vmid,"drive-$opt", $drive->{mbps}*1024*1024,
- $drive->{mbps_rd}*1024*1024, $drive->{mbps_wr}*1024*1024,
- $drive->{iops}, $drive->{iops_rd}, $drive->{iops_wr})
+ &$safe_num_ne($drive->{iops_wr}, $old_drive->{iops_wr}) ||
+ &$safe_num_ne($drive->{mbps_max}, $old_drive->{mbps_max}) ||
+ &$safe_num_ne($drive->{mbps_rd_max}, $old_drive->{mbps_rd_max}) ||
+ &$safe_num_ne($drive->{mbps_wr_max}, $old_drive->{mbps_wr_max}) ||
+ &$safe_num_ne($drive->{iops_max}, $old_drive->{iops_max}) ||
+ &$safe_num_ne($drive->{iops_rd_max}, $old_drive->{iops_rd_max}) ||
+ &$safe_num_ne($drive->{iops_wr_max}, $old_drive->{iops_wr_max})) {
+ PVE::QemuServer::qemu_block_set_io_throttle($vmid,"drive-$opt",
+ ($drive->{mbps} || 0)*1024*1024,
+ ($drive->{mbps_rd} || 0)*1024*1024,
+ ($drive->{mbps_wr} || 0)*1024*1024,
+ $drive->{iops} || 0,
+ $drive->{iops_rd} || 0,
+ $drive->{iops_wr} || 0,
+ ($drive->{mbps_max} || 0)*1024*1024,
+ ($drive->{mbps_rd_max} || 0)*1024*1024,
+ ($drive->{mbps_wr_max} || 0)*1024*1024,
+ $drive->{iops_max} || 0,
+ $drive->{iops_rd_max} || 0,
+ $drive->{iops_wr_max} || 0)
if !PVE::QemuServer::drive_is_cdrom($drive);
}
}
#if model change, we try to hot-unplug
die "error hot-unplug $opt for update" if !PVE::QemuServer::vm_deviceunplug($vmid, $conf, $opt);
}else{
-
+
if($newnet->{bridge} && $oldnet->{bridge}){
my $iface = "tap".$vmid."i".$1 if $opt =~ m/net(\d+)/;
PVE::Network::tap_rate_limit($iface, $newnet->{rate});
}
- if(($newnet->{bridge} ne $oldnet->{bridge}) || ($newnet->{tag} ne $oldnet->{tag})){
- eval{PVE::Network::tap_unplug($iface, $oldnet->{bridge}, $oldnet->{tag});};
- PVE::Network::tap_plug($iface, $newnet->{bridge}, $newnet->{tag});
+ if(($newnet->{bridge} ne $oldnet->{bridge}) || ($newnet->{tag} ne $oldnet->{tag}) || ($newnet->{firewall} ne $oldnet->{firewall})){
+ PVE::Network::tap_unplug($iface);
+ PVE::Network::tap_plug($iface, $newnet->{bridge}, $newnet->{tag}, $newnet->{firewall});
}
}else{
die "error hot-unplug $opt for update" if !PVE::QemuServer::vm_deviceunplug($vmid, $conf, $opt);
}
}
-
+
}
$conf->{$opt} = $value;
PVE::QemuServer::update_config_nolock($vmid, $conf, 1);
die "error hotplug $opt" if !PVE::QemuServer::vm_deviceplug($storecfg, $conf, $vmid, $opt, $net);
};
-my $vm_config_perm_list = [
- 'VM.Config.Disk',
- 'VM.Config.CDROM',
- 'VM.Config.CPU',
- 'VM.Config.Memory',
- 'VM.Config.Network',
- 'VM.Config.HWType',
- 'VM.Config.Options',
- ];
+# POST/PUT {vmid}/config implementation
+#
+# The original API used PUT (idempotent) an we assumed that all operations
+# are fast. But it turned out that almost any configuration change can
+# involve hot-plug actions, or disk alloc/free. Such actions can take long
+# time to complete and have side effects (not idempotent).
+#
+# The new implementation uses POST and forks a worker process. We added
+# a new option 'background_delay'. If specified we wait up to
+# 'background_delay' second for the worker task to complete. It returns null
+# if the task is finished within that time, else we return the UPID.
-__PACKAGE__->register_method({
- name => 'update_vm',
- path => '{vmid}/config',
- method => 'PUT',
- protected => 1,
- proxyto => 'node',
- description => "Set virtual machine options.",
- permissions => {
- check => ['perm', '/vms/{vmid}', $vm_config_perm_list, any => 1],
- },
- parameters => {
- additionalProperties => 0,
- properties => PVE::QemuServer::json_config_properties(
- {
- node => get_standard_option('pve-node'),
- vmid => get_standard_option('pve-vmid'),
- skiplock => get_standard_option('skiplock'),
- delete => {
- type => 'string', format => 'pve-configid-list',
- description => "A list of settings you want to delete.",
- optional => 1,
- },
- force => {
- type => 'boolean',
- description => $opt_force_description,
- optional => 1,
- requires => 'delete',
- },
- digest => {
- type => 'string',
- description => 'Prevent changes if current configuration file has different SHA1 digest. This can be used to prevent concurrent modifications.',
- maxLength => 40,
- optional => 1,
- }
- }),
- },
- returns => { type => 'null'},
- code => sub {
- my ($param) = @_;
+# Shared implementation for the PUT (synchronous) and POST (asynchronous)
+# {vmid}/config handlers.  With $sync set the update runs inline and returns
+# undef; otherwise a 'qmconfig' worker is forked and its UPID returned
+# (or undef when 'background_delay' is given and the worker finishes in time).
+my $update_vm_api = sub {
+    my ($param, $sync) = @_;
-    my $rpcenv = PVE::RPCEnvironment::get();
+    my $rpcenv = PVE::RPCEnvironment::get();
-    my $authuser = $rpcenv->get_user();
+    my $authuser = $rpcenv->get_user();
-    my $node = extract_param($param, 'node');
+    my $node = extract_param($param, 'node');
-    my $vmid = extract_param($param, 'vmid');
+    my $vmid = extract_param($param, 'vmid');
-    my $digest = extract_param($param, 'digest');
+    my $digest = extract_param($param, 'digest');
-    my @paramarr = (); # used for log message
-    foreach my $key (keys %$param) {
-	push @paramarr, "-$key", $param->{$key};
-    }
+    my $background_delay = extract_param($param, 'background_delay');
-    my $skiplock = extract_param($param, 'skiplock');
-    raise_param_exc({ skiplock => "Only root may use this option." })
-	if $skiplock && $authuser ne 'root@pam';
+    my @paramarr = (); # used for log message
+    foreach my $key (keys %$param) {
+	push @paramarr, "-$key", $param->{$key};
+    }
-    my $delete_str = extract_param($param, 'delete');
+    my $skiplock = extract_param($param, 'skiplock');
+    raise_param_exc({ skiplock => "Only root may use this option." })
+	if $skiplock && $authuser ne 'root@pam';
-    my $force = extract_param($param, 'force');
+    my $delete_str = extract_param($param, 'delete');
-    die "no options specified\n" if !$delete_str && !scalar(keys %$param);
+    my $force = extract_param($param, 'force');
-    my $storecfg = PVE::Storage::config();
+    die "no options specified\n" if !$delete_str && !scalar(keys %$param);
-    my $defaults = PVE::QemuServer::load_defaults();
+    my $storecfg = PVE::Storage::config();
-    &$resolve_cdrom_alias($param);
+    my $defaults = PVE::QemuServer::load_defaults();
-    # now try to verify all parameters
+    &$resolve_cdrom_alias($param);
-    my @delete = ();
-    foreach my $opt (PVE::Tools::split_list($delete_str)) {
-	$opt = 'ide2' if $opt eq 'cdrom';
-	raise_param_exc({ delete => "you can't use '-$opt' and " .
-			  "-delete $opt' at the same time" })
-	    if defined($param->{$opt});
+    # now try to verify all parameters
-	if (!PVE::QemuServer::option_exists($opt)) {
-	    raise_param_exc({ delete => "unknown option '$opt'" });
-	}
+    my @delete = ();
+    foreach my $opt (PVE::Tools::split_list($delete_str)) {
+	$opt = 'ide2' if $opt eq 'cdrom';
+	raise_param_exc({ delete => "you can't use '-$opt' and " .
+			  "-delete $opt' at the same time" })
+	    if defined($param->{$opt});
-	push @delete, $opt;
+	if (!PVE::QemuServer::option_exists($opt)) {
+	    raise_param_exc({ delete => "unknown option '$opt'" });
	}
-    foreach my $opt (keys %$param) {
-	if (PVE::QemuServer::valid_drivename($opt)) {
-	    # cleanup drive path
-	    my $drive = PVE::QemuServer::parse_drive($opt, $param->{$opt});
-	    PVE::QemuServer::cleanup_drive_path($opt, $storecfg, $drive);
-	    $param->{$opt} = PVE::QemuServer::print_drive($vmid, $drive);
-	} elsif ($opt =~ m/^net(\d+)$/) {
-	    # add macaddr
-	    my $net = PVE::QemuServer::parse_net($param->{$opt});
-	    $param->{$opt} = PVE::QemuServer::print_net($net);
-	}
+	push @delete, $opt;
+    }
+
+    foreach my $opt (keys %$param) {
+	if (PVE::QemuServer::valid_drivename($opt)) {
+	    # cleanup drive path
+	    my $drive = PVE::QemuServer::parse_drive($opt, $param->{$opt});
+	    PVE::QemuServer::cleanup_drive_path($opt, $storecfg, $drive);
+	    $param->{$opt} = PVE::QemuServer::print_drive($vmid, $drive);
+	} elsif ($opt =~ m/^net(\d+)$/) {
+	    # add macaddr
+	    my $net = PVE::QemuServer::parse_net($param->{$opt});
+	    $param->{$opt} = PVE::QemuServer::print_net($net);
	}
+    }
-    &$check_vm_modify_config_perm($rpcenv, $authuser, $vmid, undef, [@delete]);
+    &$check_vm_modify_config_perm($rpcenv, $authuser, $vmid, undef, [@delete]);
-    &$check_vm_modify_config_perm($rpcenv, $authuser, $vmid, undef, [keys %$param]);
+    &$check_vm_modify_config_perm($rpcenv, $authuser, $vmid, undef, [keys %$param]);
-    &$check_storage_access($rpcenv, $authuser, $storecfg, $vmid, $param);
+    &$check_storage_access($rpcenv, $authuser, $storecfg, $vmid, $param);
-    my $updatefn = sub {
+    my $updatefn = sub {
-	my $conf = PVE::QemuServer::load_config($vmid);
+	my $conf = PVE::QemuServer::load_config($vmid);
-	die "checksum missmatch (file change by other user?)\n"
-	    if $digest && $digest ne $conf->{digest};
+	die "checksum missmatch (file change by other user?)\n"
+	    if $digest && $digest ne $conf->{digest};
-	PVE::QemuServer::check_lock($conf) if !$skiplock;
+	PVE::QemuServer::check_lock($conf) if !$skiplock;
-	if ($param->{memory} || defined($param->{balloon})) {
-	    my $maxmem = $param->{memory} || $conf->{memory} || $defaults->{memory};
-	    my $balloon = defined($param->{balloon}) ? $param->{balloon} : $conf->{balloon};
+	if ($param->{memory} || defined($param->{balloon})) {
+	    my $maxmem = $param->{memory} || $conf->{memory} || $defaults->{memory};
+	    my $balloon = defined($param->{balloon}) ? $param->{balloon} : $conf->{balloon};
-	    die "balloon value too large (must be smaller than assigned memory)\n"
-		if $balloon > $maxmem;
-	}
+	    die "balloon value too large (must be smaller than assigned memory)\n"
+		if $balloon && $balloon > $maxmem;
+	}
+
+	PVE::Cluster::log_msg('info', $authuser, "update VM $vmid: " . join (' ', @paramarr));
+
+	my $worker = sub {
-	PVE::Cluster::log_msg('info', $authuser, "update VM $vmid: " . join (' ', @paramarr));
+	    print "update VM $vmid: " . join (' ', @paramarr) . "\n";
	    foreach my $opt (@delete) { # delete
		$conf = PVE::QemuServer::load_config($vmid); # update/reload
		if (PVE::QemuServer::valid_drivename($opt)) {
-		    &$vmconfig_update_disk($rpcenv, $authuser, $conf, $storecfg, $vmid,
+		    &$vmconfig_update_disk($rpcenv, $authuser, $conf, $storecfg, $vmid,
					   $opt, $param->{$opt}, $force);
-
+
		} elsif ($opt =~ m/^net(\d+)$/) { #nics
-		    &$vmconfig_update_net($rpcenv, $authuser, $conf, $storecfg, $vmid,
+		    &$vmconfig_update_net($rpcenv, $authuser, $conf, $storecfg, $vmid,
					  $opt, $param->{$opt});
		} else {
		    if($opt eq 'tablet' && $param->{$opt} == 1){
			PVE::QemuServer::vm_deviceplug(undef, $conf, $vmid, $opt);
-		    }elsif($opt eq 'tablet' && $param->{$opt} == 0){
+		    } elsif($opt eq 'tablet' && $param->{$opt} == 0){
			PVE::QemuServer::vm_deviceunplug($vmid, $conf, $opt);
		    }
+
+		    if($opt eq 'cores' && $conf->{maxcpus}){
+			PVE::QemuServer::qemu_cpu_hotplug($vmid, $conf, $param->{$opt});
+		    }
		    $conf->{$opt} = $param->{$opt};
		    PVE::QemuServer::update_config_nolock($vmid, $conf, 1);
		}
	    # allow manual ballooning if shares is set to zero
-	    if ($running && defined($param->{balloon}) &&
+	    if ($running && defined($param->{balloon}) &&
		defined($conf->{shares}) && ($conf->{shares} == 0)) {
		my $balloon = $param->{'balloon'} || $conf->{memory} || $defaults->{memory};
		PVE::QemuServer::vm_mon_cmd($vmid, "balloon", value => $balloon*1024*1024);
	    }
-
	};
+	# $sync preserves the old PUT semantics: run inline, no worker task.
+	if ($sync) {
+	    &$worker();
+	    return undef;
+	} else {
+	    my $upid = $rpcenv->fork_worker('qmconfig', $vmid, $authuser, $worker);
+
+	    if ($background_delay) {
+
+		# Note: It would be better to do that in the Event based HTTPServer
+		# to avoid blocking call to sleep.
+
+		my $end_time = time() + $background_delay;
+
+		my $task = PVE::Tools::upid_decode($upid);
+
+		my $running = 1;
+		while (time() < $end_time) {
+		    $running = PVE::ProcFSTools::check_process_running($task->{pid}, $task->{pstart});
+		    last if !$running;
+		    sleep(1); # this gets interrupted when child process ends
+		}
+
+		if (!$running) {
+		    my $status = PVE::Tools::upid_read_status($upid);
+		    return undef if $status eq 'OK';
+		    die $status;
+		}
+	    }
+
+	    return $upid;
+	}
+    };
+
+    return PVE::QemuServer::lock_config($vmid, $updatefn);
+};
+
+# Any one of these privileges on /vms/{vmid} is sufficient to call the
+# config-update API; fine-grained per-option checks are done inside the
+# handler via $check_vm_modify_config_perm.
+my $vm_config_perm_list = [
+	'VM.Config.Disk',
+	'VM.Config.CDROM',
+	'VM.Config.CPU',
+	'VM.Config.Memory',
+	'VM.Config.Network',
+	'VM.Config.HWType',
+	'VM.Config.Options',
+    ];
+
+# POST {vmid}/config - asynchronous variant: forks a 'qmconfig' worker and
+# returns its UPID (or null when the task finishes within 'background_delay').
+__PACKAGE__->register_method({
+    name => 'update_vm_async',
+    path => '{vmid}/config',
+    method => 'POST',
+    protected => 1,
+    proxyto => 'node',
+    description => "Set virtual machine options (asynchronous API).",
+    permissions => {
+	check => ['perm', '/vms/{vmid}', $vm_config_perm_list, any => 1],
+    },
+    parameters => {
+	additionalProperties => 0,
+	properties => PVE::QemuServer::json_config_properties(
+	    {
+		node => get_standard_option('pve-node'),
+		vmid => get_standard_option('pve-vmid'),
+		skiplock => get_standard_option('skiplock'),
+		delete => {
+		    type => 'string', format => 'pve-configid-list',
+		    description => "A list of settings you want to delete.",
+		    optional => 1,
+		},
+		force => {
+		    type => 'boolean',
+		    description => $opt_force_description,
+		    optional => 1,
+		    requires => 'delete',
+		},
+		digest => {
+		    type => 'string',
+		    description => 'Prevent changes if current configuration file has different SHA1 digest. This can be used to prevent concurrent modifications.',
+		    maxLength => 40,
+		    optional => 1,
+		},
+		background_delay => {
+		    type => 'integer',
+		    description => "Time to wait for the task to finish. We return 'null' if the task finishes within that time.",
+		    minimum => 1,
+		    maximum => 30,
+		    optional => 1,
+		},
+	    }),
+    },
+    returns => {
+	type => 'string',
+	optional => 1,
+    },
+    code => $update_vm_api,
+});
+# PUT {vmid}/config - synchronous variant kept for backward compatibility;
+# delegates to $update_vm_api with $sync set.
+__PACKAGE__->register_method({
+    name => 'update_vm',
+    path => '{vmid}/config',
+    method => 'PUT',
+    protected => 1,
+    proxyto => 'node',
+    description => "Set virtual machine options (synchronous API) - You should consider using the POST method instead for any actions involving hotplug or storage allocation.",
+    permissions => {
+	check => ['perm', '/vms/{vmid}', $vm_config_perm_list, any => 1],
+    },
+    parameters => {
+	additionalProperties => 0,
+	properties => PVE::QemuServer::json_config_properties(
+	    {
+		node => get_standard_option('pve-node'),
+		vmid => get_standard_option('pve-vmid'),
+		skiplock => get_standard_option('skiplock'),
+		delete => {
+		    type => 'string', format => 'pve-configid-list',
+		    description => "A list of settings you want to delete.",
+		    optional => 1,
+		},
+		force => {
+		    type => 'boolean',
+		    description => $opt_force_description,
+		    optional => 1,
+		    requires => 'delete',
+		},
+		digest => {
+		    type => 'string',
+		    description => 'Prevent changes if current configuration file has different SHA1 digest. This can be used to prevent concurrent modifications.',
+		    maxLength => 40,
+		    optional => 1,
+		},
+	    }),
+    },
+    returns => { type => 'null' },
+    code => sub {
+	my ($param) = @_;
+	&$update_vm_api($param, 1);
	return undef;
-    }});
+    }
+});
__PACKAGE__->register_method({
my $storecfg = PVE::Storage::config();
- my $delVMfromPoolFn = sub {
+ my $delVMfromPoolFn = sub {
my $usercfg = cfs_read_file("user.cfg");
if (my $pool = $usercfg->{vms}->{$vmid}) {
if (my $data = $usercfg->{pools}->{$pool}) {
PVE::QemuServer::vm_destroy($storecfg, $vmid, $skiplock);
- PVE::AccessControl::lock_user_config($delVMfromPoolFn, "pool cleanup failed");
+ PVE::AccessControl::remove_vm_from_pool($vmid);
};
return $rpcenv->fork_worker('qmdestroy', $vmid, $authuser, $realcmd);
properties => {
node => get_standard_option('pve-node'),
vmid => get_standard_option('pve-vmid'),
+ websocket => {
+ optional => 1,
+ type => 'boolean',
+ description => "starts websockify instead of vncproxy",
+ },
},
},
returns => {
my $vmid = $param->{vmid};
my $node = $param->{node};
+ my $websocket = $param->{websocket};
+
+ my $conf = PVE::QemuServer::load_config($vmid, $node); # check if VM exists
my $authpath = "/vms/$vmid";
my $port = PVE::Tools::next_vnc_port();
my $remip;
+ my $remcmd = [];
if ($node ne 'localhost' && $node ne PVE::INotify::nodename()) {
$remip = PVE::Cluster::remote_node_ip($node);
+ # NOTE: kvm VNC traffic is already TLS encrypted or is known unsecure
+ $remcmd = ['/usr/bin/ssh', '-T', '-o', 'BatchMode=yes', $remip];
}
- # NOTE: kvm VNC traffic is already TLS encrypted
- my $remcmd = $remip ? ['/usr/bin/ssh', '-T', '-o', 'BatchMode=yes', $remip] : [];
-
my $timeout = 10;
my $realcmd = sub {
syslog('info', "starting vnc proxy $upid\n");
- my $qmcmd = [@$remcmd, "/usr/sbin/qm", 'vncproxy', $vmid];
+ my $cmd;
- my $qmstr = join(' ', @$qmcmd);
+ if ($conf->{vga} && ($conf->{vga} =~ m/^serial\d+$/)) {
- # also redirect stderr (else we get RFB protocol errors)
- my $cmd = ['/bin/nc', '-l', '-p', $port, '-w', $timeout, '-c', "$qmstr 2>/dev/null"];
+ die "Websocket mode is not supported in vga serial mode!" if $websocket;
+
+ my $termcmd = [ '/usr/sbin/qm', 'terminal', $vmid, '-iface', $conf->{vga} ];
+ #my $termcmd = "/usr/bin/qm terminal -iface $conf->{vga}";
+ $cmd = ['/usr/bin/vncterm', '-rfbport', $port,
+ '-timeout', $timeout, '-authpath', $authpath,
+ '-perm', 'Sys.Console', '-c', @$remcmd, @$termcmd];
+ } else {
+
+ $ENV{LC_PVE_TICKET} = $ticket if $websocket; # set ticket with "qm vncproxy"
+
+ my $qmcmd = [@$remcmd, "/usr/sbin/qm", 'vncproxy', $vmid];
+
+ my $qmstr = join(' ', @$qmcmd);
+
+ # also redirect stderr (else we get RFB protocol errors)
+ $cmd = ['/bin/nc', '-l', '-p', $port, '-w', $timeout, '-c', "$qmstr 2>/dev/null"];
+ }
PVE::Tools::run_command($cmd);
};
}});
+# GET {vmid}/vncwebsocket - companion endpoint for websocket-based VNC
+# clients; merely validates that the VM exists and echoes the port back.
+__PACKAGE__->register_method({
+    name => 'vncwebsocket',
+    path => '{vmid}/vncwebsocket',
+    method => 'GET',
+    proxyto => 'node',
+    permissions => {
+	check => ['perm', '/vms/{vmid}', [ 'VM.Console' ]],
+    },
+    description => "Opens a websocket for VNC traffic.",
+    parameters => {
+	additionalProperties => 0,
+	properties => {
+	    node => get_standard_option('pve-node'),
+	    vmid => get_standard_option('pve-vmid'),
+	    port => {
+		description => "Port number returned by previous vncproxy call.",
+		type => 'integer',
+		minimum => 5900,
+		maximum => 5999,
+	    },
+	},
+    },
+    returns => {
+	type => "object",
+	properties => {
+	    port => { type => 'string' },
+	},
+    },
+    code => sub {
+	my ($param) = @_;
+
+	my $rpcenv = PVE::RPCEnvironment::get();
+
+	my $authuser = $rpcenv->get_user();
+
+	my $vmid = $param->{vmid};
+	my $node = $param->{node};
+
+	my $conf = PVE::QemuServer::load_config($vmid, $node); # VM exists ?
+
+	# Note: VNC ports are accessible from outside, so we do not gain any
+	# security if we verify that $param->{port} belongs to VM $vmid. This
+	# check is done by verifying the VNC ticket (inside VNC protocol).
+
+	my $port = $param->{port};
+
+	return { port => $port };
+    }});
+
+# POST {vmid}/spiceproxy - generate a one-shot SPICE remote-viewer
+# configuration: sets a short-lived (30s) ticket on the running VM's
+# SPICE server and returns the connection file contents.
+__PACKAGE__->register_method({
+    name => 'spiceproxy',
+    path => '{vmid}/spiceproxy',
+    method => 'POST',
+    protected => 1,
+    proxyto => 'node',
+    permissions => {
+	check => ['perm', '/vms/{vmid}', [ 'VM.Console' ]],
+    },
+    description => "Returns a SPICE configuration to connect to the VM.",
+    parameters => {
+	additionalProperties => 0,
+	properties => {
+	    node => get_standard_option('pve-node'),
+	    vmid => get_standard_option('pve-vmid'),
+	    proxy => get_standard_option('spice-proxy', { optional => 1 }),
+	},
+    },
+    returns => get_standard_option('remote-viewer-config'),
+    code => sub {
+	my ($param) = @_;
+
+	my $rpcenv = PVE::RPCEnvironment::get();
+
+	my $authuser = $rpcenv->get_user();
+
+	my $vmid = $param->{vmid};
+	my $node = $param->{node};
+	my $proxy = $param->{proxy};
+
+	my $conf = PVE::QemuServer::load_config($vmid, $node);
+	# fixed: statement ended with ',' (comma operator) instead of ';'
+	my $title = "VM $vmid - $conf->{'name'}";
+
+	my $port = PVE::QemuServer::spice_port($vmid);
+
+	my ($ticket, undef, $remote_viewer_config) =
+	    PVE::AccessControl::remote_viewer_config($authuser, $vmid, $node, $proxy, $title, $port);
+
+	PVE::QemuServer::vm_mon_cmd($vmid, "set_password", protocol => 'spice', password => $ticket);
+	PVE::QemuServer::vm_mon_cmd($vmid, "expire_password", protocol => 'spice', time => "+30");
+
+	return $remote_viewer_config;
+    }});
+
__PACKAGE__->register_method({
name => 'vmcmdidx',
path => '{vmid}/status',
my $cc = PVE::Cluster::cfs_read_file('cluster.conf');
if (PVE::Cluster::cluster_conf_lookup_pvevm($cc, 0, $vmid, 1)) {
return 1;
- }
+ }
return 0;
};
$status->{ha} = &$vm_is_ha_managed($param->{vmid});
+ $status->{spice} = 1 if PVE::QemuServer::vga_conf_has_spice($conf->{vga});
+
return $status;
}});
skiplock => get_standard_option('skiplock'),
stateuri => get_standard_option('pve-qm-stateuri'),
migratedfrom => get_standard_option('pve-node',{ optional => 1 }),
-
+ machine => get_standard_option('pve-qm-machine'),
},
},
returns => {
my $vmid = extract_param($param, 'vmid');
+ my $machine = extract_param($param, 'machine');
+
my $stateuri = extract_param($param, 'stateuri');
raise_param_exc({ stateuri => "Only root may use this option." })
if $stateuri && $authuser ne 'root@pam';
raise_param_exc({ migratedfrom => "Only root may use this option." })
if $migratedfrom && $authuser ne 'root@pam';
+ # read spice ticket from STDIN
+ my $spice_ticket;
+ if ($stateuri && ($stateuri eq 'tcp') && $migratedfrom && ($rpcenv->{type} eq 'cli')) {
+ if (defined(my $line = <>)) {
+ chomp $line;
+ $spice_ticket = $line;
+ }
+ }
+
my $storecfg = PVE::Storage::config();
if (&$vm_is_ha_managed($vmid) && !$stateuri &&
syslog('info', "start VM $vmid: $upid\n");
- PVE::QemuServer::vm_start($storecfg, $vmid, $stateuri, $skiplock, $migratedfrom);
+ PVE::QemuServer::vm_start($storecfg, $vmid, $stateuri, $skiplock, $migratedfrom, undef,
+ $machine, $spice_ticket);
return;
};
path => '{vmid}/feature',
method => 'GET',
proxyto => 'node',
- protected => 1,
+ protected => 1,
description => "Check if feature for virtual machine is available.",
permissions => {
check => ['perm', '/vms/{vmid}', [ 'VM.Audit' ]],
feature => {
description => "Feature to check.",
type => 'string',
- enum => [ 'snapshot', 'clone' ],
+ enum => [ 'snapshot', 'clone', 'copy' ],
},
snapname => get_standard_option('pve-snapshot-name', {
optional => 1,
}),
},
-
},
returns => {
- type => 'boolean'
+ type => "object",
+ properties => {
+ hasFeature => { type => 'boolean' },
+ nodes => {
+ type => 'array',
+ items => { type => 'string' },
+ }
+ },
},
code => sub {
my ($param) = @_;
}
my $storecfg = PVE::Storage::config();
- my $hasfeature = PVE::QemuServer::has_feature($feature, $conf, $storecfg, $snapname, $running);
- my $res = $hasfeature ? 1 : 0 ;
- return $res;
+ my $nodelist = PVE::QemuServer::shared_nodes($conf, $storecfg);
+ my $hasFeature = PVE::QemuServer::has_feature($feature, $conf, $storecfg, $snapname, $running);
+
+ return {
+ hasFeature => $hasFeature,
+ nodes => [ keys %$nodelist ],
+ };
+ }});
+
+# API method 'clone_vm': create a new VM ($newid) as a copy of an existing
+# VM or template ($vmid). For templates a linked clone is attempted unless
+# 'full' is set; cloning a regular VM always makes a full copy (see the
+# 'full' parameter description below). Returns a worker task UPID string.
+__PACKAGE__->register_method({
+ name => 'clone_vm',
+ path => '{vmid}/clone',
+ method => 'POST',
+ protected => 1,
+ proxyto => 'node',
+ description => "Create a copy of virtual machine/template.",
+ permissions => {
+ description => "You need 'VM.Clone' permissions on /vms/{vmid}, and 'VM.Allocate' permissions " .
+ "on /vms/{newid} (or on the VM pool /pool/{pool}). You also need " .
+ "'Datastore.AllocateSpace' on any used storage.",
+ check =>
+ [ 'and',
+ ['perm', '/vms/{vmid}', [ 'VM.Clone' ]],
+ [ 'or',
+ [ 'perm', '/vms/{newid}', ['VM.Allocate']],
+ [ 'perm', '/pool/{pool}', ['VM.Allocate'], require_param => 'pool'],
+ ],
+ ]
+ },
+ parameters => {
+ additionalProperties => 0,
+ properties => {
+ node => get_standard_option('pve-node'),
+ vmid => get_standard_option('pve-vmid'),
+ newid => get_standard_option('pve-vmid', { description => 'VMID for the clone.' }),
+ name => {
+ optional => 1,
+ type => 'string', format => 'dns-name',
+ description => "Set a name for the new VM.",
+ },
+ description => {
+ optional => 1,
+ type => 'string',
+ description => "Description for the new VM.",
+ },
+ pool => {
+ optional => 1,
+ type => 'string', format => 'pve-poolid',
+ description => "Add the new VM to the specified pool.",
+ },
+ # snapname/storage/format only make sense for a full copy,
+ # hence "requires => 'full'" on each of them
+ snapname => get_standard_option('pve-snapshot-name', {
+ requires => 'full',
+ optional => 1,
+ }),
+ storage => get_standard_option('pve-storage-id', {
+ description => "Target storage for full clone.",
+ requires => 'full',
+ optional => 1,
+ }),
+ 'format' => {
+ description => "Target format for file storage.",
+ requires => 'full',
+ type => 'string',
+ optional => 1,
+ enum => [ 'raw', 'qcow2', 'vmdk'],
+ },
+ full => {
+ optional => 1,
+ type => 'boolean',
+ description => "Create a full copy of all disk. This is always done when " .
+ "you clone a normal VM. For VM templates, we try to create a linked clone by default.",
+ default => 0,
+ },
+ target => get_standard_option('pve-node', {
+ description => "Target node. Only allowed if the original VM is on shared storage.",
+ optional => 1,
+ }),
+ },
+ },
+ returns => {
+ type => 'string',
+ },
+ code => sub {
+ my ($param) = @_;
+
+ my $rpcenv = PVE::RPCEnvironment::get();
+
+ my $authuser = $rpcenv->get_user();
+
+ my $node = extract_param($param, 'node');
+
+ my $vmid = extract_param($param, 'vmid');
+
+ my $newid = extract_param($param, 'newid');
+
+ my $pool = extract_param($param, 'pool');
+
+ # the target pool must already exist before we can add the clone to it
+ if (defined($pool)) {
+ $rpcenv->check_pool_exist($pool);
+ }
+
+ my $snapname = extract_param($param, 'snapname');
+
+ my $storage = extract_param($param, 'storage');
+
+ my $format = extract_param($param, 'format');
+
+ my $target = extract_param($param, 'target');
+
+ my $localnode = PVE::INotify::nodename();
+
+ # a target equal to the local node (or 'localhost') means "no remote target"
+ undef $target if $target && ($target eq $localnode || $target eq 'localhost');
+
+ PVE::Cluster::check_node_exists($target) if $target;
+
+ my $storecfg = PVE::Storage::config();
+
+ if ($storage) {
+ # check if storage is enabled on local node
+ PVE::Storage::storage_check_enabled($storecfg, $storage);
+ if ($target) {
+ # check if storage is available on target node
+ PVE::Storage::storage_check_node($storecfg, $storage, $target);
+ # clone only works if target storage is shared
+ my $scfg = PVE::Storage::storage_config($storecfg, $storage);
+ die "can't clone to non-shared storage '$storage'\n" if !$scfg->{shared};
+ }
+ }
+
+ PVE::Cluster::check_cfs_quorum();
+
+ my $running = PVE::QemuServer::check_running($vmid) || 0;
+
+ # exclusive lock if VM is running - else shared lock is enough;
+ my $shared_lock = $running ? 0 : 1;
+
+ # executed while holding the config lock(s); validates, builds the new
+ # config skeleton and forks the worker doing the actual disk copies
+ my $clonefn = sub {
+
+ # do all tests after lock
+ # we also try to do all tests before we fork the worker
+
+ my $conf = PVE::QemuServer::load_config($vmid);
+
+ PVE::QemuServer::check_lock($conf);
+
+ # re-check run state under the lock; bail out on a race
+ my $verify_running = PVE::QemuServer::check_running($vmid) || 0;
+
+ die "unexpected state change\n" if $verify_running != $running;
+
+ die "snapshot '$snapname' does not exist\n"
+ if $snapname && !defined( $conf->{snapshots}->{$snapname});
+
+ # clone either from the snapshot config or from the current config
+ my $oldconf = $snapname ? $conf->{snapshots}->{$snapname} : $conf;
+
+ # also tells us whether all source volumes live on shared storage
+ my $sharedvm = &$check_storage_access_clone($rpcenv, $authuser, $storecfg, $oldconf, $storage);
+
+ die "can't clone VM to node '$target' (VM uses local storage)\n" if $target && !$sharedvm;
+
+ my $conffile = PVE::QemuServer::config_file($newid);
+
+ die "unable to create VM $newid: config file already exists\n"
+ if -f $conffile;
+
+ # new config starts locked; drives are filled in by the worker below
+ my $newconf = { lock => 'clone' };
+ my $drives = {};
+ my $vollist = [];
+
+ foreach my $opt (keys %$oldconf) {
+ my $value = $oldconf->{$opt};
+
+ # do not copy snapshot related info
+ next if $opt eq 'snapshots' || $opt eq 'parent' || $opt eq 'snaptime' ||
+ $opt eq 'vmstate' || $opt eq 'snapstate';
+
+ # always change MAC! address
+ if ($opt =~ m/^net(\d+)$/) {
+ my $net = PVE::QemuServer::parse_net($value);
+ $net->{macaddr} = PVE::Tools::random_ether_addr();
+ $newconf->{$opt} = PVE::QemuServer::print_net($net);
+ } elsif (PVE::QemuServer::valid_drivename($opt)) {
+ my $drive = PVE::QemuServer::parse_drive($opt, $value);
+ die "unable to parse drive options for '$opt'\n" if !$drive;
+ if (PVE::QemuServer::drive_is_cdrom($drive)) {
+ $newconf->{$opt} = $value; # simply copy configuration
+ } else {
+ # full copy is needed when requested explicitly or when the
+ # source volume is not a base (template) volume; it requires
+ # the storage 'copy' feature
+ if ($param->{full} || !PVE::Storage::volume_is_base($storecfg, $drive->{file})) {
+ die "Full clone feature is not available"
+ if !PVE::Storage::volume_has_feature($storecfg, 'copy', $drive->{file}, $snapname, $running);
+ $drive->{full} = 1;
+ }
+ $drives->{$opt} = $drive;
+ push @$vollist, $drive->{file};
+ }
+ } else {
+ # copy everything else
+ $newconf->{$opt} = $value;
+ }
+ }
+
+ # the clone is a regular VM even if the source was a template
+ delete $newconf->{template};
+
+ if ($param->{name}) {
+ $newconf->{name} = $param->{name};
+ } else {
+ if ($oldconf->{name}) {
+ $newconf->{name} = "Copy-of-$oldconf->{name}";
+ } else {
+ $newconf->{name} = "Copy-of-VM-$vmid";
+ }
+ }
+
+ if ($param->{description}) {
+ $newconf->{description} = $param->{description};
+ }
+
+ # create empty/temp config - this fails if VM already exists on other node
+ PVE::Tools::file_set_contents($conffile, "# qmclone temporary file\nlock: clone\n");
+
+ # worker: copy each disk, then finalize and (optionally) move the config
+ my $realcmd = sub {
+ my $upid = shift;
+
+ my $newvollist = [];
+
+ eval {
+ local $SIG{INT} = $SIG{TERM} = $SIG{QUIT} = $SIG{HUP} = sub { die "interrupted by signal\n"; };
+
+ PVE::Storage::activate_volumes($storecfg, $vollist);
+
+ foreach my $opt (keys %$drives) {
+ my $drive = $drives->{$opt};
+
+ my $newdrive = PVE::QemuServer::clone_disk($storecfg, $vmid, $running, $opt, $drive, $snapname,
+ $newid, $storage, $format, $drive->{full}, $newvollist);
+
+ $newconf->{$opt} = PVE::QemuServer::print_drive($vmid, $newdrive);
+
+ # write config after every disk so progress survives interruption
+ PVE::QemuServer::update_config_nolock($newid, $newconf, 1);
+ }
+
+ delete $newconf->{lock};
+ PVE::QemuServer::update_config_nolock($newid, $newconf, 1);
+
+ if ($target) {
+ # always deactivate volumes - avoid lvm LVs to be active on several nodes
+ PVE::Storage::deactivate_volumes($storecfg, $vollist);
+
+ my $newconffile = PVE::QemuServer::config_file($newid, $target);
+ die "Failed to move config to node '$target' - rename failed: $!\n"
+ if !rename($conffile, $newconffile);
+ }
+
+ PVE::AccessControl::add_vm_to_pool($newid, $pool) if $pool;
+ };
+ if (my $err = $@) {
+ # roll back: drop the temp config and free any volumes created so far
+ unlink $conffile;
+
+ sleep 1; # some storage like rbd need to wait before release volume - really?
+
+ foreach my $volid (@$newvollist) {
+ eval { PVE::Storage::vdisk_free($storecfg, $volid); };
+ warn $@ if $@;
+ }
+ die "clone failed: $err";
+ }
+
+ return;
+ };
+
+ return $rpcenv->fork_worker('qmclone', $vmid, $authuser, $realcmd);
+ };
+
+ # lock the source config (shared if not running), then the new VMID
+ return PVE::QemuServer::lock_config_mode($vmid, 1, $shared_lock, sub {
+ # Aquire exclusive lock lock for $newid
+ return PVE::QemuServer::lock_config_full($newid, 1, $clonefn);
+ });
+
+ }});
+
+# API method 'move_disk': copy one disk of VM $vmid to a different storage
+# (and/or format) via clone_disk, then point the config at the new volume.
+# The original volume is kept as an 'unused' entry unless 'delete' is set.
+# Returns a worker task UPID string.
+__PACKAGE__->register_method({
+ name => 'move_vm_disk',
+ path => '{vmid}/move_disk',
+ method => 'POST',
+ protected => 1,
+ proxyto => 'node',
+ description => "Move volume to different storage.",
+ permissions => {
+ description => "You need 'VM.Config.Disk' permissions on /vms/{vmid}, " .
+ "and 'Datastore.AllocateSpace' permissions on the storage.",
+ check =>
+ [ 'and',
+ ['perm', '/vms/{vmid}', [ 'VM.Config.Disk' ]],
+ ['perm', '/storage/{storage}', [ 'Datastore.AllocateSpace' ]],
+ ],
+ },
+ parameters => {
+ additionalProperties => 0,
+ properties => {
+ node => get_standard_option('pve-node'),
+ vmid => get_standard_option('pve-vmid'),
+ disk => {
+ type => 'string',
+ description => "The disk you want to move.",
+ enum => [ PVE::QemuServer::disknames() ],
+ },
+ storage => get_standard_option('pve-storage-id', { description => "Target Storage." }),
+ 'format' => {
+ type => 'string',
+ description => "Target Format.",
+ enum => [ 'raw', 'qcow2', 'vmdk' ],
+ optional => 1,
+ },
+ delete => {
+ type => 'boolean',
+ description => "Delete the original disk after successful copy. By default the original disk is kept as unused disk.",
+ optional => 1,
+ default => 0,
+ },
+ digest => {
+ type => 'string',
+ description => 'Prevent changes if current configuration file has different SHA1 digest. This can be used to prevent concurrent modifications.',
+ maxLength => 40,
+ optional => 1,
+ },
+ },
+ },
+ returns => {
+ type => 'string',
+ description => "the task ID.",
+ },
+ code => sub {
+ my ($param) = @_;
+
+ my $rpcenv = PVE::RPCEnvironment::get();
+
+ my $authuser = $rpcenv->get_user();
+
+ my $node = extract_param($param, 'node');
+
+ my $vmid = extract_param($param, 'vmid');
+
+ my $digest = extract_param($param, 'digest');
+
+ my $disk = extract_param($param, 'disk');
+
+ my $storeid = extract_param($param, 'storage');
+
+ my $format = extract_param($param, 'format');
+
+ my $storecfg = PVE::Storage::config();
+
+ # runs under the config lock: validate request and fork the copy worker
+ my $updatefn = sub {
+
+ my $conf = PVE::QemuServer::load_config($vmid);
+
+ die "checksum mismatch (file changed by other user?)\n"
+ if $digest && $digest ne $conf->{digest};
+
+ die "disk '$disk' does not exist\n" if !$conf->{$disk};
+
+ my $drive = PVE::QemuServer::parse_drive($disk, $conf->{$disk});
+
+ my $old_volid = $drive->{file} || die "disk '$disk' has no associated volume\n";
+
+ die "you can't move a cdrom\n" if PVE::QemuServer::drive_is_cdrom($drive);
+
+ # derive the current format from the volume name suffix (if any)
+ my $oldfmt;
+ my ($oldstoreid, $oldvolname) = PVE::Storage::parse_volume_id($old_volid);
+ if ($oldvolname =~ m/\.(raw|qcow2|vmdk)$/){
+ $oldfmt = $1;
+ }
+
+ # moving within the same storage only makes sense with a format change
+ die "you can't move to the same storage with same format\n" if $oldstoreid eq $storeid &&
+ (!$format || !$oldfmt || $oldfmt eq $format);
+
+ PVE::Cluster::log_msg('info', $authuser, "move disk VM $vmid: move --disk $disk --storage $storeid");
+
+ my $running = PVE::QemuServer::check_running($vmid);
+
+ PVE::Storage::activate_volumes($storecfg, [ $drive->{file} ]);
+
+ # worker: copy the disk (full copy), update config, clean up
+ my $realcmd = sub {
+
+ my $newvollist = [];
+
+ eval {
+ local $SIG{INT} = $SIG{TERM} = $SIG{QUIT} = $SIG{HUP} = sub { die "interrupted by signal\n"; };
+
+ my $newdrive = PVE::QemuServer::clone_disk($storecfg, $vmid, $running, $disk, $drive, undef,
+ $vmid, $storeid, $format, 1, $newvollist);
+
+ $conf->{$disk} = PVE::QemuServer::print_drive($vmid, $newdrive);
+
+ # keep old volume referenced as unused unless deletion was requested
+ PVE::QemuServer::add_unused_volume($conf, $old_volid) if !$param->{delete};
+
+ PVE::QemuServer::update_config_nolock($vmid, $conf, 1);
+
+ eval {
+ # try to deactivate volumes - avoid lvm LVs to be active on several nodes
+ PVE::Storage::deactivate_volumes($storecfg, [ $newdrive->{file} ])
+ if !$running;
+ };
+ warn $@ if $@;
+ };
+ if (my $err = $@) {
+
+ # roll back: free any volumes allocated by the failed copy
+ foreach my $volid (@$newvollist) {
+ eval { PVE::Storage::vdisk_free($storecfg, $volid); };
+ warn $@ if $@;
+ }
+ die "storage migration failed: $err";
+ }
+
+ if ($param->{delete}) {
+ # refuse to delete a volume that still backs snapshots;
+ # keep it as an unused disk instead
+ my $used_paths = PVE::QemuServer::get_used_paths($vmid, $storecfg, $conf, 1, 1);
+ my $path = PVE::Storage::path($storecfg, $old_volid);
+ if ($used_paths->{$path}){
+ warn "volume $old_volid has snapshots. Can't delete it\n";
+ PVE::QemuServer::add_unused_volume($conf, $old_volid);
+ PVE::QemuServer::update_config_nolock($vmid, $conf, 1);
+ } else {
+ eval { PVE::Storage::vdisk_free($storecfg, $old_volid); };
+ warn $@ if $@;
+ }
+ }
+ };
+
+ return $rpcenv->fork_worker('qmmove', $vmid, $authuser, $realcmd);
+ };
+
+ return PVE::QemuServer::lock_config($vmid, $updatefn);
 }});
__PACKAGE__->register_method({
my $digest = extract_param($param, 'digest');
my $disk = extract_param($param, 'disk');
-
+
my $sizestr = extract_param($param, 'size');
my $skiplock = extract_param($param, 'skiplock');
die "you can't resize a cdrom\n" if PVE::QemuServer::drive_is_cdrom($drive);
- die "you can't online resize a virtio windows bootdisk\n"
+ die "you can't online resize a virtio windows bootdisk\n"
if PVE::QemuServer::check_running($vmid) && $conf->{bootdisk} eq $disk && $conf->{ostype} =~ m/^w/ && $disk =~ m/^virtio/;
my ($storeid, $volname) = PVE::Storage::parse_volume_id($volid);
PVE::Cluster::log_msg('info', $authuser, "update VM $vmid: resize --disk $disk --size $sizestr");
PVE::QemuServer::qemu_block_resize($vmid, "drive-$disk", $storecfg, $volid, $newsize);
-
+
$drive->{size} = $newsize;
$conf->{$disk} = PVE::QemuServer::print_drive($vmid, $drive);
foreach my $name (keys %$snaphash) {
my $d = $snaphash->{$name};
- my $item = {
- name => $name,
- snaptime => $d->{snaptime} || 0,
+ my $item = {
+ name => $name,
+ snaptime => $d->{snaptime} || 0,
vmstate => $d->{vmstate} ? 1 : 0,
description => $d->{description} || '',
};
my $realcmd = sub {
PVE::Cluster::log_msg('info', $authuser, "snapshot VM $vmid: $snapname");
- PVE::QemuServer::snapshot_create($vmid, $snapname, $param->{vmstate},
+ PVE::QemuServer::snapshot_create($vmid, $snapname, $param->{vmstate},
$param->{freezefs}, $param->{description});
};
my $snap = $conf->{snapshots}->{$snapname};
- die "snapshot '$snapname' does not exist\n" if !defined($snap);
-
+ die "snapshot '$snapname' does not exist\n" if !defined($snap);
+
$snap->{description} = $param->{description} if defined($param->{description});
PVE::QemuServer::update_config_nolock($vmid, $conf, 1);
my $snap = $conf->{snapshots}->{$snapname};
- die "snapshot '$snapname' does not exist\n" if !defined($snap);
-
+ die "snapshot '$snapname' does not exist\n" if !defined($snap);
+
return $snap;
}});
proxyto => 'node',
description => "Create a Template.",
permissions => {
- description => "You need 'VM.Allocate' permissions on /vms/{vmid} or on the VM pool /pool/{pool}.",
- check => [ 'or',
- [ 'perm', '/vms/{vmid}', ['VM.Allocate']],
- [ 'perm', '/pool/{pool}', ['VM.Allocate'], require_param => 'pool'],
- ],
+ description => "You need 'VM.Allocate' permissions on /vms/{vmid}",
+ check => [ 'perm', '/vms/{vmid}', ['VM.Allocate']],
},
parameters => {
additionalProperties => 0,
PVE::QemuServer::check_lock($conf);
- die "unable to create template, because VM contains snapshots\n"
+ die "unable to create template, because VM contains snapshots\n"
if $conf->{snapshots} && scalar(keys %{$conf->{snapshots}});
- die "you can't convert a template to a template\n"
+ die "you can't convert a template to a template\n"
if PVE::QemuServer::is_template($conf) && !$disk;
- die "you can't convert a VM to template if VM is running\n"
+ die "you can't convert a VM to template if VM is running\n"
if PVE::QemuServer::check_running($vmid);
my $realcmd = sub {
return undef;
}});
-
-
1;