use PVE::Cluster qw (cfs_read_file cfs_write_file);;
use PVE::SafeSyslog;
use PVE::Tools qw(extract_param);
-use PVE::Exception qw(raise raise_param_exc);
+use PVE::Exception qw(raise raise_param_exc raise_perm_exc);
use PVE::Storage;
use PVE::JSONSchema qw(get_standard_option);
use PVE::RESTHandler;
use PVE::RPCEnvironment;
use PVE::AccessControl;
use PVE::INotify;
+use PVE::Network;
use Data::Dumper; # fixme: remove
});
};
+# Check whether $authuser is allowed to clone every drive referenced by
+# $conf. If $storage is set, new disk allocation is redirected there, so
+# the Datastore.AllocateSpace check is performed against that storage id
+# instead of the source volume's storage.
+# Returns 1 if all referenced volumes live on shared storage, 0 otherwise
+# (presumably used by callers to decide whether cloning to another node
+# is possible — TODO confirm at the call site).
+my $check_storage_access_clone = sub {
+ my ($rpcenv, $authuser, $storecfg, $conf, $storage) = @_;
+
+ my $sharedvm = 1;
+
+ PVE::QemuServer::foreach_drive($conf, sub {
+ my ($ds, $drive) = @_;
+
+ my $isCDROM = PVE::QemuServer::drive_is_cdrom($drive);
+
+ my $volid = $drive->{file};
+
+ # drives without a backing volume need no storage permission
+ return if !$volid || $volid eq 'none';
+
+ if ($isCDROM) {
+ if ($volid eq 'cdrom') {
+ # the special 'cdrom' value requires console privileges on /
+ $rpcenv->check($authuser, "/", ['Sys.Console']);
+ } else {
+ # we simply allow access
+ my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
+ my $scfg = PVE::Storage::storage_config($storecfg, $sid);
+ $sharedvm = 0 if !$scfg->{shared};
+
+ }
+ } else {
+ my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
+ my $scfg = PVE::Storage::storage_config($storecfg, $sid);
+ $sharedvm = 0 if !$scfg->{shared};
+
+ # allocation happens on the override storage when given
+ $sid = $storage if $storage;
+ $rpcenv->check($authuser, "/storage/$sid", ['Datastore.AllocateSpace']);
+ }
+ });
+
+ return $sharedvm;
+};
+
# Note: $pool is only needed when creating a VM, because pool permissions
# are automatically inherited if VM already exists inside a pool.
my $create_disks = sub {
my $volid = $disk->{file};
if (!$volid || $volid eq 'none' || $volid eq 'cdrom') {
- $res->{$ds} = $settings->{$ds};
+ delete $disk->{size};
+ $res->{$ds} = PVE::QemuServer::print_drive($vmid, $disk);
} elsif ($volid =~ m/^(([^:\s]+):)?(\d+(\.\d+)?)$/) {
my ($storeid, $size) = ($2 || $default_storage, $3);
die "no storage ID specified (and no default storage)\n" if !$storeid;
} else {
my $path = $rpcenv->check_volume_access($authuser, $storecfg, $vmid, $volid);
-
- my ($storeid, $volname) = PVE::Storage::parse_volume_id($volid, 1);
- my $foundvolid = undef;
+ my $volid_is_new = 1;
- if ($storeid) {
- PVE::Storage::activate_volumes($storecfg, [ $volid ]);
- my $dl = PVE::Storage::vdisk_list($storecfg, $storeid, undef);
+ if ($conf->{$ds}) {
+ my $olddrive = PVE::QemuServer::parse_drive($ds, $conf->{$ds});
+ $volid_is_new = undef if $olddrive->{file} && $olddrive->{file} eq $volid;
+ }
- PVE::Storage::foreach_volid($dl, sub {
- my ($volumeid) = @_;
- if($volumeid eq $volid) {
- $foundvolid = 1;
- return;
- }
- });
+ if ($volid_is_new) {
+
+ my ($storeid, $volname) = PVE::Storage::parse_volume_id($volid, 1);
+
+ PVE::Storage::activate_volumes($storecfg, [ $volid ]) if $storeid;
+
+ my $size = PVE::Storage::volume_size_info($storecfg, $volid);
+
+ die "volume $volid does not exists\n" if !$size;
+
+ $disk->{size} = $size;
}
-
- die "image '$path' does not exists\n" if (!(-f $path || -b $path || $foundvolid));
- my ($size) = PVE::Storage::volume_size_info($storecfg, $volid, 1);
- $disk->{size} = $size;
$res->{$ds} = PVE::QemuServer::print_drive($vmid, $disk);
}
});
next if PVE::QemuServer::valid_drivename($opt);
if ($opt eq 'sockets' || $opt eq 'cores' ||
- $opt eq 'cpu' || $opt eq 'smp' ||
+ $opt eq 'cpu' || $opt eq 'smp' ||
$opt eq 'cpulimit' || $opt eq 'cpuunits') {
$rpcenv->check_vm_perm($authuser, $vmid, $pool, ['VM.Config.CPU']);
} elsif ($opt eq 'boot' || $opt eq 'bootdisk') {
$rpcenv->check_vm_perm($authuser, $vmid, $pool, ['VM.Config.Disk']);
- } elsif ($opt eq 'memory' || $opt eq 'balloon') {
+ } elsif ($opt eq 'memory' || $opt eq 'balloon' || $opt eq 'shares') {
$rpcenv->check_vm_perm($authuser, $vmid, $pool, ['VM.Config.Memory']);
} elsif ($opt eq 'args' || $opt eq 'lock') {
die "only root can set '$opt' config\n";
- } elsif ($opt eq 'cpu' || $opt eq 'kvm' || $opt eq 'acpi' ||
+ } elsif ($opt eq 'cpu' || $opt eq 'kvm' || $opt eq 'acpi' || $opt eq 'machine' ||
$opt eq 'vga' || $opt eq 'watchdog' || $opt eq 'tablet') {
$rpcenv->check_vm_perm($authuser, $vmid, $pool, ['VM.Config.HWType']);
} elsif ($opt =~ m/^net\d+$/) {
return $res;
}});
+
+
__PACKAGE__->register_method({
name => 'create_vm',
path => '',
method => 'POST',
description => "Create or restore a virtual machine.",
permissions => {
- description => "You need 'VM.Allocate' permissions on /vms/{vmid} or on the VM pool /pool/{pool}. If you create disks you need 'Datastore.AllocateSpace' on any used storage.",
- check => [ 'or',
- [ 'perm', '/vms/{vmid}', ['VM.Allocate']],
- [ 'perm', '/pool/{pool}', ['VM.Allocate'], require_param => 'pool'],
- ],
+ description => "You need 'VM.Allocate' permissions on /vms/{vmid} or on the VM pool /pool/{pool}. " .
+ "For restore (option 'archive'), it is enough if the user has 'VM.Backup' permission and the VM already exists. " .
+ "If you create disks you need 'Datastore.AllocateSpace' on any used storage.",
+ user => 'all', # check inside
},
protected => 1,
proxyto => 'node',
description => "Assign a unique random ethernet address.",
requires => 'archive',
},
- pool => {
+ pool => {
optional => 1,
type => 'string', format => 'pve-poolid',
description => "Add the VM to the specified pool.",
my $force = extract_param($param, 'force');
my $unique = extract_param($param, 'unique');
-
+
my $pool = extract_param($param, 'pool');
my $filename = PVE::QemuServer::config_file($vmid);
if (defined($pool)) {
$rpcenv->check_pool_exist($pool);
- }
+ }
$rpcenv->check($authuser, "/storage/$storage", ['Datastore.AllocateSpace'])
if defined($storage);
+ if ($rpcenv->check($authuser, "/vms/$vmid", ['VM.Allocate'], 1)) {
+ # OK
+ } elsif ($pool && $rpcenv->check($authuser, "/pool/$pool", ['VM.Allocate'], 1)) {
+ # OK
+ } elsif ($archive && $force && (-f $filename) &&
+ $rpcenv->check($authuser, "/vms/$vmid", ['VM.Backup'], 1)) {
+ # OK: user has VM.Backup permissions, and want to restore an existing VM
+ } else {
+ raise_perm_exc();
+ }
+
if (!$archive) {
&$resolve_cdrom_alias($param);
}
}
- my $addVMtoPoolFn = sub {
- my $usercfg = cfs_read_file("user.cfg");
- if (my $data = $usercfg->{pools}->{$pool}) {
- $data->{vms}->{$vmid} = 1;
- $usercfg->{vms}->{$vmid} = $pool;
- cfs_write_file("user.cfg", $usercfg);
- }
- };
-
my $restorefn = sub {
+ # fixme: this test does not work if VM exists on other node!
if (-f $filename) {
die "unable to restore vm $vmid: config file already exists\n"
if !$force;
die "unable to restore vm $vmid: vm is running\n"
if PVE::QemuServer::check_running($vmid);
-
- # destroy existing data - keep empty config
- PVE::QemuServer::destroy_vm($storecfg, $vmid, 1);
}
my $realcmd = sub {
pool => $pool,
unique => $unique });
- PVE::AccessControl::lock_user_config($addVMtoPoolFn, "can't add VM to pool") if $pool;
+ PVE::AccessControl::add_vm_to_pool($vmid, $pool) if $pool;
};
return $rpcenv->fork_worker('qmrestore', $vmid, $authuser, $realcmd);
die "create failed - $err";
}
- PVE::AccessControl::lock_user_config($addVMtoPoolFn, "can't add VM to pool") if $pool;
+ PVE::AccessControl::add_vm_to_pool($vmid, $pool) if $pool;
};
return $rpcenv->fork_worker('qmcreate', $vmid, $authuser, $realcmd);
{ subdir => 'vncproxy' },
{ subdir => 'migrate' },
{ subdir => 'resize' },
+ { subdir => 'move' },
{ subdir => 'rrd' },
{ subdir => 'rrddata' },
{ subdir => 'monitor' },
{ subdir => 'snapshot' },
+ { subdir => 'spiceproxy' },
];
return $res;
if (!PVE::QemuServer::drive_is_cdrom($drive)) {
my $volid = $drive->{file};
+
if (&$vm_is_volid_owner($storecfg, $vmid, $volid)) {
if ($force || $key =~ m/^unused/) {
- eval { PVE::Storage::vdisk_free($storecfg, $volid); };
+ eval {
+ # check if the disk is really unused
+ my $used_paths = PVE::QemuServer::get_used_paths($vmid, $storecfg, $conf, 1, $key);
+ my $path = PVE::Storage::path($storecfg, $volid);
+
+ die "unable to delete '$volid' - volume is still in use (snapshot?)\n"
+ if $used_paths->{$path};
+
+ PVE::Storage::vdisk_free($storecfg, $volid);
+ };
die $@ if $@;
} else {
PVE::QemuServer::add_unused_volume($conf, $volid, $vmid);
$rpcenv->check_vm_perm($authuser, $vmid, undef, ['VM.Config.Disk']);
my $drive = PVE::QemuServer::parse_drive($opt, $conf->{$opt});
- if (my $sid = &$test_deallocate_drive($storecfg, $vmid, $opt, $drive, $force)) {
+ if (my $sid = &$test_deallocate_drive($storecfg, $vmid, $opt, $drive, $force)) {
$rpcenv->check($authuser, "/storage/$sid", ['Datastore.Allocate']);
}
}
-
- die "error hot-unplug $opt" if !PVE::QemuServer::vm_deviceunplug($vmid, $conf, $opt);
+
+ my $unplugwarning = "";
+ if($conf->{ostype} && $conf->{ostype} eq 'l26'){
+ $unplugwarning = "<br>verify that you have acpiphp && pci_hotplug modules loaded in your guest VM";
+ }elsif($conf->{ostype} && $conf->{ostype} eq 'l24'){
+ $unplugwarning = "<br>kernel 2.4 don't support hotplug, please disable hotplug in options";
+ }elsif(!$conf->{ostype} || ($conf->{ostype} && $conf->{ostype} eq 'other')){
+ $unplugwarning = "<br>verify that your guest support acpi hotplug";
+ }
+
+ if($opt eq 'tablet'){
+ PVE::QemuServer::vm_deviceplug(undef, $conf, $vmid, $opt);
+ }else{
+ die "error hot-unplug $opt $unplugwarning" if !PVE::QemuServer::vm_deviceunplug($vmid, $conf, $opt);
+ }
if ($isDisk) {
my $drive = PVE::QemuServer::parse_drive($opt, $conf->{$opt});
# Numeric "not equal" that is safe for undef operands: two undefs compare
# equal (returns 0); an undef versus a defined value counts as different
# (returns 1); otherwise falls through to plain numeric !=.
my $safe_num_ne = sub {
my ($a, $b) = @_;
- return 0 if !defined($a) && !defined($b);
- return 1 if !defined($a);
- return 1 if !defined($b);
+ return 0 if !defined($a) && !defined($b);
+ return 1 if !defined($a);
+ return 1 if !defined($b);
return $a != $b;
};
&$safe_num_ne($drive->{iops}, $old_drive->{iops}) ||
&$safe_num_ne($drive->{iops_rd}, $old_drive->{iops_rd}) ||
&$safe_num_ne($drive->{iops_wr}, $old_drive->{iops_wr})) {
- PVE::QemuServer::qemu_block_set_io_throttle($vmid,"drive-$opt", $drive->{mbps}*1024*1024,
- $drive->{mbps_rd}*1024*1024, $drive->{mbps_wr}*1024*1024,
- $drive->{iops}, $drive->{iops_rd}, $drive->{iops_wr})
+ PVE::QemuServer::qemu_block_set_io_throttle($vmid,"drive-$opt",
+ ($drive->{mbps} || 0)*1024*1024,
+ ($drive->{mbps_rd} || 0)*1024*1024,
+ ($drive->{mbps_wr} || 0)*1024*1024,
+ $drive->{iops} || 0,
+ $drive->{iops_rd} || 0,
+ $drive->{iops_wr} || 0)
if !PVE::QemuServer::drive_is_cdrom($drive);
}
}
# Apply a changed netX option. If the VM is running and the NIC already
# exists, try to adjust the device in place (rate limit change, or
# bridge/VLAN re-plug) and only hot-unplug when the NIC model changed or
# when it is not a bridge-to-bridge change. The new config value is then
# written and the device (re)plugged below.
my $vmconfig_update_net = sub {
my ($rpcenv, $authuser, $conf, $storecfg, $vmid, $opt, $value) = @_;
- if ($conf->{$opt}) {
- #if online update, then unplug first
- die "error hot-unplug $opt for update" if !PVE::QemuServer::vm_deviceunplug($vmid, $conf, $opt);
- }
+ if ($conf->{$opt} && PVE::QemuServer::check_running($vmid)) {
+ my $oldnet = PVE::QemuServer::parse_net($conf->{$opt});
+ my $newnet = PVE::QemuServer::parse_net($value);
+
+ if($oldnet->{model} ne $newnet->{model}){
+ #if model change, we try to hot-unplug
+ die "error hot-unplug $opt for update" if !PVE::QemuServer::vm_deviceunplug($vmid, $conf, $opt);
+ }else{
+
+ if($newnet->{bridge} && $oldnet->{bridge}){
+ # NOTE(review): "my $iface = ... if COND" is the discouraged
+ # conditional-my idiom (undefined behavior per perlsyn); this
+ # relies on $opt always matching /net\d+/ here — confirm.
+ my $iface = "tap".$vmid."i".$1 if $opt =~ m/net(\d+)/;
+
+ if($newnet->{rate} ne $oldnet->{rate}){
+ PVE::Network::tap_rate_limit($iface, $newnet->{rate});
+ }
+
+ # bridge or VLAN tag changed: re-plug the tap into the new bridge;
+ # unplug failure is deliberately ignored (eval) — best effort
+ if(($newnet->{bridge} ne $oldnet->{bridge}) || ($newnet->{tag} ne $oldnet->{tag})){
+ eval{PVE::Network::tap_unplug($iface, $oldnet->{bridge}, $oldnet->{tag});};
+ PVE::Network::tap_plug($iface, $newnet->{bridge}, $newnet->{tag});
+ }
+ }else{
+ #if bridge/nat mode change, we try to hot-unplug
+ die "error hot-unplug $opt for update" if !PVE::QemuServer::vm_deviceunplug($vmid, $conf, $opt);
+ }
+ }
+
+ }
$conf->{$opt} = $value;
PVE::QemuServer::update_config_nolock($vmid, $conf, 1);
$conf = PVE::QemuServer::load_config($vmid); # update/reload
die "error hotplug $opt" if !PVE::QemuServer::vm_deviceplug($storecfg, $conf, $vmid, $opt, $net);
};
-my $vm_config_perm_list = [
- 'VM.Config.Disk',
- 'VM.Config.CDROM',
- 'VM.Config.CPU',
- 'VM.Config.Memory',
- 'VM.Config.Network',
- 'VM.Config.HWType',
- 'VM.Config.Options',
- ];
+# POST/PUT {vmid}/config implementation
+#
+# The original API used PUT (idempotent) and we assumed that all operations
+# are fast. But it turned out that almost any configuration change can
+# involve hot-plug actions, or disk alloc/free. Such actions can take a long
+# time to complete and have side effects (not idempotent).
+#
+# The new implementation uses POST and forks a worker process. We added
+# a new option 'background_delay'. If specified we wait up to
+# 'background_delay' seconds for the worker task to complete. It returns null
+# if the task is finished within that time, else we return the UPID.
-__PACKAGE__->register_method({
- name => 'update_vm',
- path => '{vmid}/config',
- method => 'PUT',
- protected => 1,
- proxyto => 'node',
- description => "Set virtual machine options.",
- permissions => {
- check => ['perm', '/vms/{vmid}', $vm_config_perm_list, any => 1],
- },
- parameters => {
- additionalProperties => 0,
- properties => PVE::QemuServer::json_config_properties(
- {
- node => get_standard_option('pve-node'),
- vmid => get_standard_option('pve-vmid'),
- skiplock => get_standard_option('skiplock'),
- delete => {
- type => 'string', format => 'pve-configid-list',
- description => "A list of settings you want to delete.",
- optional => 1,
- },
- force => {
- type => 'boolean',
- description => $opt_force_description,
- optional => 1,
- requires => 'delete',
- },
- digest => {
- type => 'string',
- description => 'Prevent changes if current configuration file has different SHA1 digest. This can be used to prevent concurrent modifications.',
- maxLength => 40,
- optional => 1,
- }
- }),
- },
- returns => { type => 'null'},
- code => sub {
- my ($param) = @_;
+# Shared implementation behind both PUT (sync) and POST (async)
+# {vmid}/config. Verifies parameters and permissions up front, then applies
+# deletions and additions/changes under the per-VM config lock.
+# $sync true: run the worker inline and return undef (PUT semantics).
+# $sync false: fork a 'qmconfig' worker; with 'background_delay' we poll up
+# to that many seconds and return undef on timely success, else the UPID.
+my $update_vm_api = sub {
+ my ($param, $sync) = @_;
- my $rpcenv = PVE::RPCEnvironment::get();
+ my $rpcenv = PVE::RPCEnvironment::get();
- my $authuser = $rpcenv->get_user();
+ my $authuser = $rpcenv->get_user();
- my $node = extract_param($param, 'node');
+ my $node = extract_param($param, 'node');
- my $vmid = extract_param($param, 'vmid');
+ my $vmid = extract_param($param, 'vmid');
- my $digest = extract_param($param, 'digest');
+ my $digest = extract_param($param, 'digest');
- my @paramarr = (); # used for log message
- foreach my $key (keys %$param) {
- push @paramarr, "-$key", $param->{$key};
- }
+ my $background_delay = extract_param($param, 'background_delay');
- my $skiplock = extract_param($param, 'skiplock');
- raise_param_exc({ skiplock => "Only root may use this option." })
- if $skiplock && $authuser ne 'root@pam';
+ my @paramarr = (); # used for log message
+ foreach my $key (keys %$param) {
+ push @paramarr, "-$key", $param->{$key};
+ }
- my $delete_str = extract_param($param, 'delete');
+ my $skiplock = extract_param($param, 'skiplock');
+ raise_param_exc({ skiplock => "Only root may use this option." })
+ if $skiplock && $authuser ne 'root@pam';
- my $force = extract_param($param, 'force');
+ my $delete_str = extract_param($param, 'delete');
- die "no options specified\n" if !$delete_str && !scalar(keys %$param);
+ my $force = extract_param($param, 'force');
- my $storecfg = PVE::Storage::config();
+ die "no options specified\n" if !$delete_str && !scalar(keys %$param);
- &$resolve_cdrom_alias($param);
+ my $storecfg = PVE::Storage::config();
- # now try to verify all parameters
+ my $defaults = PVE::QemuServer::load_defaults();
- my @delete = ();
- foreach my $opt (PVE::Tools::split_list($delete_str)) {
- $opt = 'ide2' if $opt eq 'cdrom';
- raise_param_exc({ delete => "you can't use '-$opt' and " .
- "-delete $opt' at the same time" })
- if defined($param->{$opt});
+ &$resolve_cdrom_alias($param);
- if (!PVE::QemuServer::option_exists($opt)) {
- raise_param_exc({ delete => "unknown option '$opt'" });
- }
+ # now try to verify all parameters
+
+ my @delete = ();
+ foreach my $opt (PVE::Tools::split_list($delete_str)) {
+ $opt = 'ide2' if $opt eq 'cdrom';
+ raise_param_exc({ delete => "you can't use '-$opt' and " .
+ "-delete $opt' at the same time" })
+ if defined($param->{$opt});
- push @delete, $opt;
+ if (!PVE::QemuServer::option_exists($opt)) {
+ raise_param_exc({ delete => "unknown option '$opt'" });
}
- foreach my $opt (keys %$param) {
- if (PVE::QemuServer::valid_drivename($opt)) {
- # cleanup drive path
- my $drive = PVE::QemuServer::parse_drive($opt, $param->{$opt});
- PVE::QemuServer::cleanup_drive_path($opt, $storecfg, $drive);
- $param->{$opt} = PVE::QemuServer::print_drive($vmid, $drive);
- } elsif ($opt =~ m/^net(\d+)$/) {
- # add macaddr
- my $net = PVE::QemuServer::parse_net($param->{$opt});
- $param->{$opt} = PVE::QemuServer::print_net($net);
- }
+ push @delete, $opt;
+ }
+
+ foreach my $opt (keys %$param) {
+ if (PVE::QemuServer::valid_drivename($opt)) {
+ # cleanup drive path
+ my $drive = PVE::QemuServer::parse_drive($opt, $param->{$opt});
+ PVE::QemuServer::cleanup_drive_path($opt, $storecfg, $drive);
+ $param->{$opt} = PVE::QemuServer::print_drive($vmid, $drive);
+ } elsif ($opt =~ m/^net(\d+)$/) {
+ # add macaddr
+ my $net = PVE::QemuServer::parse_net($param->{$opt});
+ $param->{$opt} = PVE::QemuServer::print_net($net);
+ }
}
+ }
- &$check_vm_modify_config_perm($rpcenv, $authuser, $vmid, undef, [@delete]);
+ &$check_vm_modify_config_perm($rpcenv, $authuser, $vmid, undef, [@delete]);
- &$check_vm_modify_config_perm($rpcenv, $authuser, $vmid, undef, [keys %$param]);
+ &$check_vm_modify_config_perm($rpcenv, $authuser, $vmid, undef, [keys %$param]);
- &$check_storage_access($rpcenv, $authuser, $storecfg, $vmid, $param);
+ &$check_storage_access($rpcenv, $authuser, $storecfg, $vmid, $param);
- my $updatefn = sub {
+ my $updatefn = sub {
- my $conf = PVE::QemuServer::load_config($vmid);
+ my $conf = PVE::QemuServer::load_config($vmid);
- die "checksum missmatch (file change by other user?)\n"
- if $digest && $digest ne $conf->{digest};
+ die "checksum missmatch (file change by other user?)\n"
+ if $digest && $digest ne $conf->{digest};
+
+ PVE::QemuServer::check_lock($conf) if !$skiplock;
+
+ # reject balloon targets larger than the (possibly updated) max memory
+ if ($param->{memory} || defined($param->{balloon})) {
+ my $maxmem = $param->{memory} || $conf->{memory} || $defaults->{memory};
+ my $balloon = defined($param->{balloon}) ? $param->{balloon} : $conf->{balloon};
+
+ die "balloon value too large (must be smaller than assigned memory)\n"
+ if $balloon && $balloon > $maxmem;
+ }
+
+ PVE::Cluster::log_msg('info', $authuser, "update VM $vmid: " . join (' ', @paramarr));
- PVE::QemuServer::check_lock($conf) if !$skiplock;
+ my $worker = sub {
- PVE::Cluster::log_msg('info', $authuser, "update VM $vmid: " . join (' ', @paramarr));
+ print "update VM $vmid: " . join (' ', @paramarr) . "\n";
foreach my $opt (@delete) { # delete
$conf = PVE::QemuServer::load_config($vmid); # update/reload
&$vmconfig_delete_option($rpcenv, $authuser, $conf, $storecfg, $vmid, $opt, $force);
}
+ my $running = PVE::QemuServer::check_running($vmid);
+
foreach my $opt (keys %$param) { # add/change
$conf = PVE::QemuServer::load_config($vmid); # update/reload
if (PVE::QemuServer::valid_drivename($opt)) {
- &$vmconfig_update_disk($rpcenv, $authuser, $conf, $storecfg, $vmid,
+ &$vmconfig_update_disk($rpcenv, $authuser, $conf, $storecfg, $vmid,
$opt, $param->{$opt}, $force);
-
+
} elsif ($opt =~ m/^net(\d+)$/) { #nics
- &$vmconfig_update_net($rpcenv, $authuser, $conf, $storecfg, $vmid,
+ &$vmconfig_update_net($rpcenv, $authuser, $conf, $storecfg, $vmid,
$opt, $param->{$opt});
} else {
+ # tablet is hot-(un)pluggable without a config reload
+ if($opt eq 'tablet' && $param->{$opt} == 1){
+ PVE::QemuServer::vm_deviceplug(undef, $conf, $vmid, $opt);
+ } elsif($opt eq 'tablet' && $param->{$opt} == 0){
+ PVE::QemuServer::vm_deviceunplug($vmid, $conf, $opt);
+ }
+
$conf->{$opt} = $param->{$opt};
PVE::QemuServer::update_config_nolock($vmid, $conf, 1);
}
}
+
+ # allow manual ballooning if shares is set to zero
+ if ($running && defined($param->{balloon}) &&
+ defined($conf->{shares}) && ($conf->{shares} == 0)) {
+ my $balloon = $param->{'balloon'} || $conf->{memory} || $defaults->{memory};
+ PVE::QemuServer::vm_mon_cmd($vmid, "balloon", value => $balloon*1024*1024);
+ }
};
- PVE::QemuServer::lock_config($vmid, $updatefn);
+ if ($sync) {
+ &$worker();
+ return undef;
+ } else {
+ my $upid = $rpcenv->fork_worker('qmconfig', $vmid, $authuser, $worker);
+
+ if ($background_delay) {
+
+ # Note: It would be better to do that in the Event based HTTPServer
+ # to avoid blocking call to sleep.
+
+ my $end_time = time() + $background_delay;
+
+ my $task = PVE::Tools::upid_decode($upid);
+
+ my $running = 1;
+ while (time() < $end_time) {
+ $running = PVE::ProcFSTools::check_process_running($task->{pid}, $task->{pstart});
+ last if !$running;
+ sleep(1); # this gets interrupted when child process ends
+ }
+
+ if (!$running) {
+ # worker finished within the delay: report success as null,
+ # propagate any failure status as an exception
+ my $status = PVE::Tools::upid_read_status($upid);
+ return undef if $status eq 'OK';
+ die $status;
+ }
+ }
+
+ return $upid;
+ }
+ };
+
+ return PVE::QemuServer::lock_config($vmid, $updatefn);
+};
+
+# Privileges accepted for the {vmid}/config API entry points: holding any
+# one of them (checked with "any => 1") grants access to the call itself;
+# individual options are checked separately per modification.
+my $vm_config_perm_list = [
+ 'VM.Config.Disk',
+ 'VM.Config.CDROM',
+ 'VM.Config.CPU',
+ 'VM.Config.Memory',
+ 'VM.Config.Network',
+ 'VM.Config.HWType',
+ 'VM.Config.Options',
+ ];
+
+# Asynchronous variant: POST {vmid}/config. Forks a worker via
+# $update_vm_api and returns its UPID (string), or null when
+# 'background_delay' was given and the task finished within that time.
+__PACKAGE__->register_method({
+ name => 'update_vm_async',
+ path => '{vmid}/config',
+ method => 'POST',
+ protected => 1,
+ proxyto => 'node',
+ description => "Set virtual machine options (asynchrounous API).",
+ permissions => {
+ check => ['perm', '/vms/{vmid}', $vm_config_perm_list, any => 1],
+ },
+ parameters => {
+ additionalProperties => 0,
+ properties => PVE::QemuServer::json_config_properties(
+ {
+ node => get_standard_option('pve-node'),
+ vmid => get_standard_option('pve-vmid'),
+ skiplock => get_standard_option('skiplock'),
+ delete => {
+ type => 'string', format => 'pve-configid-list',
+ description => "A list of settings you want to delete.",
+ optional => 1,
+ },
+ force => {
+ type => 'boolean',
+ description => $opt_force_description,
+ optional => 1,
+ requires => 'delete',
+ },
+ digest => {
+ type => 'string',
+ description => 'Prevent changes if current configuration file has different SHA1 digest. This can be used to prevent concurrent modifications.',
+ maxLength => 40,
+ optional => 1,
+ },
+ background_delay => {
+ type => 'integer',
+ description => "Time to wait for the task to finish. We return 'null' if the task finish within that time.",
+ minimum => 1,
+ maximum => 30,
+ optional => 1,
+ },
+ }),
+ },
+ returns => {
+ type => 'string',
+ optional => 1,
+ },
+ code => $update_vm_api,
+});
+# Synchronous variant: PUT {vmid}/config. Calls $update_vm_api with $sync
+# set, so the update runs inline in the request and returns null.
+__PACKAGE__->register_method({
+ name => 'update_vm',
+ path => '{vmid}/config',
+ method => 'PUT',
+ protected => 1,
+ proxyto => 'node',
+ description => "Set virtual machine options (synchrounous API) - You should consider using the POST method instead for any actions involving hotplug or storage allocation.",
+ permissions => {
+ check => ['perm', '/vms/{vmid}', $vm_config_perm_list, any => 1],
+ },
+ parameters => {
+ additionalProperties => 0,
+ properties => PVE::QemuServer::json_config_properties(
+ {
+ node => get_standard_option('pve-node'),
+ vmid => get_standard_option('pve-vmid'),
+ skiplock => get_standard_option('skiplock'),
+ delete => {
+ type => 'string', format => 'pve-configid-list',
+ description => "A list of settings you want to delete.",
+ optional => 1,
+ },
+ force => {
+ type => 'boolean',
+ description => $opt_force_description,
+ optional => 1,
+ requires => 'delete',
+ },
+ digest => {
+ type => 'string',
+ description => 'Prevent changes if current configuration file has different SHA1 digest. This can be used to prevent concurrent modifications.',
+ maxLength => 40,
+ optional => 1,
+ },
+ }),
+ },
+ returns => { type => 'null' },
+ code => sub {
+ my ($param) = @_;
+ &$update_vm_api($param, 1);
return undef;
- }});
+ }
+});
__PACKAGE__->register_method({
my $storecfg = PVE::Storage::config();
- my $delVMfromPoolFn = sub {
+ my $delVMfromPoolFn = sub {
my $usercfg = cfs_read_file("user.cfg");
if (my $pool = $usercfg->{vms}->{$vmid}) {
if (my $data = $usercfg->{pools}->{$pool}) {
PVE::QemuServer::vm_destroy($storecfg, $vmid, $skiplock);
- PVE::AccessControl::lock_user_config($delVMfromPoolFn, "pool cleanup failed");
+ PVE::AccessControl::remove_vm_from_pool($vmid);
};
return $rpcenv->fork_worker('qmdestroy', $vmid, $authuser, $realcmd);
$remip = PVE::Cluster::remote_node_ip($node);
}
- # NOTE: kvm VNC traffic is already TLS encrypted,
- # so we select the fastest chipher here (or 'none'?)
- my $remcmd = $remip ? ['/usr/bin/ssh', '-T', '-o', 'BatchMode=yes',
- '-c', 'blowfish-cbc', $remip] : [];
+ # NOTE: kvm VNC traffic is already TLS encrypted
+ my $remcmd = $remip ? ['/usr/bin/ssh', '-T', '-o', 'BatchMode=yes', $remip] : [];
my $timeout = 10;
my $upid = $rpcenv->fork_worker('vncproxy', $vmid, $authuser, $realcmd);
+ PVE::Tools::wait_for_vnc_port($port);
+
return {
user => $authuser,
ticket => $ticket,
}});
# GET {vmid}/spiceproxy - build a SPICE connection descriptor for the VM:
# sets a short-lived SPICE password on the running VM via the monitor and
# returns ticket/host/proxy data for the client.
__PACKAGE__->register_method({
- name => 'vmcmdidx',
- path => '{vmid}/status',
+ name => 'spiceproxy',
+ path => '{vmid}/spiceproxy',
method => 'GET',
- proxyto => 'node',
- description => "Directory index",
+ protected => 1,
+ proxyto => 'node', # fixme: use direct connections or ssh tunnel?
permissions => {
- user => 'all',
+ check => ['perm', '/vms/{vmid}', [ 'VM.Console' ]],
},
+ description => "Returns a SPICE configuration to connect to the VM.",
parameters => {
additionalProperties => 0,
properties => {
},
},
returns => {
- type => 'array',
- items => {
- type => "object",
- properties => {
- subdir => { type => 'string' },
- },
+ additionalProperties => 1,
+ properties => {
+ type => { type => 'string' },
+ password => { type => 'string' },
+ proxy => { type => 'string' },
+ host => { type => 'string' },
+ port => { type => 'integer' },
},
- links => [ { rel => 'child', href => "{subdir}" } ],
},
code => sub {
my ($param) = @_;
- # test if VM exists
- my $conf = PVE::QemuServer::load_config($param->{vmid});
+ my $rpcenv = PVE::RPCEnvironment::get();
- my $res = [
+ my $authuser = $rpcenv->get_user();
+
+ my $vmid = $param->{vmid};
+ my $node = $param->{node};
+
+ my $remip;
+
+ # Note: we currently use "proxyto => 'node'", so this code will never trigger
+ if ($node ne 'localhost' && $node ne PVE::INotify::nodename()) {
+ $remip = PVE::Cluster::remote_node_ip($node);
+ }
+
+ my ($ticket, $proxyticket) = PVE::AccessControl::assemble_spice_ticket($authuser, $vmid, $node);
+
+ my $timeout = 10;
+
+ # Note: this only works if VM is on local node
+ PVE::QemuServer::vm_mon_cmd($vmid, "set_password", protocol => 'spice', password => $ticket);
+ PVE::QemuServer::vm_mon_cmd($vmid, "expire_password", protocol => 'spice', time => "+30");
+
+ # allow access for group www-data to the spice socket,
+ # so that spiceproxy can access it
+ my $socket = PVE::QemuServer::spice_socket($vmid);
+ my $gid = getgrnam('www-data') || die "getgrnam failed - $!\n";
+ chown 0, $gid, $socket;
+ chmod 0770, $socket;
+
+ # fixme: is shelling out to 'hostname -f' the right way to get the FQDN here?
+ my $host = `hostname -f` || PVE::INotify::nodename();
+ chomp $host;
+
+ return {
+ type => 'spice',
+ host => $proxyticket,
+ proxy => $host,
+ port => 0, # not used for now
+ password => $ticket
+ };
+ }});
+
+__PACKAGE__->register_method({
+ name => 'vmcmdidx',
+ path => '{vmid}/status',
+ method => 'GET',
+ proxyto => 'node',
+ description => "Directory index",
+ permissions => {
+ user => 'all',
+ },
+ parameters => {
+ additionalProperties => 0,
+ properties => {
+ node => get_standard_option('pve-node'),
+ vmid => get_standard_option('pve-vmid'),
+ },
+ },
+ returns => {
+ type => 'array',
+ items => {
+ type => "object",
+ properties => {
+ subdir => { type => 'string' },
+ },
+ },
+ links => [ { rel => 'child', href => "{subdir}" } ],
+ },
+ code => sub {
+ my ($param) = @_;
+
+ # test if VM exists
+ my $conf = PVE::QemuServer::load_config($param->{vmid});
+
+ my $res = [
{ subdir => 'current' },
{ subdir => 'start' },
{ subdir => 'stop' },
my $cc = PVE::Cluster::cfs_read_file('cluster.conf');
if (PVE::Cluster::cluster_conf_lookup_pvevm($cc, 0, $vmid, 1)) {
return 1;
- }
+ }
return 0;
};
$status->{ha} = &$vm_is_ha_managed($param->{vmid});
+ if ($conf->{vga} && ($conf->{vga} eq 'qxl')) {
+ $status->{spice} = 1;
+ }
+
return $status;
}});
skiplock => get_standard_option('skiplock'),
stateuri => get_standard_option('pve-qm-stateuri'),
migratedfrom => get_standard_option('pve-node',{ optional => 1 }),
-
+ machine => get_standard_option('pve-qm-machine'),
},
},
returns => {
my $vmid = extract_param($param, 'vmid');
+ my $machine = extract_param($param, 'machine');
+
my $stateuri = extract_param($param, 'stateuri');
raise_param_exc({ stateuri => "Only root may use this option." })
if $stateuri && $authuser ne 'root@pam';
syslog('info', "start VM $vmid: $upid\n");
- PVE::QemuServer::vm_start($storecfg, $vmid, $stateuri, $skiplock, $migratedfrom);
+ PVE::QemuServer::vm_start($storecfg, $vmid, $stateuri, $skiplock, $migratedfrom, undef, $machine);
return;
};
return;
}});
+# GET {vmid}/feature - report whether a feature (snapshot/clone/copy) is
+# available for the VM's current config or for a named snapshot. 'nodes'
+# lists the keys returned by PVE::QemuServer::shared_nodes($conf, $storecfg).
+__PACKAGE__->register_method({
+ name => 'vm_feature',
+ path => '{vmid}/feature',
+ method => 'GET',
+ proxyto => 'node',
+ protected => 1,
+ description => "Check if feature for virtual machine is available.",
+ permissions => {
+ check => ['perm', '/vms/{vmid}', [ 'VM.Audit' ]],
+ },
+ parameters => {
+ additionalProperties => 0,
+ properties => {
+ node => get_standard_option('pve-node'),
+ vmid => get_standard_option('pve-vmid'),
+ feature => {
+ description => "Feature to check.",
+ type => 'string',
+ enum => [ 'snapshot', 'clone', 'copy' ],
+ },
+ snapname => get_standard_option('pve-snapshot-name', {
+ optional => 1,
+ }),
+ },
+ },
+ returns => {
+ type => "object",
+ properties => {
+ hasFeature => { type => 'boolean' },
+ nodes => {
+ type => 'array',
+ items => { type => 'string' },
+ }
+ },
+ },
+ code => sub {
+ my ($param) = @_;
+
+ my $node = extract_param($param, 'node');
+
+ my $vmid = extract_param($param, 'vmid');
+
+ my $snapname = extract_param($param, 'snapname');
+
+ my $feature = extract_param($param, 'feature');
+
+ my $running = PVE::QemuServer::check_running($vmid);
+
+ my $conf = PVE::QemuServer::load_config($vmid);
+
+ # when a snapshot is named, evaluate the feature against its config
+ if($snapname){
+ my $snap = $conf->{snapshots}->{$snapname};
+ die "snapshot '$snapname' does not exist\n" if !defined($snap);
+ $conf = $snap;
+ }
+ my $storecfg = PVE::Storage::config();
+
+ my $nodelist = PVE::QemuServer::shared_nodes($conf, $storecfg);
+ my $hasFeature = PVE::QemuServer::has_feature($feature, $conf, $storecfg, $snapname, $running);
+
+ return {
+ hasFeature => $hasFeature,
+ nodes => [ keys %$nodelist ],
+ };
+ }});
+
+__PACKAGE__->register_method({
+    name => 'clone_vm',
+    path => '{vmid}/clone',
+    method => 'POST',
+    protected => 1,
+    proxyto => 'node',
+    description => "Create a copy of virtual machine/template.",
+    permissions => {
+	description => "You need 'VM.Clone' permissions on /vms/{vmid}, and 'VM.Allocate' permissions " .
+	    "on /vms/{newid} (or on the VM pool /pool/{pool}). You also need " .
+	    "'Datastore.AllocateSpace' on any used storage.",
+	check =>
+	[ 'and',
+	  ['perm', '/vms/{vmid}', [ 'VM.Clone' ]],
+	  [ 'or',
+	    [ 'perm', '/vms/{newid}', ['VM.Allocate']],
+	    [ 'perm', '/pool/{pool}', ['VM.Allocate'], require_param => 'pool'],
+	  ],
+	]
+    },
+    parameters => {
+	additionalProperties => 0,
+	properties => {
+	    node => get_standard_option('pve-node'),
+	    vmid => get_standard_option('pve-vmid'),
+	    newid => get_standard_option('pve-vmid', { description => 'VMID for the clone.' }),
+	    name => {
+		optional => 1,
+		type => 'string', format => 'dns-name',
+		description => "Set a name for the new VM.",
+	    },
+	    description => {
+		optional => 1,
+		type => 'string',
+		description => "Description for the new VM.",
+	    },
+	    pool => {
+		optional => 1,
+		type => 'string', format => 'pve-poolid',
+		description => "Add the new VM to the specified pool.",
+	    },
+	    snapname => get_standard_option('pve-snapshot-name', {
+		requires => 'full',
+		optional => 1,
+	    }),
+	    storage => get_standard_option('pve-storage-id', {
+		description => "Target storage for full clone.",
+		requires => 'full',
+		optional => 1,
+	    }),
+	    'format' => {
+		description => "Target format for file storage.",
+		requires => 'full',
+		type => 'string',
+		optional => 1,
+		enum => [ 'raw', 'qcow2', 'vmdk'],
+	    },
+	    full => {
+		optional => 1,
+		type => 'boolean',
+		description => "Create a full copy of all disk. This is always done when " .
+		    "you clone a normal VM. For VM templates, we try to create a linked clone by default.",
+		default => 0,
+	    },
+	    target => get_standard_option('pve-node', {
+		description => "Target node. Only allowed if the original VM is on shared storage.",
+		optional => 1,
+	    }),
+        },
+    },
+    returns => {
+	type => 'string',
+    },
+    code => sub {
+	my ($param) = @_;
+
+	my $rpcenv = PVE::RPCEnvironment::get();
+
+	my $authuser = $rpcenv->get_user();
+
+	my $node = extract_param($param, 'node');
+
+	my $vmid = extract_param($param, 'vmid');
+
+	my $newid = extract_param($param, 'newid');
+
+	my $pool = extract_param($param, 'pool');
+
+	if (defined($pool)) {
+	    $rpcenv->check_pool_exist($pool);
+	}
+
+	my $snapname = extract_param($param, 'snapname');
+
+	my $storage = extract_param($param, 'storage');
+
+	my $format = extract_param($param, 'format');
+
+	my $target = extract_param($param, 'target');
+
+	my $localnode = PVE::INotify::nodename();
+
+	# a target equal to the local node is the same as no target at all
+	undef $target if $target && ($target eq $localnode || $target eq 'localhost');
+
+	PVE::Cluster::check_node_exists($target) if $target;
+
+	my $storecfg = PVE::Storage::config();
+
+	if ($storage) {
+	    # check if storage is enabled on local node
+	    PVE::Storage::storage_check_enabled($storecfg, $storage);
+	    if ($target) {
+		# check if storage is available on target node
+		PVE::Storage::storage_check_node($storecfg, $storage, $target);
+		# clone only works if target storage is shared
+		my $scfg = PVE::Storage::storage_config($storecfg, $storage);
+		die "can't clone to non-shared storage '$storage'\n" if !$scfg->{shared};
+	    }
+	}
+
+	PVE::Cluster::check_cfs_quorum();
+
+	my $running = PVE::QemuServer::check_running($vmid) || 0;
+
+	# exclusive lock if VM is running - else shared lock is enough;
+	my $shared_lock = $running ? 0 : 1;
+
+	my $clonefn = sub {
+
+	    # do all tests after lock
+	    # we also try to do all tests before we fork the worker
+
+	    my $conf = PVE::QemuServer::load_config($vmid);
+
+	    PVE::QemuServer::check_lock($conf);
+
+	    # running state must not have changed between the early check and taking the lock
+	    my $verify_running = PVE::QemuServer::check_running($vmid) || 0;
+
+	    die "unexpected state change\n" if $verify_running != $running;
+
+	    die "snapshot '$snapname' does not exist\n"
+		if $snapname && !defined( $conf->{snapshots}->{$snapname});
+
+	    my $oldconf = $snapname ? $conf->{snapshots}->{$snapname} : $conf;
+
+	    my $sharedvm = &$check_storage_access_clone($rpcenv, $authuser, $storecfg, $oldconf, $storage);
+
+	    die "can't clone VM to node '$target' (VM uses local storage)\n" if $target && !$sharedvm;
+
+	    my $conffile = PVE::QemuServer::config_file($newid);
+
+	    die "unable to create VM $newid: config file already exists\n"
+		if -f $conffile;
+
+	    my $newconf = { lock => 'clone' };
+	    my $drives = {};
+	    my $vollist = [];
+
+	    foreach my $opt (keys %$oldconf) {
+		my $value = $oldconf->{$opt};
+
+		# do not copy snapshot related info
+		next if $opt eq 'snapshots' || $opt eq 'parent' || $opt eq 'snaptime' ||
+		    $opt eq 'vmstate' || $opt eq 'snapstate';
+
+		# always change MAC! address
+		if ($opt =~ m/^net(\d+)$/) {
+		    my $net = PVE::QemuServer::parse_net($value);
+		    $net->{macaddr} = PVE::Tools::random_ether_addr();
+		    $newconf->{$opt} = PVE::QemuServer::print_net($net);
+		} elsif (my $drive = PVE::QemuServer::parse_drive($opt, $value)) {
+		    if (PVE::QemuServer::drive_is_cdrom($drive)) {
+			$newconf->{$opt} = $value; # simply copy configuration
+		    } else {
+			# a full copy is needed when requested explicitly, or when
+			# the source volume is not a base (template) image
+			if ($param->{full} || !PVE::Storage::volume_is_base($storecfg, $drive->{file})) {
+			    # note: trailing \n keeps the API error message clean
+			    # (no "at FILE line N" suffix), consistent with the
+			    # other die messages in this module
+			    die "Full clone feature is not available\n"
+				if !PVE::Storage::volume_has_feature($storecfg, 'copy', $drive->{file}, $snapname, $running);
+			    $drive->{full} = 1;
+			}
+			$drives->{$opt} = $drive;
+			push @$vollist, $drive->{file};
+		    }
+		} else {
+		    # copy everything else
+		    $newconf->{$opt} = $value;
+		}
+	    }
+
+	    # a clone never starts out as a template
+	    delete $newconf->{template};
+
+	    if ($param->{name}) {
+		$newconf->{name} = $param->{name};
+	    } else {
+		if ($oldconf->{name}) {
+		    $newconf->{name} = "Copy-of-$oldconf->{name}";
+		} else {
+		    $newconf->{name} = "Copy-of-VM-$vmid";
+		}
+	    }
+
+	    if ($param->{description}) {
+		$newconf->{description} = $param->{description};
+	    }
+
+	    # create empty/temp config - this fails if VM already exists on other node
+	    PVE::Tools::file_set_contents($conffile, "# qmclone temporary file\nlock: clone\n");
+
+	    my $realcmd = sub {
+		my $upid = shift;
+
+		my $newvollist = [];
+
+		eval {
+		    local $SIG{INT} = $SIG{TERM} = $SIG{QUIT} = $SIG{HUP} = sub { die "interrupted by signal\n"; };
+
+		    PVE::Storage::activate_volumes($storecfg, $vollist);
+
+		    foreach my $opt (keys %$drives) {
+			my $drive = $drives->{$opt};
+
+			my $newdrive = PVE::QemuServer::clone_disk($storecfg, $vmid, $running, $opt, $drive, $snapname,
+							           $newid, $storage, $format, $drive->{full}, $newvollist);
+
+			$newconf->{$opt} = PVE::QemuServer::print_drive($vmid, $newdrive);
+
+			# write the config after each disk, so a crash mid-clone
+			# leaves a consistent (still locked) config behind
+			PVE::QemuServer::update_config_nolock($newid, $newconf, 1);
+		    }
+
+		    delete $newconf->{lock};
+		    PVE::QemuServer::update_config_nolock($newid, $newconf, 1);
+
+		    if ($target) {
+			my $newconffile = PVE::QemuServer::config_file($newid, $target);
+			die "Failed to move config to node '$target' - rename failed: $!\n"
+			    if !rename($conffile, $newconffile);
+		    }
+
+		    PVE::AccessControl::add_vm_to_pool($newid, $pool) if $pool;
+		};
+		if (my $err = $@) {
+		    unlink $conffile;
+
+		    sleep 1; # some storage like rbd need to wait before release volume - really?
+
+		    # roll back any volumes that were already created
+		    foreach my $volid (@$newvollist) {
+			eval { PVE::Storage::vdisk_free($storecfg, $volid); };
+			warn $@ if $@;
+		    }
+		    die "clone failed: $err";
+		}
+
+		return;
+	    };
+
+	    return $rpcenv->fork_worker('qmclone', $vmid, $authuser, $realcmd);
+	};
+
+	return PVE::QemuServer::lock_config_mode($vmid, 1, $shared_lock, sub {
+	    # Acquire exclusive lock for $newid
+	    return PVE::QemuServer::lock_config_full($newid, 1, $clonefn);
+	});
+
+    }});
+
+__PACKAGE__->register_method({
+    name => 'move_vm_disk',
+    path => '{vmid}/move_disk',
+    method => 'POST',
+    protected => 1,
+    proxyto => 'node',
+    description => "Move volume to different storage.",
+    permissions => {
+	description => "You need 'VM.Config.Disk' permissions on /vms/{vmid}, " .
+	    "and 'Datastore.AllocateSpace' permissions on the storage.",
+	check =>
+	[ 'and',
+	  ['perm', '/vms/{vmid}', [ 'VM.Config.Disk' ]],
+	  ['perm', '/storage/{storage}', [ 'Datastore.AllocateSpace' ]],
+	],
+    },
+    parameters => {
+	additionalProperties => 0,
+	properties => {
+	    node => get_standard_option('pve-node'),
+	    vmid => get_standard_option('pve-vmid'),
+	    disk => {
+	        type => 'string',
+		description => "The disk you want to move.",
+		enum => [ PVE::QemuServer::disknames() ],
+	    },
+            storage => get_standard_option('pve-storage-id', { description => "Target Storage." }),
+            'format' => {
+                type => 'string',
+                description => "Target Format.",
+                enum => [ 'raw', 'qcow2', 'vmdk' ],
+                optional => 1,
+            },
+	    delete => {
+		type => 'boolean',
+		description => "Delete the original disk after successful copy. By default the original disk is kept as unused disk.",
+		optional => 1,
+		default => 0,
+	    },
+	    digest => {
+		type => 'string',
+		description => 'Prevent changes if current configuration file has different SHA1 digest. This can be used to prevent concurrent modifications.',
+		maxLength => 40,
+		optional => 1,
+	    },
+	},
+    },
+    returns => {
+	type => 'string',
+	description => "the task ID.",
+    },
+    code => sub {
+	my ($param) = @_;
+
+	my $rpcenv = PVE::RPCEnvironment::get();
+
+	my $authuser = $rpcenv->get_user();
+
+	my $node = extract_param($param, 'node');
+
+	my $vmid = extract_param($param, 'vmid');
+
+	my $digest = extract_param($param, 'digest');
+
+	my $disk = extract_param($param, 'disk');
+
+	my $storeid = extract_param($param, 'storage');
+
+	my $format = extract_param($param, 'format');
+
+	my $storecfg = PVE::Storage::config();
+
+	my $updatefn =  sub {
+
+	    my $conf = PVE::QemuServer::load_config($vmid);
+
+	    # optimistic concurrency check against the caller-supplied digest
+	    # (message typo fixed: "missmatch" -> "mismatch")
+	    die "checksum mismatch (file change by other user?)\n"
+		if $digest && $digest ne $conf->{digest};
+
+	    die "disk '$disk' does not exist\n" if !$conf->{$disk};
+
+	    my $drive = PVE::QemuServer::parse_drive($disk, $conf->{$disk});
+
+	    my $old_volid = $drive->{file} || die "disk '$disk' has no associated volume\n";
+
+	    die "you can't move a cdrom\n" if PVE::QemuServer::drive_is_cdrom($drive);
+
+	    # derive the current format from the volume name suffix, if any
+	    my $oldfmt;
+	    my ($oldstoreid, $oldvolname) = PVE::Storage::parse_volume_id($old_volid);
+	    if ($oldvolname =~ m/\.(raw|qcow2|vmdk)$/){
+		$oldfmt = $1;
+	    }
+
+	    # moving within the same storage is only useful when it changes the format
+	    die "you can't move on the same storage with same format\n" if $oldstoreid eq $storeid &&
+                (!$format || !$oldfmt || $oldfmt eq $format);
+
+	    PVE::Cluster::log_msg('info', $authuser, "move disk VM $vmid: move --disk $disk --storage $storeid");
+
+	    my $running = PVE::QemuServer::check_running($vmid);
+
+	    PVE::Storage::activate_volumes($storecfg, [ $drive->{file} ]);
+
+	    my $realcmd = sub {
+
+		my $newvollist = [];
+
+		eval {
+		    local $SIG{INT} = $SIG{TERM} = $SIG{QUIT} = $SIG{HUP} = sub { die "interrupted by signal\n"; };
+
+		    my $newdrive = PVE::QemuServer::clone_disk($storecfg, $vmid, $running, $disk, $drive, undef,
+							       $vmid, $storeid, $format, 1, $newvollist);
+
+		    $conf->{$disk} = PVE::QemuServer::print_drive($vmid, $newdrive);
+
+		    # keep the old volume around as an unused disk unless deletion was requested
+		    PVE::QemuServer::add_unused_volume($conf, $old_volid) if !$param->{delete};
+
+		    PVE::QemuServer::update_config_nolock($vmid, $conf, 1);
+		};
+		if (my $err = $@) {
+
+		    # roll back any volumes that were already created on the target
+		    foreach my $volid (@$newvollist) {
+			eval { PVE::Storage::vdisk_free($storecfg, $volid); };
+			warn $@ if $@;
+		    }
+		    die "storage migration failed: $err";
+                }
+
+		# only free the old volume after the copy fully succeeded
+		if ($param->{delete}) {
+		    eval { PVE::Storage::vdisk_free($storecfg, $old_volid); };
+		    warn $@ if $@;
+		}
+	    };
+
+            return $rpcenv->fork_worker('qmmove', $vmid, $authuser, $realcmd);
+	};
+
+	return PVE::QemuServer::lock_config($vmid, $updatefn);
+    }});
+
__PACKAGE__->register_method({
name => 'migrate_vm',
path => '{vmid}/migrate',
my $digest = extract_param($param, 'digest');
my $disk = extract_param($param, 'disk');
-
+
my $sizestr = extract_param($param, 'size');
my $skiplock = extract_param($param, 'skiplock');
die "you can't resize a cdrom\n" if PVE::QemuServer::drive_is_cdrom($drive);
+ die "you can't online resize a virtio windows bootdisk\n"
+ if PVE::QemuServer::check_running($vmid) && $conf->{bootdisk} eq $disk && $conf->{ostype} =~ m/^w/ && $disk =~ m/^virtio/;
+
my ($storeid, $volname) = PVE::Storage::parse_volume_id($volid);
$rpcenv->check($authuser, "/storage/$storeid", ['Datastore.AllocateSpace']);
PVE::Cluster::log_msg('info', $authuser, "update VM $vmid: resize --disk $disk --size $sizestr");
PVE::QemuServer::qemu_block_resize($vmid, "drive-$disk", $storecfg, $volid, $newsize);
-
+
$drive->{size} = $newsize;
$conf->{$disk} = PVE::QemuServer::print_drive($vmid, $drive);
code => sub {
my ($param) = @_;
- my $conf = PVE::QemuServer::load_config($param->{vmid});
+ my $vmid = $param->{vmid};
+
+ my $conf = PVE::QemuServer::load_config($vmid);
my $snaphash = $conf->{snapshots} || {};
my $res = [];
foreach my $name (keys %$snaphash) {
my $d = $snaphash->{$name};
- my $item = { name => $name, description => $d->{description} };
+ my $item = {
+ name => $name,
+ snaptime => $d->{snaptime} || 0,
+ vmstate => $d->{vmstate} ? 1 : 0,
+ description => $d->{description} || '',
+ };
$item->{parent} = $d->{parent} if $d->{parent};
+ $item->{snapstate} = $d->{snapstate} if $d->{snapstate};
push @$res, $item;
}
- if ($conf->{parent}) {
- push @$res, { name => '__current', parent => $conf->{parent} };
- } else {
- push @$res, { name => '__current' };
- }
+ my $running = PVE::QemuServer::check_running($vmid, 1) ? 1 : 0;
+ my $current = { name => 'current', digest => $conf->{digest}, running => $running };
+ $current->{parent} = $conf->{parent} if $conf->{parent};
+
+ push @$res, $current;
return $res;
}});
type => 'boolean',
description => "Freeze the filesystem",
},
+ description => {
+ optional => 1,
+ type => 'string',
+ description => "A textual description or comment.",
+ },
},
},
returns => {
my $snapname = extract_param($param, 'snapname');
- my $vmstate = extract_param($param, 'vmstate');
-
- my $freezefs = extract_param($param, 'freezefs');
+ die "unable to use snapshot name 'current' (reserved name)\n"
+ if $snapname eq 'current';
my $realcmd = sub {
PVE::Cluster::log_msg('info', $authuser, "snapshot VM $vmid: $snapname");
- PVE::QemuServer::snapshot_create($vmid, $snapname, $vmstate, $freezefs);
+ PVE::QemuServer::snapshot_create($vmid, $snapname, $param->{vmstate},
+ $param->{freezefs}, $param->{description});
};
return $rpcenv->fork_worker('qmsnapshot', $vmid, $authuser, $realcmd);
my $res = [];
push @$res, { cmd => 'rollback' };
+ push @$res, { cmd => 'config' };
return $res;
}});
+__PACKAGE__->register_method({
+    name => 'update_snapshot_config',
+    path => '{vmid}/snapshot/{snapname}/config',
+    method => 'PUT',
+    protected => 1,
+    proxyto => 'node',
+    description => "Update snapshot metadata.",
+    permissions => {
+	check => ['perm', '/vms/{vmid}', [ 'VM.Snapshot' ]],
+    },
+    parameters => {
+	additionalProperties => 0,
+	properties => {
+	    node => get_standard_option('pve-node'),
+	    vmid => get_standard_option('pve-vmid'),
+	    snapname => get_standard_option('pve-snapshot-name'),
+	    description => {
+		optional => 1,
+		type => 'string',
+		description => "A textual description or comment.",
+	    },
+	},
+    },
+    returns => { type => 'null' },
+    code => sub {
+	my ($param) = @_;
+
+	my $rpcenv = PVE::RPCEnvironment::get();
+	my $authuser = $rpcenv->get_user();
+
+	my $vmid = extract_param($param, 'vmid');
+	my $snapname = extract_param($param, 'snapname');
+
+	# the description is currently the only updatable field - nothing
+	# to do when it was not supplied
+	return undef if !defined($param->{description});
+
+	my $update_description = sub {
+	    my $conf = PVE::QemuServer::load_config($vmid);
+	    PVE::QemuServer::check_lock($conf);
+
+	    my $snapconf = $conf->{snapshots}->{$snapname};
+	    die "snapshot '$snapname' does not exist\n" if !defined($snapconf);
+
+	    $snapconf->{description} = $param->{description} if defined($param->{description});
+
+	    PVE::QemuServer::update_config_nolock($vmid, $conf, 1);
+	};
+
+	# serialize against concurrent config modifications
+	PVE::QemuServer::lock_config($vmid, $update_description);
+
+	return undef;
+    }});
+
+__PACKAGE__->register_method({
+    name => 'get_snapshot_config',
+    path => '{vmid}/snapshot/{snapname}/config',
+    method => 'GET',
+    proxyto => 'node',
+    description => "Get snapshot configuration",
+    permissions => {
+	check => ['perm', '/vms/{vmid}', [ 'VM.Snapshot' ]],
+    },
+    parameters => {
+	additionalProperties => 0,
+	properties => {
+	    node => get_standard_option('pve-node'),
+	    vmid => get_standard_option('pve-vmid'),
+	    snapname => get_standard_option('pve-snapshot-name'),
+	},
+    },
+    returns => { type => "object" },
+    code => sub {
+	my ($param) = @_;
+
+	my $rpcenv = PVE::RPCEnvironment::get();
+	my $authuser = $rpcenv->get_user();
+
+	my $vmid = extract_param($param, 'vmid');
+	my $snapname = extract_param($param, 'snapname');
+
+	# read-only lookup: return the raw snapshot section of the VM config
+	my $vmconf = PVE::QemuServer::load_config($vmid);
+
+	my $snapconf = $vmconf->{snapshots}->{$snapname};
+	die "snapshot '$snapname' does not exist\n" if !defined($snapconf);
+
+	return $snapconf;
+    }});
+
__PACKAGE__->register_method({
name => 'rollback',
path => '{vmid}/snapshot/{snapname}/rollback',
node => get_standard_option('pve-node'),
vmid => get_standard_option('pve-vmid'),
snapname => get_standard_option('pve-snapshot-name'),
+ force => {
+ optional => 1,
+ type => 'boolean',
+ description => "For removal from config file, even if removing disk snapshots fails.",
+ },
},
},
returns => {
my $realcmd = sub {
PVE::Cluster::log_msg('info', $authuser, "delete snapshot VM $vmid: $snapname");
- PVE::QemuServer::snapshot_delete($vmid, $snapname);
+ PVE::QemuServer::snapshot_delete($vmid, $snapname, $param->{force});
};
return $rpcenv->fork_worker('qmdelsnapshot', $vmid, $authuser, $realcmd);
}});
+__PACKAGE__->register_method({
+ name => 'template',
+ path => '{vmid}/template',
+ method => 'POST',
+ protected => 1,
+ proxyto => 'node',
+ description => "Create a Template.",
+ permissions => {
+ description => "You need 'VM.Allocate' permissions on /vms/{vmid}",
+ check => [ 'perm', '/vms/{vmid}', ['VM.Allocate']],
+ },
+ parameters => {
+ additionalProperties => 0,
+ properties => {
+ node => get_standard_option('pve-node'),
+ vmid => get_standard_option('pve-vmid'),
+ disk => {
+ optional => 1,
+ type => 'string',
+ description => "If you want to convert only 1 disk to base image.",
+ enum => [PVE::QemuServer::disknames()],
+ },
+
+ },
+ },
+ returns => { type => 'null'},
+ code => sub {
+ my ($param) = @_;
+
+ my $rpcenv = PVE::RPCEnvironment::get();
+
+ my $authuser = $rpcenv->get_user();
+
+ my $node = extract_param($param, 'node');
+
+ my $vmid = extract_param($param, 'vmid');
+
+ my $disk = extract_param($param, 'disk');
+
+ # all precondition checks run while holding the VM config lock (see
+ # lock_config call below)
+ my $updatefn = sub {
+
+ my $conf = PVE::QemuServer::load_config($vmid);
+
+ PVE::QemuServer::check_lock($conf);
+
+ # conversion is refused while snapshots exist
+ die "unable to create template, because VM contains snapshots\n"
+ if $conf->{snapshots} && scalar(keys %{$conf->{snapshots}});
+
+ # re-running on an existing template is only allowed with an explicit
+ # single-disk request
+ die "you can't convert a template to a template\n"
+ if PVE::QemuServer::is_template($conf) && !$disk;
+
+ die "you can't convert a VM to template if VM is running\n"
+ if PVE::QemuServer::check_running($vmid);
+
+ my $realcmd = sub {
+ PVE::QemuServer::template_create($vmid, $conf, $disk);
+ };
+
+ # NOTE(review): the template flag is written to the config *before*
+ # template_create runs in the forked worker, so the flag stays set
+ # even if the disk conversion later fails - confirm this is intended
+ $conf->{template} = 1;
+ PVE::QemuServer::update_config_nolock($vmid, $conf, 1);
+
+ return $rpcenv->fork_worker('qmtemplate', $vmid, $authuser, $realcmd);
+ };
+
+ PVE::QemuServer::lock_config($vmid, $updatefn);
+ return undef;
+ }});
+
1;