use PVE::Cluster qw (cfs_read_file cfs_write_file);;
use PVE::SafeSyslog;
use PVE::Tools qw(extract_param);
-use PVE::Exception qw(raise raise_param_exc);
+use PVE::Exception qw(raise raise_param_exc raise_perm_exc);
use PVE::Storage;
use PVE::JSONSchema qw(get_standard_option);
use PVE::RESTHandler;
use PVE::RPCEnvironment;
use PVE::AccessControl;
use PVE::INotify;
+use PVE::Network;
use Data::Dumper; # fixme: remove
});
};
# Verify that $authuser may clone every disk referenced by $conf, and at the
# same time determine whether all referenced volumes live on shared storage.
# For full clones an explicit target $storage may be given; the
# 'Datastore.AllocateSpace' check is then done against that storage instead.
# Returns true when every referenced volume is on shared storage.
my $check_storage_access_clone = sub {
    my ($rpcenv, $authuser, $storecfg, $conf, $storage) = @_;

    my $sharedvm = 1;

    PVE::QemuServer::foreach_drive($conf, sub {
	my ($ds, $drive) = @_;

	my $volid = $drive->{file};

	return if !$volid || $volid eq 'none';

	if (PVE::QemuServer::drive_is_cdrom($drive)) {
	    if ($volid eq 'cdrom') {
		# physical CDROM passthrough requires console privileges
		$rpcenv->check($authuser, "/", ['Sys.Console']);
	    } else {
		# ISO images: we simply allow access, but they still count
		# for the shared-storage detection
		my ($sid) = PVE::Storage::parse_volume_id($volid);
		my $scfg = PVE::Storage::storage_config($storecfg, $sid);
		$sharedvm = 0 if !$scfg->{shared};
	    }
	} else {
	    my ($sid) = PVE::Storage::parse_volume_id($volid);
	    my $scfg = PVE::Storage::storage_config($storecfg, $sid);
	    $sharedvm = 0 if !$scfg->{shared};

	    # a full clone may allocate on an explicitly requested storage
	    $sid = $storage if $storage;
	    $rpcenv->check($authuser, "/storage/$sid", ['Datastore.AllocateSpace']);
	}
    });

    return $sharedvm;
};
+
# Note: $pool is only needed when creating a VM, because pool permissions
# are automatically inherited if VM already exists inside a pool.
my $create_disks = sub {
my $volid = $disk->{file};
if (!$volid || $volid eq 'none' || $volid eq 'cdrom') {
- $res->{$ds} = $settings->{$ds};
+ delete $disk->{size};
+ $res->{$ds} = PVE::QemuServer::print_drive($vmid, $disk);
} elsif ($volid =~ m/^(([^:\s]+):)?(\d+(\.\d+)?)$/) {
my ($storeid, $size) = ($2 || $default_storage, $3);
die "no storage ID specified (and no default storage)\n" if !$storeid;
} else {
my $path = $rpcenv->check_volume_access($authuser, $storecfg, $vmid, $volid);
-
+
my ($storeid, $volname) = PVE::Storage::parse_volume_id($volid, 1);
my $foundvolid = undef;
}
});
}
-
+
die "image '$path' does not exists\n" if (!(-f $path || -b $path || $foundvolid));
my ($size) = PVE::Storage::volume_size_info($storecfg, $volid, 1);
next if PVE::QemuServer::valid_drivename($opt);
if ($opt eq 'sockets' || $opt eq 'cores' ||
- $opt eq 'cpu' || $opt eq 'smp' ||
+ $opt eq 'cpu' || $opt eq 'smp' ||
$opt eq 'cpulimit' || $opt eq 'cpuunits') {
$rpcenv->check_vm_perm($authuser, $vmid, $pool, ['VM.Config.CPU']);
} elsif ($opt eq 'boot' || $opt eq 'bootdisk') {
$rpcenv->check_vm_perm($authuser, $vmid, $pool, ['VM.Config.Disk']);
- } elsif ($opt eq 'memory' || $opt eq 'balloon') {
+ } elsif ($opt eq 'memory' || $opt eq 'balloon' || $opt eq 'shares') {
$rpcenv->check_vm_perm($authuser, $vmid, $pool, ['VM.Config.Memory']);
} elsif ($opt eq 'args' || $opt eq 'lock') {
die "only root can set '$opt' config\n";
- } elsif ($opt eq 'cpu' || $opt eq 'kvm' || $opt eq 'acpi' ||
+ } elsif ($opt eq 'cpu' || $opt eq 'kvm' || $opt eq 'acpi' ||
$opt eq 'vga' || $opt eq 'watchdog' || $opt eq 'tablet') {
$rpcenv->check_vm_perm($authuser, $vmid, $pool, ['VM.Config.HWType']);
} elsif ($opt =~ m/^net\d+$/) {
return $res;
}});
+
+
__PACKAGE__->register_method({
name => 'create_vm',
path => '',
method => 'POST',
description => "Create or restore a virtual machine.",
permissions => {
- description => "You need 'VM.Allocate' permissions on /vms/{vmid} or on the VM pool /pool/{pool}. If you create disks you need 'Datastore.AllocateSpace' on any used storage.",
- check => [ 'or',
- [ 'perm', '/vms/{vmid}', ['VM.Allocate']],
- [ 'perm', '/pool/{pool}', ['VM.Allocate'], require_param => 'pool'],
- ],
+ description => "You need 'VM.Allocate' permissions on /vms/{vmid} or on the VM pool /pool/{pool}. " .
+ "For restore (option 'archive'), it is enough if the user has 'VM.Backup' permission and the VM already exists. " .
+ "If you create disks you need 'Datastore.AllocateSpace' on any used storage.",
+ user => 'all', # check inside
},
protected => 1,
proxyto => 'node',
description => "Assign a unique random ethernet address.",
requires => 'archive',
},
- pool => {
+ pool => {
optional => 1,
type => 'string', format => 'pve-poolid',
description => "Add the VM to the specified pool.",
my $force = extract_param($param, 'force');
my $unique = extract_param($param, 'unique');
-
+
my $pool = extract_param($param, 'pool');
my $filename = PVE::QemuServer::config_file($vmid);
if (defined($pool)) {
$rpcenv->check_pool_exist($pool);
- }
+ }
$rpcenv->check($authuser, "/storage/$storage", ['Datastore.AllocateSpace'])
if defined($storage);
+ if ($rpcenv->check($authuser, "/vms/$vmid", ['VM.Allocate'], 1)) {
+ # OK
+ } elsif ($pool && $rpcenv->check($authuser, "/pool/$pool", ['VM.Allocate'], 1)) {
+ # OK
+ } elsif ($archive && $force && (-f $filename) &&
+ $rpcenv->check($authuser, "/vms/$vmid", ['VM.Backup'], 1)) {
+ # OK: user has VM.Backup permissions, and want to restore an existing VM
+ } else {
+ raise_perm_exc();
+ }
+
if (!$archive) {
&$resolve_cdrom_alias($param);
}
}
- my $addVMtoPoolFn = sub {
- my $usercfg = cfs_read_file("user.cfg");
- if (my $data = $usercfg->{pools}->{$pool}) {
- $data->{vms}->{$vmid} = 1;
- $usercfg->{vms}->{$vmid} = $pool;
- cfs_write_file("user.cfg", $usercfg);
- }
- };
-
my $restorefn = sub {
+ # fixme: this test does not work if VM exists on other node!
if (-f $filename) {
die "unable to restore vm $vmid: config file already exists\n"
if !$force;
die "unable to restore vm $vmid: vm is running\n"
if PVE::QemuServer::check_running($vmid);
-
- # destroy existing data - keep empty config
- PVE::QemuServer::destroy_vm($storecfg, $vmid, 1);
}
my $realcmd = sub {
pool => $pool,
unique => $unique });
- PVE::AccessControl::lock_user_config($addVMtoPoolFn, "can't add VM to pool") if $pool;
+ PVE::AccessControl::add_vm_to_pool($vmid, $pool) if $pool;
};
return $rpcenv->fork_worker('qmrestore', $vmid, $authuser, $realcmd);
die "create failed - $err";
}
- PVE::AccessControl::lock_user_config($addVMtoPoolFn, "can't add VM to pool") if $pool;
+ PVE::AccessControl::add_vm_to_pool($vmid, $pool) if $pool;
};
return $rpcenv->fork_worker('qmcreate', $vmid, $authuser, $realcmd);
{ subdir => 'rrd' },
{ subdir => 'rrddata' },
{ subdir => 'monitor' },
+ { subdir => 'snapshot' },
];
return $res;
my $conf = PVE::QemuServer::load_config($param->{vmid});
+ delete $conf->{snapshots};
+
return $conf;
}});
$rpcenv->check_vm_perm($authuser, $vmid, undef, ['VM.Config.Disk']);
my $drive = PVE::QemuServer::parse_drive($opt, $conf->{$opt});
- if (my $sid = &$test_deallocate_drive($storecfg, $vmid, $opt, $drive, $force)) {
+ if (my $sid = &$test_deallocate_drive($storecfg, $vmid, $opt, $drive, $force)) {
$rpcenv->check($authuser, "/storage/$sid", ['Datastore.Allocate']);
}
}
-
- die "error hot-unplug $opt" if !PVE::QemuServer::vm_deviceunplug($vmid, $conf, $opt);
+
+ my $unplugwarning = "";
+ if($conf->{ostype} && $conf->{ostype} eq 'l26'){
+ $unplugwarning = "<br>verify that you have acpiphp && pci_hotplug modules loaded in your guest VM";
+ }elsif($conf->{ostype} && $conf->{ostype} eq 'l24'){
+ $unplugwarning = "<br>kernel 2.4 don't support hotplug, please disable hotplug in options";
+ }elsif(!$conf->{ostype} || ($conf->{ostype} && $conf->{ostype} eq 'other')){
+ $unplugwarning = "<br>verify that your guest support acpi hotplug";
+ }
+
+ if($opt eq 'tablet'){
+ PVE::QemuServer::vm_deviceplug(undef, $conf, $vmid, $opt);
+ }else{
+ die "error hot-unplug $opt $unplugwarning" if !PVE::QemuServer::vm_deviceunplug($vmid, $conf, $opt);
+ }
if ($isDisk) {
my $drive = PVE::QemuServer::parse_drive($opt, $conf->{$opt});
PVE::QemuServer::update_config_nolock($vmid, $conf, 1);
};
# Numerically compare two possibly-undefined values.
# Returns 1 if exactly one value is undefined, or if both are defined and
# numerically different; returns 0 if both are undefined, or both are
# defined and numerically equal.
# Note: the original unpacked into lexicals named $a/$b, which mask Perl's
# sort() package globals - use neutral parameter names instead.
my $safe_num_ne = sub {
    my ($v1, $v2) = @_;

    return 0 if !defined($v1) && !defined($v2);
    return 1 if !defined($v1);
    return 1 if !defined($v2);
    return $v1 != $v2;
};
$conf = PVE::QemuServer::load_config($vmid); # update/reload
}
- if(&$safe_int_ne($drive->{bps}, $old_drive->{bps}) ||
- &$safe_int_ne($drive->{bps_rd}, $old_drive->{bps_rd}) ||
- &$safe_int_ne($drive->{bps_wr}, $old_drive->{bps_wr}) ||
- &$safe_int_ne($drive->{iops}, $old_drive->{iops}) ||
- &$safe_int_ne($drive->{iops_rd}, $old_drive->{iops_rd}) ||
- &$safe_int_ne($drive->{iops_wr}, $old_drive->{iops_wr})) {
- PVE::QemuServer::qemu_block_set_io_throttle($vmid,"drive-$opt",$drive->{bps}, $drive->{bps_rd}, $drive->{bps_wr}, $drive->{iops}, $drive->{iops_rd}, $drive->{iops_wr}) if !PVE::QemuServer::drive_is_cdrom($drive);
+ if(&$safe_num_ne($drive->{mbps}, $old_drive->{mbps}) ||
+ &$safe_num_ne($drive->{mbps_rd}, $old_drive->{mbps_rd}) ||
+ &$safe_num_ne($drive->{mbps_wr}, $old_drive->{mbps_wr}) ||
+ &$safe_num_ne($drive->{iops}, $old_drive->{iops}) ||
+ &$safe_num_ne($drive->{iops_rd}, $old_drive->{iops_rd}) ||
+ &$safe_num_ne($drive->{iops_wr}, $old_drive->{iops_wr})) {
+ PVE::QemuServer::qemu_block_set_io_throttle($vmid,"drive-$opt", $drive->{mbps}*1024*1024,
+ $drive->{mbps_rd}*1024*1024, $drive->{mbps_wr}*1024*1024,
+ $drive->{iops}, $drive->{iops_rd}, $drive->{iops_wr})
+ if !PVE::QemuServer::drive_is_cdrom($drive);
}
}
}
# Apply a network device change to the VM config, hot-applying it when the
# VM is running. Simple bridge/vlan/rate changes are applied to the existing
# tap device; a model change (or a switch between bridged and NAT mode)
# requires a hot-unplug so the device is re-created with the new settings.
my $vmconfig_update_net = sub {
    my ($rpcenv, $authuser, $conf, $storecfg, $vmid, $opt, $value) = @_;

    if ($conf->{$opt} && PVE::QemuServer::check_running($vmid)) {
	my $oldnet = PVE::QemuServer::parse_net($conf->{$opt});
	my $newnet = PVE::QemuServer::parse_net($value);

	if ($oldnet->{model} ne $newnet->{model}) {
	    # if model changed, we try to hot-unplug
	    die "error hot-unplug $opt for update" if !PVE::QemuServer::vm_deviceunplug($vmid, $conf, $opt);
	} else {
	    if ($newnet->{bridge} && $oldnet->{bridge}) {
		# Bugfix: the original used "my $iface = ... if COND", whose
		# behaviour is undefined in Perl (the lexical may retain its
		# previous value). Derive the tap interface name explicitly.
		my ($netid) = $opt =~ m/net(\d+)/;
		my $iface = "tap${vmid}i${netid}";

		if ($newnet->{rate} ne $oldnet->{rate}) {
		    PVE::Network::tap_rate_limit($iface, $newnet->{rate});
		}

		if (($newnet->{bridge} ne $oldnet->{bridge}) || ($newnet->{tag} ne $oldnet->{tag})) {
		    # best-effort unplug from the old bridge, then plug into the new one
		    eval { PVE::Network::tap_unplug($iface, $oldnet->{bridge}, $oldnet->{tag}); };
		    PVE::Network::tap_plug($iface, $newnet->{bridge}, $newnet->{tag});
		}
	    } else {
		# if bridge/nat mode changed, we try to hot-unplug
		die "error hot-unplug $opt for update" if !PVE::QemuServer::vm_deviceunplug($vmid, $conf, $opt);
	    }
	}
    }

    $conf->{$opt} = $value;
    PVE::QemuServer::update_config_nolock($vmid, $conf, 1);
    $conf = PVE::QemuServer::load_config($vmid); # update/reload
};
# Privileges that allow modification of (some part of) the VM configuration.
my $vm_config_perm_list = [qw(
    VM.Config.Disk
    VM.Config.CDROM
    VM.Config.CPU
    VM.Config.Memory
    VM.Config.Network
    VM.Config.HWType
    VM.Config.Options
)];
my $storecfg = PVE::Storage::config();
+ my $defaults = PVE::QemuServer::load_defaults();
+
&$resolve_cdrom_alias($param);
# now try to verify all parameters
my $drive = PVE::QemuServer::parse_drive($opt, $param->{$opt});
PVE::QemuServer::cleanup_drive_path($opt, $storecfg, $drive);
$param->{$opt} = PVE::QemuServer::print_drive($vmid, $drive);
- } elsif ($opt =~ m/^net(\d+)$/) {
+ } elsif ($opt =~ m/^net(\d+)$/) {
# add macaddr
my $net = PVE::QemuServer::parse_net($param->{$opt});
$param->{$opt} = PVE::QemuServer::print_net($net);
PVE::QemuServer::check_lock($conf) if !$skiplock;
+ if ($param->{memory} || defined($param->{balloon})) {
+ my $maxmem = $param->{memory} || $conf->{memory} || $defaults->{memory};
+ my $balloon = defined($param->{balloon}) ? $param->{balloon} : $conf->{balloon};
+
+ die "balloon value too large (must be smaller than assigned memory)\n"
+ if $balloon > $maxmem;
+ }
+
PVE::Cluster::log_msg('info', $authuser, "update VM $vmid: " . join (' ', @paramarr));
foreach my $opt (@delete) { # delete
&$vmconfig_delete_option($rpcenv, $authuser, $conf, $storecfg, $vmid, $opt, $force);
}
+ my $running = PVE::QemuServer::check_running($vmid);
+
foreach my $opt (keys %$param) { # add/change
$conf = PVE::QemuServer::load_config($vmid); # update/reload
if (PVE::QemuServer::valid_drivename($opt)) {
- &$vmconfig_update_disk($rpcenv, $authuser, $conf, $storecfg, $vmid,
+ &$vmconfig_update_disk($rpcenv, $authuser, $conf, $storecfg, $vmid,
$opt, $param->{$opt}, $force);
-
+
} elsif ($opt =~ m/^net(\d+)$/) { #nics
- &$vmconfig_update_net($rpcenv, $authuser, $conf, $storecfg, $vmid,
+ &$vmconfig_update_net($rpcenv, $authuser, $conf, $storecfg, $vmid,
$opt, $param->{$opt});
} else {
+ if($opt eq 'tablet' && $param->{$opt} == 1){
+ PVE::QemuServer::vm_deviceplug(undef, $conf, $vmid, $opt);
+ }elsif($opt eq 'tablet' && $param->{$opt} == 0){
+ PVE::QemuServer::vm_deviceunplug($vmid, $conf, $opt);
+ }
+
$conf->{$opt} = $param->{$opt};
PVE::QemuServer::update_config_nolock($vmid, $conf, 1);
}
}
+
+ # allow manual ballooning if shares is set to zero
+ if ($running && defined($param->{balloon}) &&
+ defined($conf->{shares}) && ($conf->{shares} == 0)) {
+ my $balloon = $param->{'balloon'} || $conf->{memory} || $defaults->{memory};
+ PVE::QemuServer::vm_mon_cmd($vmid, "balloon", value => $balloon*1024*1024);
+ }
+
};
PVE::QemuServer::lock_config($vmid, $updatefn);
my $storecfg = PVE::Storage::config();
- my $delVMfromPoolFn = sub {
+ my $delVMfromPoolFn = sub {
my $usercfg = cfs_read_file("user.cfg");
if (my $pool = $usercfg->{vms}->{$vmid}) {
if (my $data = $usercfg->{pools}->{$pool}) {
PVE::QemuServer::vm_destroy($storecfg, $vmid, $skiplock);
- PVE::AccessControl::lock_user_config($delVMfromPoolFn, "pool cleanup failed");
+ PVE::AccessControl::remove_vm_from_pool($vmid);
};
return $rpcenv->fork_worker('qmdestroy', $vmid, $authuser, $realcmd);
$remip = PVE::Cluster::remote_node_ip($node);
}
- # NOTE: kvm VNC traffic is already TLS encrypted,
- # so we select the fastest chipher here (or 'none'?)
- my $remcmd = $remip ? ['/usr/bin/ssh', '-T', '-o', 'BatchMode=yes',
- '-c', 'blowfish-cbc', $remip] : [];
+ # NOTE: kvm VNC traffic is already TLS encrypted
+ my $remcmd = $remip ? ['/usr/bin/ssh', '-T', '-o', 'BatchMode=yes', $remip] : [];
my $timeout = 10;
my $upid = $rpcenv->fork_worker('vncproxy', $vmid, $authuser, $realcmd);
+ PVE::Tools::wait_for_vnc_port($port);
+
return {
user => $authuser,
ticket => $ticket,
my $cc = PVE::Cluster::cfs_read_file('cluster.conf');
if (PVE::Cluster::cluster_conf_lookup_pvevm($cc, 0, $vmid, 1)) {
return 1;
- }
+ }
return 0;
};
return;
}});
# GET .../qemu/{vmid}/feature - query whether a feature (snapshot/clone/copy)
# is available for the VM (optionally evaluated against one of its snapshot
# configs), and report the cluster nodes the VM's volumes are visible on.
__PACKAGE__->register_method({
    name => 'vm_feature',
    path => '{vmid}/feature',
    method => 'GET',
    proxyto => 'node',
    protected => 1,
    description => "Check if feature for virtual machine is available.",
    permissions => {
	check => ['perm', '/vms/{vmid}', [ 'VM.Audit' ]],
    },
    parameters => {
	additionalProperties => 0,
	properties => {
	    node => get_standard_option('pve-node'),
	    vmid => get_standard_option('pve-vmid'),
	    feature => {
		description => "Feature to check.",
		type => 'string',
		enum => [ 'snapshot', 'clone', 'copy' ],
	    },
	    snapname => get_standard_option('pve-snapshot-name', {
		optional => 1,
	    }),
	},
    },
    returns => {
	type => "object",
	properties => {
	    hasFeature => { type => 'boolean' },
	    nodes => {
		type => 'array',
		items => { type => 'string' },
	    }
	},
    },
    code => sub {
	my ($param) = @_;

	my $node = extract_param($param, 'node');
	my $vmid = extract_param($param, 'vmid');
	my $snapname = extract_param($param, 'snapname');
	my $feature = extract_param($param, 'feature');

	my $running = PVE::QemuServer::check_running($vmid);
	my $conf = PVE::QemuServer::load_config($vmid);

	# when a snapshot name is given, evaluate against its stored config
	if ($snapname) {
	    my $snap = $conf->{snapshots}->{$snapname};
	    die "snapshot '$snapname' does not exist\n" if !defined($snap);
	    $conf = $snap;
	}

	my $storecfg = PVE::Storage::config();

	my $nodehash = PVE::QemuServer::shared_nodes($conf, $storecfg);

	return {
	    hasFeature => PVE::QemuServer::has_feature($feature, $conf, $storecfg, $snapname, $running),
	    nodes => [ keys %$nodehash ],
	};
    }});
+
# POST .../qemu/{vmid}/clone - create a copy of a VM or template.
# Linked clones are attempted for template base volumes unless 'full' is
# requested; the heavy lifting runs in a forked worker task.
__PACKAGE__->register_method({
    name => 'clone_vm',
    path => '{vmid}/clone',
    method => 'POST',
    protected => 1,
    proxyto => 'node',
    description => "Create a copy of virtual machine/template.",
    permissions => {
	description => "You need 'VM.Clone' permissions on /vms/{vmid}, and 'VM.Allocate' permissions " .
	    "on /vms/{newid} (or on the VM pool /pool/{pool}). You also need " .
	    "'Datastore.AllocateSpace' on any used storage.",
	# per-disk storage permissions are re-checked inside the lock via
	# $check_storage_access_clone, since they depend on the source config
	check =>
	[ 'and',
	  ['perm', '/vms/{vmid}', [ 'VM.Clone' ]],
	  [ 'or',
	    [ 'perm', '/vms/{newid}', ['VM.Allocate']],
	    [ 'perm', '/pool/{pool}', ['VM.Allocate'], require_param => 'pool'],
	  ],
	]
    },
    parameters => {
	additionalProperties => 0,
	properties => {
	    node => get_standard_option('pve-node'),
	    vmid => get_standard_option('pve-vmid'),
	    newid => get_standard_option('pve-vmid', { description => 'VMID for the clone.' }),
	    name => {
		optional => 1,
		type => 'string', format => 'dns-name',
		description => "Set a name for the new VM.",
	    },
	    description => {
		optional => 1,
		type => 'string',
		description => "Description for the new VM.",
	    },
	    pool => {
		optional => 1,
		type => 'string', format => 'pve-poolid',
		description => "Add the new VM to the specified pool.",
	    },
	    snapname => get_standard_option('pve-snapshot-name', {
		requires => 'full',
		optional => 1,
	    }),
	    storage => get_standard_option('pve-storage-id', {
		description => "Target storage for full clone.",
		requires => 'full',
		optional => 1,
	    }),
	    'format' => {
		description => "Target format for file storage.",
		requires => 'full',
		type => 'string',
		optional => 1,
		enum => [ 'raw', 'qcow2', 'vmdk'],
	    },
	    full => {
		optional => 1,
		type => 'boolean',
		description => "Create a full copy of all disk. This is always done when " .
		    "you clone a normal VM. For VM templates, we try to create a linked clone by default.",
		default => 0,
	    },
	    target => get_standard_option('pve-node', {
		description => "Target node. Only allowed if the original VM is on shared storage.",
		optional => 1,
	    }),
	},
    },
    returns => {
	type => 'string',
    },
    code => sub {
	my ($param) = @_;

	my $rpcenv = PVE::RPCEnvironment::get();

	my $authuser = $rpcenv->get_user();

	my $node = extract_param($param, 'node');

	my $vmid = extract_param($param, 'vmid');

	my $newid = extract_param($param, 'newid');

	my $pool = extract_param($param, 'pool');

	if (defined($pool)) {
	    $rpcenv->check_pool_exist($pool);
	}

	my $snapname = extract_param($param, 'snapname');

	my $storage = extract_param($param, 'storage');

	my $format = extract_param($param, 'format');

	my $target = extract_param($param, 'target');

	my $localnode = PVE::INotify::nodename();

	# a target equal to the local node is a no-op
	undef $target if $target && ($target eq $localnode || $target eq 'localhost');

	PVE::Cluster::check_node_exists($target) if $target;

	my $storecfg = PVE::Storage::config();

	if ($storage) {
	    # check if storage is enabled on local node
	    PVE::Storage::storage_check_enabled($storecfg, $storage);
	    if ($target) {
		# check if storage is available on target node
		PVE::Storage::storage_check_node($storecfg, $storage, $target);
		# clone only works if target storage is shared
		my $scfg = PVE::Storage::storage_config($storecfg, $storage);
		die "can't clone to non-shared storage '$storage'\n" if !$scfg->{shared};
	    }
	}

	PVE::Cluster::check_cfs_quorum();

	my $running = PVE::QemuServer::check_running($vmid) || 0;

	# exclusive lock if VM is running - else shared lock is enough;
	my $shared_lock = $running ? 0 : 1;

	my $clonefn = sub {

	    # do all tests after lock
	    # we also try to do all tests before we fork the worker

	    my $conf = PVE::QemuServer::load_config($vmid);

	    PVE::QemuServer::check_lock($conf);

	    # guard against the VM starting/stopping between the check above
	    # and taking the lock
	    my $verify_running = PVE::QemuServer::check_running($vmid) || 0;

	    die "unexpected state change\n" if $verify_running != $running;

	    die "snapshot '$snapname' does not exist\n"
		if $snapname && !defined( $conf->{snapshots}->{$snapname});

	    # clone either the live config or the selected snapshot config
	    my $oldconf = $snapname ? $conf->{snapshots}->{$snapname} : $conf;

	    my $sharedvm = &$check_storage_access_clone($rpcenv, $authuser, $storecfg, $oldconf, $storage);

	    die "can't clone VM to node '$target' (VM uses local storage)\n" if $target && !$sharedvm;

	    my $conffile = PVE::QemuServer::config_file($newid);

	    die "unable to create VM $newid: config file already exists\n"
		if -f $conffile;

	    # new config starts locked until all disks are copied
	    my $newconf = { lock => 'clone' };
	    my $drives = {};
	    my $vollist = [];

	    foreach my $opt (keys %$oldconf) {
		my $value = $oldconf->{$opt};

		# do not copy snapshot related info
		next if $opt eq 'snapshots' || $opt eq 'parent' || $opt eq 'snaptime' ||
		    $opt eq 'vmstate' || $opt eq 'snapstate';

		# always change MAC! address
		if ($opt =~ m/^net(\d+)$/) {
		    my $net = PVE::QemuServer::parse_net($value);
		    $net->{macaddr} = PVE::Tools::random_ether_addr();
		    $newconf->{$opt} = PVE::QemuServer::print_net($net);
		} elsif (my $drive = PVE::QemuServer::parse_drive($opt, $value)) {
		    if (PVE::QemuServer::drive_is_cdrom($drive)) {
			$newconf->{$opt} = $value; # simply copy configuration
		    } else {
			# full copy required for non-base volumes or on request
			if ($param->{full} || !PVE::Storage::volume_is_base($storecfg, $drive->{file})) {
			    # NOTE(review): die message lacks a trailing "\n",
			    # so file/line info is appended - verify intended
			    die "Full clone feature is not available"
				if !PVE::Storage::volume_has_feature($storecfg, 'copy', $drive->{file}, $snapname, $running);
			    $drive->{full} = 1;
			}
			$drives->{$opt} = $drive;
			push @$vollist, $drive->{file};
		    }
		} else {
		    # copy everything else
		    $newconf->{$opt} = $value;
		}
	    }

	    # the clone is a regular VM, even when cloning a template
	    delete $newconf->{template};

	    if ($param->{name}) {
		$newconf->{name} = $param->{name};
	    } else {
		$newconf->{name} = "Copy-of-$oldconf->{name}";
	    }

	    if ($param->{description}) {
		$newconf->{description} = $param->{description};
	    }

	    # create empty/temp config - this fails if VM already exists on other node
	    PVE::Tools::file_set_contents($conffile, "# qmclone temporary file\nlock: clone\n");

	    my $realcmd = sub {
		my $upid = shift;

		# track created volumes so we can clean up on failure
		my $newvollist = [];

		eval {
		    # make sure we get cancelled cleanly
		    local $SIG{INT} = $SIG{TERM} = $SIG{QUIT} = $SIG{HUP} = sub { die "interrupted by signal\n"; };

		    PVE::Storage::activate_volumes($storecfg, $vollist);

		    foreach my $opt (keys %$drives) {
			my $drive = $drives->{$opt};

			my $newvolid;
			if (!$drive->{full}) {
			    print "create linked clone of drive $opt ($drive->{file})\n";
			    $newvolid = PVE::Storage::vdisk_clone($storecfg, $drive->{file}, $newid);
			    push @$newvollist, $newvolid;

			} else {
			    my ($storeid, $volname) = PVE::Storage::parse_volume_id($drive->{file});
			    $storeid = $storage if $storage;

			    # use requested format, else the drive's format,
			    # else the storage default
			    my $fmt = undef;
			    if($format){
				$fmt = $format;
			    }else{
				my $defformat = PVE::Storage::storage_default_format($storecfg, $storeid);
				$fmt = $drive->{format} || $defformat;
			    }

			    my ($size) = PVE::Storage::volume_size_info($storecfg, $drive->{file}, 3);

			    print "create full clone of drive $opt ($drive->{file})\n";
			    # size/1024: vdisk_alloc presumably expects KiB while
			    # volume_size_info returns bytes - TODO confirm
			    $newvolid = PVE::Storage::vdisk_alloc($storecfg, $storeid, $newid, $fmt, undef, ($size/1024));
			    push @$newvollist, $newvolid;

			    # offline (or snapshot) copies go through qemu-img;
			    # live copies use qemu's drive mirror
			    if(!$running || $snapname){
				PVE::QemuServer::qemu_img_convert($drive->{file}, $newvolid, $size, $snapname);
			    }else{
				PVE::QemuServer::qemu_drive_mirror($vmid, $opt, $newvolid, $newid);
			    }

			}

			my ($size) = PVE::Storage::volume_size_info($storecfg, $newvolid, 3);
			my $disk = $drive;
			$disk->{full} = undef;
			$disk->{format} = undef;
			$disk->{file} = $newvolid;
			$disk->{size} = $size;

			# NOTE(review): print_drive is passed $vmid (source),
			# not $newid - verify this is intentional
			$newconf->{$opt} = PVE::QemuServer::print_drive($vmid, $disk);

			# persist after each disk so partial progress survives
			PVE::QemuServer::update_config_nolock($newid, $newconf, 1);
		    }

		    # all disks copied - release the clone lock
		    delete $newconf->{lock};
		    PVE::QemuServer::update_config_nolock($newid, $newconf, 1);

		    if ($target) {
			# rename within the cluster filesystem moves ownership
			my $newconffile = PVE::QemuServer::config_file($newid, $target);
			die "Failed to move config to node '$target' - rename failed: $!\n"
			    if !rename($conffile, $newconffile);
		    }

		    PVE::AccessControl::add_vm_to_pool($newid, $pool) if $pool;
		};
		if (my $err = $@) {
		    # cleanup: remove temp config and every volume we created
		    unlink $conffile;

		    sleep 1; # some storage like rbd need to wait before release volume - really?

		    foreach my $volid (@$newvollist) {
			eval { PVE::Storage::vdisk_free($storecfg, $volid); };
			warn $@ if $@;
		    }
		    die "clone failed: $err";
		}

		return;
	    };

	    return $rpcenv->fork_worker('qmclone', $vmid, $authuser, $realcmd);
	};

	# lock order: source VM first (shared when not running), then an
	# exclusive lock on the new VMID while creating it
	return PVE::QemuServer::lock_config_mode($vmid, 1, $shared_lock, sub {
	    # Acquire exclusive lock lock for $newid
	    return PVE::QemuServer::lock_config_full($newid, 1, $clonefn);
	});

    }});
+
__PACKAGE__->register_method({
name => 'migrate_vm',
path => '{vmid}/migrate',
my $digest = extract_param($param, 'digest');
my $disk = extract_param($param, 'disk');
-
+
my $sizestr = extract_param($param, 'size');
my $skiplock = extract_param($param, 'skiplock');
die "you can't resize a cdrom\n" if PVE::QemuServer::drive_is_cdrom($drive);
+ die "you can't online resize a virtio windows bootdisk\n"
+ if PVE::QemuServer::check_running($vmid) && $conf->{bootdisk} eq $disk && $conf->{ostype} =~ m/^w/ && $disk =~ m/^virtio/;
+
my ($storeid, $volname) = PVE::Storage::parse_volume_id($volid);
$rpcenv->check($authuser, "/storage/$storeid", ['Datastore.AllocateSpace']);
PVE::Cluster::log_msg('info', $authuser, "update VM $vmid: resize --disk $disk --size $sizestr");
PVE::QemuServer::qemu_block_resize($vmid, "drive-$disk", $storecfg, $volid, $newsize);
-
+
$drive->{size} = $newsize;
$conf->{$disk} = PVE::QemuServer::print_drive($vmid, $drive);
return undef;
}});
# GET .../qemu/{vmid}/snapshot - list all snapshots of the VM, plus a
# pseudo entry 'current' describing the live configuration.
__PACKAGE__->register_method({
    name => 'snapshot_list',
    path => '{vmid}/snapshot',
    method => 'GET',
    description => "List all snapshots.",
    permissions => {
	check => ['perm', '/vms/{vmid}', [ 'VM.Audit' ]],
    },
    proxyto => 'node',
    protected => 1, # qemu pid files are only readable by root
    parameters => {
	additionalProperties => 0,
	properties => {
	    vmid => get_standard_option('pve-vmid'),
	    node => get_standard_option('pve-node'),
	},
    },
    returns => {
	type => 'array',
	items => {
	    type => "object",
	    properties => {},
	},
	links => [ { rel => 'child', href => "{name}" } ],
    },
    code => sub {
	my ($param) = @_;

	my $vmid = $param->{vmid};

	my $conf = PVE::QemuServer::load_config($vmid);
	my $snaphash = $conf->{snapshots} || {};

	# one entry per stored snapshot
	my $res = [ map {
	    my $info = $snaphash->{$_};
	    my $entry = {
		name => $_,
		snaptime => $info->{snaptime} || 0,
		vmstate => $info->{vmstate} ? 1 : 0,
		description => $info->{description} || '',
	    };
	    $entry->{parent} = $info->{parent} if $info->{parent};
	    $entry->{snapstate} = $info->{snapstate} if $info->{snapstate};
	    $entry;
	} keys %$snaphash ];

	# append the pseudo snapshot describing the current state
	my $current = {
	    name => 'current',
	    digest => $conf->{digest},
	    running => PVE::QemuServer::check_running($vmid, 1) ? 1 : 0,
	};
	$current->{parent} = $conf->{parent} if $conf->{parent};
	push @$res, $current;

	return $res;
    }});
+
# POST .../qemu/{vmid}/snapshot - take a new snapshot (async worker task).
__PACKAGE__->register_method({
    name => 'snapshot',
    path => '{vmid}/snapshot',
    method => 'POST',
    protected => 1,
    proxyto => 'node',
    description => "Snapshot a VM.",
    permissions => {
	check => ['perm', '/vms/{vmid}', [ 'VM.Snapshot' ]],
    },
    parameters => {
	additionalProperties => 0,
	properties => {
	    node => get_standard_option('pve-node'),
	    vmid => get_standard_option('pve-vmid'),
	    snapname => get_standard_option('pve-snapshot-name'),
	    vmstate => {
		optional => 1,
		type => 'boolean',
		description => "Save the vmstate",
	    },
	    freezefs => {
		optional => 1,
		type => 'boolean',
		description => "Freeze the filesystem",
	    },
	    description => {
		optional => 1,
		type => 'string',
		description => "A textual description or comment.",
	    },
	},
    },
    returns => {
	type => 'string',
	description => "the task ID.",
    },
    code => sub {
	my ($param) = @_;

	my $rpcenv = PVE::RPCEnvironment::get();
	my $authuser = $rpcenv->get_user();

	my $node = extract_param($param, 'node');
	my $vmid = extract_param($param, 'vmid');
	my $snapname = extract_param($param, 'snapname');

	# 'current' denotes the live config in the snapshot tree
	die "unable to use snapshot name 'current' (reserved name)\n"
	    if $snapname eq 'current';

	my $vmstate = $param->{vmstate};
	my $freezefs = $param->{freezefs};
	my $description = $param->{description};

	my $worker = sub {
	    PVE::Cluster::log_msg('info', $authuser, "snapshot VM $vmid: $snapname");
	    PVE::QemuServer::snapshot_create($vmid, $snapname, $vmstate, $freezefs, $description);
	};

	return $rpcenv->fork_worker('qmsnapshot', $vmid, $authuser, $worker);
    }});
+
# Index of commands available below .../snapshot/{snapname}.
__PACKAGE__->register_method({
    name => 'snapshot_cmd_idx',
    path => '{vmid}/snapshot/{snapname}',
    description => '',
    method => 'GET',
    permissions => {
	user => 'all',
    },
    parameters => {
	additionalProperties => 0,
	properties => {
	    vmid => get_standard_option('pve-vmid'),
	    node => get_standard_option('pve-node'),
	    snapname => get_standard_option('pve-snapshot-name'),
	},
    },
    returns => {
	type => 'array',
	items => {
	    type => "object",
	    properties => {},
	},
	links => [ { rel => 'child', href => "{cmd}" } ],
    },
    code => sub {
	my ($param) = @_;

	# one entry per sub-handler registered below this path
	return [ map { { cmd => $_ } } qw(rollback config) ];
    }});
+
# PUT .../snapshot/{snapname}/config - update snapshot metadata; currently
# only the description can be changed.
__PACKAGE__->register_method({
    name => 'update_snapshot_config',
    path => '{vmid}/snapshot/{snapname}/config',
    method => 'PUT',
    protected => 1,
    proxyto => 'node',
    description => "Update snapshot metadata.",
    permissions => {
	check => ['perm', '/vms/{vmid}', [ 'VM.Snapshot' ]],
    },
    parameters => {
	additionalProperties => 0,
	properties => {
	    node => get_standard_option('pve-node'),
	    vmid => get_standard_option('pve-vmid'),
	    snapname => get_standard_option('pve-snapshot-name'),
	    description => {
		optional => 1,
		type => 'string',
		description => "A textual description or comment.",
	    },
	},
    },
    returns => { type => 'null' },
    code => sub {
	my ($param) = @_;

	my $rpcenv = PVE::RPCEnvironment::get();
	my $authuser = $rpcenv->get_user();

	my $vmid = extract_param($param, 'vmid');
	my $snapname = extract_param($param, 'snapname');

	my $description = $param->{description};
	return undef if !defined($description); # nothing to update

	my $change_desc = sub {
	    my $conf = PVE::QemuServer::load_config($vmid);

	    PVE::QemuServer::check_lock($conf);

	    my $snap = $conf->{snapshots}->{$snapname};
	    die "snapshot '$snapname' does not exist\n" if !defined($snap);

	    $snap->{description} = $description;

	    PVE::QemuServer::update_config_nolock($vmid, $conf, 1);
	};

	PVE::QemuServer::lock_config($vmid, $change_desc);

	return undef;
    }});
+
# GET .../snapshot/{snapname}/config - return the stored snapshot section.
__PACKAGE__->register_method({
    name => 'get_snapshot_config',
    path => '{vmid}/snapshot/{snapname}/config',
    method => 'GET',
    proxyto => 'node',
    description => "Get snapshot configuration",
    permissions => {
	check => ['perm', '/vms/{vmid}', [ 'VM.Snapshot' ]],
    },
    parameters => {
	additionalProperties => 0,
	properties => {
	    node => get_standard_option('pve-node'),
	    vmid => get_standard_option('pve-vmid'),
	    snapname => get_standard_option('pve-snapshot-name'),
	},
    },
    returns => { type => "object" },
    code => sub {
	my ($param) = @_;

	my $rpcenv = PVE::RPCEnvironment::get();
	my $authuser = $rpcenv->get_user();

	my $vmid = extract_param($param, 'vmid');
	my $snapname = extract_param($param, 'snapname');

	my $snap = PVE::QemuServer::load_config($vmid)->{snapshots}->{$snapname};
	die "snapshot '$snapname' does not exist\n" if !defined($snap);

	return $snap;
    }});
+
# POST .../snapshot/{snapname}/rollback - revert the VM to a snapshot
# (async worker task).
__PACKAGE__->register_method({
    name => 'rollback',
    path => '{vmid}/snapshot/{snapname}/rollback',
    method => 'POST',
    protected => 1,
    proxyto => 'node',
    description => "Rollback VM state to specified snapshot.",
    permissions => {
	check => ['perm', '/vms/{vmid}', [ 'VM.Snapshot' ]],
    },
    parameters => {
	additionalProperties => 0,
	properties => {
	    node => get_standard_option('pve-node'),
	    vmid => get_standard_option('pve-vmid'),
	    snapname => get_standard_option('pve-snapshot-name'),
	},
    },
    returns => {
	type => 'string',
	description => "the task ID.",
    },
    code => sub {
	my ($param) = @_;

	my $rpcenv = PVE::RPCEnvironment::get();
	my $authuser = $rpcenv->get_user();

	my $node = extract_param($param, 'node');
	my $vmid = extract_param($param, 'vmid');
	my $snapname = extract_param($param, 'snapname');

	my $worker = sub {
	    PVE::Cluster::log_msg('info', $authuser, "rollback snapshot VM $vmid: $snapname");
	    PVE::QemuServer::snapshot_rollback($vmid, $snapname);
	};

	return $rpcenv->fork_worker('qmrollback', $vmid, $authuser, $worker);
    }});
+
# DELETE .../snapshot/{snapname} - remove a snapshot (async worker task).
__PACKAGE__->register_method({
    name => 'delsnapshot',
    path => '{vmid}/snapshot/{snapname}',
    method => 'DELETE',
    protected => 1,
    proxyto => 'node',
    description => "Delete a VM snapshot.",
    permissions => {
	check => ['perm', '/vms/{vmid}', [ 'VM.Snapshot' ]],
    },
    parameters => {
	additionalProperties => 0,
	properties => {
	    node => get_standard_option('pve-node'),
	    vmid => get_standard_option('pve-vmid'),
	    snapname => get_standard_option('pve-snapshot-name'),
	    force => {
		optional => 1,
		type => 'boolean',
		description => "For removal from config file, even if removing disk snapshots fails.",
	    },
	},
    },
    returns => {
	type => 'string',
	description => "the task ID.",
    },
    code => sub {
	my ($param) = @_;

	my $rpcenv = PVE::RPCEnvironment::get();
	my $authuser = $rpcenv->get_user();

	my $node = extract_param($param, 'node');
	my $vmid = extract_param($param, 'vmid');
	my $snapname = extract_param($param, 'snapname');
	my $force = $param->{force};

	my $worker = sub {
	    PVE::Cluster::log_msg('info', $authuser, "delete snapshot VM $vmid: $snapname");
	    PVE::QemuServer::snapshot_delete($vmid, $snapname, $force);
	};

	return $rpcenv->fork_worker('qmdelsnapshot', $vmid, $authuser, $worker);
    }});
+
# POST .../qemu/{vmid}/template - convert a (stopped, snapshot-free) VM into
# a template; optionally only one disk is converted to a base image.
__PACKAGE__->register_method({
    name => 'template',
    path => '{vmid}/template',
    method => 'POST',
    protected => 1,
    proxyto => 'node',
    description => "Create a Template.",
    permissions => {
	description => "You need 'VM.Allocate' permissions on /vms/{vmid}",
	check => [ 'perm', '/vms/{vmid}', ['VM.Allocate']],
    },
    parameters => {
	additionalProperties => 0,
	properties => {
	    node => get_standard_option('pve-node'),
	    vmid => get_standard_option('pve-vmid'),
	    disk => {
		optional => 1,
		type => 'string',
		description => "If you want to convert only 1 disk to base image.",
		enum => [PVE::QemuServer::disknames()],
	    },

	},
    },
    returns => { type => 'null'},
    code => sub {
	my ($param) = @_;

	my $rpcenv = PVE::RPCEnvironment::get();

	my $authuser = $rpcenv->get_user();

	my $node = extract_param($param, 'node');

	my $vmid = extract_param($param, 'vmid');

	my $disk = extract_param($param, 'disk');

	my $updatefn = sub {

	    my $conf = PVE::QemuServer::load_config($vmid);

	    PVE::QemuServer::check_lock($conf);

	    die "unable to create template, because VM contains snapshots\n"
		if $conf->{snapshots} && scalar(keys %{$conf->{snapshots}});

	    # re-running with a specific $disk on an existing template is allowed
	    die "you can't convert a template to a template\n"
		if PVE::QemuServer::is_template($conf) && !$disk;

	    die "you can't convert a VM to template if VM is running\n"
		if PVE::QemuServer::check_running($vmid);

	    my $realcmd = sub {
		PVE::QemuServer::template_create($vmid, $conf, $disk);
	    };

	    # NOTE(review): the 'template' flag is persisted *before* the
	    # worker converts the disks; if template_create fails the VM
	    # stays marked as a template - verify this is intended.
	    $conf->{template} = 1;
	    PVE::QemuServer::update_config_nolock($vmid, $conf, 1);

	    return $rpcenv->fork_worker('qmtemplate', $vmid, $authuser, $realcmd);
	};

	PVE::QemuServer::lock_config($vmid, $updatefn);
	return undef;
    }});
+
1;