use warnings;
use Cwd 'abs_path';
use Net::SSLeay;
+use UUID;
use PVE::Cluster qw (cfs_read_file cfs_write_file);;
use PVE::SafeSyslog;
use PVE::Storage;
use PVE::JSONSchema qw(get_standard_option);
use PVE::RESTHandler;
+use PVE::QemuConfig;
use PVE::QemuServer;
use PVE::QemuMigrate;
use PVE::RPCEnvironment;
use PVE::AccessControl;
use PVE::INotify;
use PVE::Network;
+use PVE::Firewall;
+use PVE::API2::Firewall::VM;
+use PVE::HA::Env::PVE2;
+use PVE::HA::Config;
use Data::Dumper; # fixme: remove
}
};
-
my $check_storage_access = sub {
my ($rpcenv, $authuser, $storecfg, $vmid, $settings, $default_storage) = @_;
foreach my $opt (@$key_list) {
# disk checks need to be done somewhere else
- next if PVE::QemuServer::valid_drivename($opt);
+ next if PVE::QemuServer::is_valid_drivename($opt);
if ($opt eq 'sockets' || $opt eq 'cores' ||
- $opt eq 'cpu' || $opt eq 'smp' ||
+ $opt eq 'cpu' || $opt eq 'smp' || $opt eq 'vcpus' ||
$opt eq 'cpulimit' || $opt eq 'cpuunits') {
$rpcenv->check_vm_perm($authuser, $vmid, $pool, ['VM.Config.CPU']);
- } elsif ($opt eq 'boot' || $opt eq 'bootdisk') {
- $rpcenv->check_vm_perm($authuser, $vmid, $pool, ['VM.Config.Disk']);
} elsif ($opt eq 'memory' || $opt eq 'balloon' || $opt eq 'shares') {
$rpcenv->check_vm_perm($authuser, $vmid, $pool, ['VM.Config.Memory']);
} elsif ($opt eq 'args' || $opt eq 'lock') {
die "only root can set '$opt' config\n";
} elsif ($opt eq 'cpu' || $opt eq 'kvm' || $opt eq 'acpi' || $opt eq 'machine' ||
- $opt eq 'vga' || $opt eq 'watchdog' || $opt eq 'tablet') {
+ $opt eq 'vga' || $opt eq 'watchdog' || $opt eq 'tablet' || $opt eq 'smbios1') {
$rpcenv->check_vm_perm($authuser, $vmid, $pool, ['VM.Config.HWType']);
} elsif ($opt =~ m/^net\d+$/) {
$rpcenv->check_vm_perm($authuser, $vmid, $pool, ['VM.Config.Network']);
additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
+ full => {
+ type => 'boolean',
+ optional => 1,
+ description => "Determine the full status of active VMs.",
+ },
},
},
returns => {
my $rpcenv = PVE::RPCEnvironment::get();
my $authuser = $rpcenv->get_user();
- my $vmstatus = PVE::QemuServer::vmstatus();
+ my $vmstatus = PVE::QemuServer::vmstatus(undef, $param->{full});
my $res = [];
foreach my $vmid (keys %$vmstatus) {
next if !$rpcenv->check($authuser, "/vms/$vmid", [ 'VM.Audit' ], 1);
my $data = $vmstatus->{$vmid};
- $data->{vmid} = $vmid;
+ $data->{vmid} = int($vmid);
push @$res, $data;
}
properties => PVE::QemuServer::json_config_properties(
{
node => get_standard_option('pve-node'),
- vmid => get_standard_option('pve-vmid'),
+ vmid => get_standard_option('pve-vmid', { completion => \&PVE::Cluster::complete_next_vmid }),
archive => {
description => "The backup file.",
type => 'string',
optional => 1,
maxLength => 255,
+ completion => \&PVE::QemuServer::complete_backup_archives,
},
storage => get_standard_option('pve-storage-id', {
description => "Default storage.",
optional => 1,
+ completion => \&PVE::QemuServer::complete_storage,
}),
force => {
optional => 1,
my $pool = extract_param($param, 'pool');
- my $filename = PVE::QemuServer::config_file($vmid);
+ my $filename = PVE::QemuConfig->config_file($vmid);
my $storecfg = PVE::Storage::config();
&$check_vm_modify_config_perm($rpcenv, $authuser, $vmid, $pool, [ keys %$param]);
foreach my $opt (keys %$param) {
- if (PVE::QemuServer::valid_drivename($opt)) {
+ if (PVE::QemuServer::is_valid_drivename($opt)) {
my $drive = PVE::QemuServer::parse_drive($opt, $param->{$opt});
raise_param_exc({ $opt => "unable to parse drive options" }) if !$drive;
}
my $restorefn = sub {
+ my $vmlist = PVE::Cluster::get_vmlist();
+ if ($vmlist->{ids}->{$vmid}) {
+ my $current_node = $vmlist->{ids}->{$vmid}->{node};
+ if ($current_node eq $node) {
+ my $conf = PVE::QemuConfig->load_config($vmid);
+
+ PVE::QemuConfig->check_protection($conf, "unable to restore VM $vmid");
- # fixme: this test does not work if VM exists on other node!
- if (-f $filename) {
- die "unable to restore vm $vmid: config file already exists\n"
- if !$force;
+ die "unable to restore vm $vmid - config file already exists\n"
+ if !$force;
- die "unable to restore vm $vmid: vm is running\n"
- if PVE::QemuServer::check_running($vmid);
+ die "unable to restore vm $vmid - vm is running\n"
+ if PVE::QemuServer::check_running($vmid);
+ } else {
+ die "unable to restore vm $vmid - already existing on cluster node '$current_node'\n";
+ }
}
my $realcmd = sub {
my $createfn = sub {
# test after locking
- die "unable to create vm $vmid: config file already exists\n"
- if -f $filename;
+ PVE::Cluster::check_vmid_unused($vmid);
my $realcmd = sub {
$vollist = &$create_disks($rpcenv, $authuser, $conf, $storecfg, $vmid, $pool, $param, $storage);
# try to be smart about bootdisk
- my @disks = PVE::QemuServer::disknames();
+ my @disks = PVE::QemuServer::valid_drive_names();
my $firstdisk;
foreach my $ds (reverse @disks) {
next if !$conf->{$ds};
$conf->{bootdisk} = $firstdisk;
}
- PVE::QemuServer::update_config_nolock($vmid, $conf);
+ # auto generate uuid if user did not specify smbios1 option
+ if (!$conf->{smbios1}) {
+ my ($uuid, $uuid_str);
+ UUID::generate($uuid);
+ UUID::unparse($uuid, $uuid_str);
+ $conf->{smbios1} = "uuid=$uuid_str";
+ }
+
+ PVE::QemuConfig->write_config($vmid, $conf);
};
my $err = $@;
return $rpcenv->fork_worker('qmcreate', $vmid, $authuser, $realcmd);
};
- return PVE::QemuServer::lock_config_full($vmid, 1, $archive ? $restorefn : $createfn);
+ return PVE::QemuConfig->lock_config_full($vmid, 1, $archive ? $restorefn : $createfn);
}});
__PACKAGE__->register_method({
my $res = [
{ subdir => 'config' },
+ { subdir => 'pending' },
{ subdir => 'status' },
{ subdir => 'unlink' },
{ subdir => 'vncproxy' },
{ subdir => 'monitor' },
{ subdir => 'snapshot' },
{ subdir => 'spiceproxy' },
+ { subdir => 'sendkey' },
+ { subdir => 'firewall' },
];
return $res;
}});
+__PACKAGE__->register_method ({
+ subclass => "PVE::API2::Firewall::VM",
+ path => '{vmid}/firewall',
+});
+
__PACKAGE__->register_method({
name => 'rrd',
path => '{vmid}/rrd',
path => '{vmid}/config',
method => 'GET',
proxyto => 'node',
- description => "Get virtual machine configuration.",
+ description => "Get current virtual machine configuration. This does not include pending configuration changes (see 'pending' API).",
permissions => {
check => ['perm', '/vms/{vmid}', [ 'VM.Audit' ]],
},
additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
- vmid => get_standard_option('pve-vmid'),
+ vmid => get_standard_option('pve-vmid', { completion => \&PVE::QemuServer::complete_vmid }),
+ current => {
+ description => "Get current values (instead of pending values).",
+ optional => 1,
+ default => 0,
+ type => 'boolean',
+ },
},
},
returns => {
code => sub {
my ($param) = @_;
- my $conf = PVE::QemuServer::load_config($param->{vmid});
+ my $conf = PVE::QemuConfig->load_config($param->{vmid});
delete $conf->{snapshots};
- return $conf;
- }});
-
-my $vm_is_volid_owner = sub {
- my ($storecfg, $vmid, $volid) =@_;
-
- if ($volid !~ m|^/|) {
- my ($path, $owner);
- eval { ($path, $owner) = PVE::Storage::path($storecfg, $volid); };
- if ($owner && ($owner == $vmid)) {
- return 1;
- }
- }
-
- return undef;
-};
-
-my $test_deallocate_drive = sub {
- my ($storecfg, $vmid, $key, $drive, $force) = @_;
-
- if (!PVE::QemuServer::drive_is_cdrom($drive)) {
- my $volid = $drive->{file};
- if (&$vm_is_volid_owner($storecfg, $vmid, $volid)) {
- if ($force || $key =~ m/^unused/) {
- my $sid = PVE::Storage::parse_volume_id($volid);
- return $sid;
+ if (!$param->{current}) {
+ foreach my $opt (keys %{$conf->{pending}}) {
+ next if $opt eq 'delete';
+ my $value = $conf->{pending}->{$opt};
+ next if ref($value); # just to be sure
+ $conf->{$opt} = $value;
}
- }
- }
-
- return undef;
-};
-
-my $delete_drive = sub {
- my ($conf, $storecfg, $vmid, $key, $drive, $force) = @_;
-
- if (!PVE::QemuServer::drive_is_cdrom($drive)) {
- my $volid = $drive->{file};
-
- if (&$vm_is_volid_owner($storecfg, $vmid, $volid)) {
- if ($force || $key =~ m/^unused/) {
- eval {
- # check if the disk is really unused
- my $used_paths = PVE::QemuServer::get_used_paths($vmid, $storecfg, $conf, 1, $key);
- my $path = PVE::Storage::path($storecfg, $volid);
-
- die "unable to delete '$volid' - volume is still in use (snapshot?)\n"
- if $used_paths->{$path};
-
- PVE::Storage::vdisk_free($storecfg, $volid);
- };
- die $@ if $@;
- } else {
- PVE::QemuServer::add_unused_volume($conf, $volid, $vmid);
+ my $pending_delete_hash = PVE::QemuServer::split_flagged_list($conf->{pending}->{delete});
+ foreach my $opt (keys %$pending_delete_hash) {
+ delete $conf->{$opt} if $conf->{$opt};
}
}
- }
-
- delete $conf->{$key};
-};
-
-my $vmconfig_delete_option = sub {
- my ($rpcenv, $authuser, $conf, $storecfg, $vmid, $opt, $force) = @_;
-
- return if !defined($conf->{$opt});
-
- my $isDisk = PVE::QemuServer::valid_drivename($opt)|| ($opt =~ m/^unused/);
- if ($isDisk) {
- $rpcenv->check_vm_perm($authuser, $vmid, undef, ['VM.Config.Disk']);
+ delete $conf->{pending};
- my $drive = PVE::QemuServer::parse_drive($opt, $conf->{$opt});
- if (my $sid = &$test_deallocate_drive($storecfg, $vmid, $opt, $drive, $force)) {
- $rpcenv->check($authuser, "/storage/$sid", ['Datastore.AllocateSpace']);
- }
- }
-
- my $unplugwarning = "";
- if ($conf->{ostype} && $conf->{ostype} eq 'l26') {
- $unplugwarning = "<br>verify that you have acpiphp && pci_hotplug modules loaded in your guest VM";
- } elsif ($conf->{ostype} && $conf->{ostype} eq 'l24') {
- $unplugwarning = "<br>kernel 2.4 don't support hotplug, please disable hotplug in options";
- } elsif (!$conf->{ostype} || ($conf->{ostype} && $conf->{ostype} eq 'other')) {
- $unplugwarning = "<br>verify that your guest support acpi hotplug";
- }
-
- if ($opt eq 'tablet') {
- PVE::QemuServer::vm_deviceplug(undef, $conf, $vmid, $opt);
- } else {
- die "error hot-unplug $opt $unplugwarning" if !PVE::QemuServer::vm_deviceunplug($vmid, $conf, $opt);
- }
-
- if ($isDisk) {
- my $drive = PVE::QemuServer::parse_drive($opt, $conf->{$opt});
- &$delete_drive($conf, $storecfg, $vmid, $opt, $drive, $force);
- } else {
- delete $conf->{$opt};
- }
-
- PVE::QemuServer::update_config_nolock($vmid, $conf, 1);
-};
-
-my $safe_num_ne = sub {
- my ($a, $b) = @_;
-
- return 0 if !defined($a) && !defined($b);
- return 1 if !defined($a);
- return 1 if !defined($b);
-
- return $a != $b;
-};
-
-my $vmconfig_update_disk = sub {
- my ($rpcenv, $authuser, $conf, $storecfg, $vmid, $opt, $value, $force) = @_;
-
- my $drive = PVE::QemuServer::parse_drive($opt, $value);
-
- if (PVE::QemuServer::drive_is_cdrom($drive)) { #cdrom
- $rpcenv->check_vm_perm($authuser, $vmid, undef, ['VM.Config.CDROM']);
- } else {
- $rpcenv->check_vm_perm($authuser, $vmid, undef, ['VM.Config.Disk']);
- }
-
- if ($conf->{$opt}) {
+ return $conf;
+ }});
- if (my $old_drive = PVE::QemuServer::parse_drive($opt, $conf->{$opt})) {
+__PACKAGE__->register_method({
+ name => 'vm_pending',
+ path => '{vmid}/pending',
+ method => 'GET',
+ proxyto => 'node',
+ description => "Get virtual machine configuration, including pending changes.",
+ permissions => {
+ check => ['perm', '/vms/{vmid}', [ 'VM.Audit' ]],
+ },
+ parameters => {
+ additionalProperties => 0,
+ properties => {
+ node => get_standard_option('pve-node'),
+ vmid => get_standard_option('pve-vmid', { completion => \&PVE::QemuServer::complete_vmid }),
+ },
+ },
+ returns => {
+ type => "array",
+ items => {
+ type => "object",
+ properties => {
+ key => {
+ description => "Configuration option name.",
+ type => 'string',
+ },
+ value => {
+ description => "Current value.",
+ type => 'string',
+ optional => 1,
+ },
+ pending => {
+ description => "Pending value.",
+ type => 'string',
+ optional => 1,
+ },
+ delete => {
+ description => "Indicates a pending delete request if present and not 0. " .
+ "The value 2 indicates a force-delete request.",
+ type => 'integer',
+ minimum => 0,
+ maximum => 2,
+ optional => 1,
+ },
+ },
+ },
+ },
+ code => sub {
+ my ($param) = @_;
- my $media = $drive->{media} || 'disk';
- my $oldmedia = $old_drive->{media} || 'disk';
- die "unable to change media type\n" if $media ne $oldmedia;
+ my $conf = PVE::QemuConfig->load_config($param->{vmid});
- if (!PVE::QemuServer::drive_is_cdrom($old_drive) &&
- ($drive->{file} ne $old_drive->{file})) { # delete old disks
+ my $pending_delete_hash = PVE::QemuServer::split_flagged_list($conf->{pending}->{delete});
- &$vmconfig_delete_option($rpcenv, $authuser, $conf, $storecfg, $vmid, $opt, $force);
- $conf = PVE::QemuServer::load_config($vmid); # update/reload
- }
+ my $res = [];
- if(&$safe_num_ne($drive->{mbps}, $old_drive->{mbps}) ||
- &$safe_num_ne($drive->{mbps_rd}, $old_drive->{mbps_rd}) ||
- &$safe_num_ne($drive->{mbps_wr}, $old_drive->{mbps_wr}) ||
- &$safe_num_ne($drive->{iops}, $old_drive->{iops}) ||
- &$safe_num_ne($drive->{iops_rd}, $old_drive->{iops_rd}) ||
- &$safe_num_ne($drive->{iops_wr}, $old_drive->{iops_wr})) {
- PVE::QemuServer::qemu_block_set_io_throttle($vmid,"drive-$opt",
- ($drive->{mbps} || 0)*1024*1024,
- ($drive->{mbps_rd} || 0)*1024*1024,
- ($drive->{mbps_wr} || 0)*1024*1024,
- $drive->{iops} || 0,
- $drive->{iops_rd} || 0,
- $drive->{iops_wr} || 0)
- if !PVE::QemuServer::drive_is_cdrom($drive);
- }
+ foreach my $opt (keys %$conf) {
+ next if ref($conf->{$opt});
+ my $item = { key => $opt };
+ $item->{value} = $conf->{$opt} if defined($conf->{$opt});
+ $item->{pending} = $conf->{pending}->{$opt} if defined($conf->{pending}->{$opt});
+ $item->{delete} = ($pending_delete_hash->{$opt} ? 2 : 1) if exists $pending_delete_hash->{$opt};
+ push @$res, $item;
}
- }
- &$create_disks($rpcenv, $authuser, $conf, $storecfg, $vmid, undef, {$opt => $value});
- PVE::QemuServer::update_config_nolock($vmid, $conf, 1);
-
- $conf = PVE::QemuServer::load_config($vmid); # update/reload
- $drive = PVE::QemuServer::parse_drive($opt, $conf->{$opt});
-
- if (PVE::QemuServer::drive_is_cdrom($drive)) { # cdrom
-
- if (PVE::QemuServer::check_running($vmid)) {
- if ($drive->{file} eq 'none') {
- PVE::QemuServer::vm_mon_cmd($vmid, "eject",force => JSON::true,device => "drive-$opt");
- } else {
- my $path = PVE::QemuServer::get_iso_path($storecfg, $vmid, $drive->{file});
- PVE::QemuServer::vm_mon_cmd($vmid, "eject",force => JSON::true,device => "drive-$opt"); #force eject if locked
- PVE::QemuServer::vm_mon_cmd($vmid, "change",device => "drive-$opt",target => "$path") if $path;
- }
+ foreach my $opt (keys %{$conf->{pending}}) {
+ next if $opt eq 'delete';
+ next if ref($conf->{pending}->{$opt}); # just to be sure
+ next if defined($conf->{$opt});
+ my $item = { key => $opt };
+ $item->{pending} = $conf->{pending}->{$opt};
+ push @$res, $item;
}
- } else { # hotplug new disks
-
- die "error hotplug $opt" if !PVE::QemuServer::vm_deviceplug($storecfg, $conf, $vmid, $opt, $drive);
- }
-};
-
-my $vmconfig_update_net = sub {
- my ($rpcenv, $authuser, $conf, $storecfg, $vmid, $opt, $value) = @_;
-
- if ($conf->{$opt} && PVE::QemuServer::check_running($vmid)) {
- my $oldnet = PVE::QemuServer::parse_net($conf->{$opt});
- my $newnet = PVE::QemuServer::parse_net($value);
-
- if($oldnet->{model} ne $newnet->{model}){
- #if model change, we try to hot-unplug
- die "error hot-unplug $opt for update" if !PVE::QemuServer::vm_deviceunplug($vmid, $conf, $opt);
- }else{
-
- if($newnet->{bridge} && $oldnet->{bridge}){
- my $iface = "tap".$vmid."i".$1 if $opt =~ m/net(\d+)/;
-
- if($newnet->{rate} ne $oldnet->{rate}){
- PVE::Network::tap_rate_limit($iface, $newnet->{rate});
- }
-
- if(($newnet->{bridge} ne $oldnet->{bridge}) || ($newnet->{tag} ne $oldnet->{tag})){
- eval{PVE::Network::tap_unplug($iface, $oldnet->{bridge}, $oldnet->{tag});};
- PVE::Network::tap_plug($iface, $newnet->{bridge}, $newnet->{tag});
- }
-
- }else{
- #if bridge/nat mode change, we try to hot-unplug
- die "error hot-unplug $opt for update" if !PVE::QemuServer::vm_deviceunplug($vmid, $conf, $opt);
- }
+ while (my ($opt, $force) = each %$pending_delete_hash) {
+ next if $conf->{pending}->{$opt}; # just to be sure
+ next if $conf->{$opt};
+ my $item = { key => $opt, delete => ($force ? 2 : 1)};
+ push @$res, $item;
}
- }
- $conf->{$opt} = $value;
- PVE::QemuServer::update_config_nolock($vmid, $conf, 1);
- $conf = PVE::QemuServer::load_config($vmid); # update/reload
-
- my $net = PVE::QemuServer::parse_net($conf->{$opt});
-
- die "error hotplug $opt" if !PVE::QemuServer::vm_deviceplug($storecfg, $conf, $vmid, $opt, $net);
-};
+ return $res;
+ }});
# POST/PUT {vmid}/config implementation
#
my $delete_str = extract_param($param, 'delete');
+ my $revert_str = extract_param($param, 'revert');
+
my $force = extract_param($param, 'force');
- die "no options specified\n" if !$delete_str && !scalar(keys %$param);
+ die "no options specified\n" if !$delete_str && !$revert_str && !scalar(keys %$param);
my $storecfg = PVE::Storage::config();
# now try to verify all parameters
+ my $revert = {};
+ foreach my $opt (PVE::Tools::split_list($revert_str)) {
+ if (!PVE::QemuServer::option_exists($opt)) {
+ raise_param_exc({ revert => "unknown option '$opt'" });
+ }
+
+ raise_param_exc({ delete => "you can't use '-$opt' and " .
+ "'-revert $opt' at the same time" })
+ if defined($param->{$opt});
+
+ $revert->{$opt} = 1;
+ }
+
my @delete = ();
foreach my $opt (PVE::Tools::split_list($delete_str)) {
$opt = 'ide2' if $opt eq 'cdrom';
+
raise_param_exc({ delete => "you can't use '-$opt' and " .
"-delete $opt' at the same time" })
if defined($param->{$opt});
+ raise_param_exc({ revert => "you can't use '-delete $opt' and " .
+ "'-revert $opt' at the same time" })
+ if $revert->{$opt};
+
if (!PVE::QemuServer::option_exists($opt)) {
raise_param_exc({ delete => "unknown option '$opt'" });
}
}
foreach my $opt (keys %$param) {
- if (PVE::QemuServer::valid_drivename($opt)) {
+ if (PVE::QemuServer::is_valid_drivename($opt)) {
# cleanup drive path
my $drive = PVE::QemuServer::parse_drive($opt, $param->{$opt});
PVE::QemuServer::cleanup_drive_path($opt, $storecfg, $drive);
my $updatefn = sub {
- my $conf = PVE::QemuServer::load_config($vmid);
+ my $conf = PVE::QemuConfig->load_config($vmid);
die "checksum missmatch (file change by other user?)\n"
if $digest && $digest ne $conf->{digest};
- PVE::QemuServer::check_lock($conf) if !$skiplock;
+ PVE::QemuConfig->check_lock($conf) if !$skiplock;
+
+ foreach my $opt (keys %$revert) {
+ if (defined($conf->{$opt})) {
+ $param->{$opt} = $conf->{$opt};
+ } elsif (defined($conf->{pending}->{$opt})) {
+ push @delete, $opt;
+ }
+ }
if ($param->{memory} || defined($param->{balloon})) {
- my $maxmem = $param->{memory} || $conf->{memory} || $defaults->{memory};
- my $balloon = defined($param->{balloon}) ? $param->{balloon} : $conf->{balloon};
+ my $maxmem = $param->{memory} || $conf->{pending}->{memory} || $conf->{memory} || $defaults->{memory};
+ my $balloon = defined($param->{balloon}) ? $param->{balloon} : $conf->{pending}->{balloon} || $conf->{balloon};
die "balloon value too large (must be smaller than assigned memory)\n"
if $balloon && $balloon > $maxmem;
print "update VM $vmid: " . join (' ', @paramarr) . "\n";
- foreach my $opt (@delete) { # delete
- $conf = PVE::QemuServer::load_config($vmid); # update/reload
- &$vmconfig_delete_option($rpcenv, $authuser, $conf, $storecfg, $vmid, $opt, $force);
- }
+ # write updates to pending section
- my $running = PVE::QemuServer::check_running($vmid);
+ my $modified = {}; # record what $option we modify
- foreach my $opt (keys %$param) { # add/change
+ foreach my $opt (@delete) {
+ $modified->{$opt} = 1;
+ $conf = PVE::QemuConfig->load_config($vmid); # update/reload
+ if ($opt =~ m/^unused/) {
+ my $drive = PVE::QemuServer::parse_drive($opt, $conf->{$opt});
+ PVE::QemuConfig->check_protection($conf, "can't remove unused disk '$drive->{file}'");
+ $rpcenv->check_vm_perm($authuser, $vmid, undef, ['VM.Config.Disk']);
+ if (PVE::QemuServer::try_deallocate_drive($storecfg, $vmid, $conf, $opt, $drive, $rpcenv, $authuser)) {
+ delete $conf->{$opt};
+ PVE::QemuConfig->write_config($vmid, $conf);
+ }
+ } elsif (PVE::QemuServer::is_valid_drivename($opt)) {
+ PVE::QemuConfig->check_protection($conf, "can't remove drive '$opt'");
+ $rpcenv->check_vm_perm($authuser, $vmid, undef, ['VM.Config.Disk']);
+ PVE::QemuServer::vmconfig_register_unused_drive($storecfg, $vmid, $conf, PVE::QemuServer::parse_drive($opt, $conf->{pending}->{$opt}))
+ if defined($conf->{pending}->{$opt});
+ PVE::QemuServer::vmconfig_delete_pending_option($conf, $opt, $force);
+ PVE::QemuConfig->write_config($vmid, $conf);
+ } else {
+ PVE::QemuServer::vmconfig_delete_pending_option($conf, $opt, $force);
+ PVE::QemuConfig->write_config($vmid, $conf);
+ }
+ }
- $conf = PVE::QemuServer::load_config($vmid); # update/reload
+ foreach my $opt (keys %$param) { # add/change
+ $modified->{$opt} = 1;
+ $conf = PVE::QemuConfig->load_config($vmid); # update/reload
+ next if defined($conf->{pending}->{$opt}) && ($param->{$opt} eq $conf->{pending}->{$opt}); # skip if nothing changed
- next if $conf->{$opt} && ($param->{$opt} eq $conf->{$opt}); # skip if nothing changed
+ if (PVE::QemuServer::is_valid_drivename($opt)) {
+ my $drive = PVE::QemuServer::parse_drive($opt, $param->{$opt});
+ if (PVE::QemuServer::drive_is_cdrom($drive)) { # CDROM
+ $rpcenv->check_vm_perm($authuser, $vmid, undef, ['VM.Config.CDROM']);
+ } else {
+ $rpcenv->check_vm_perm($authuser, $vmid, undef, ['VM.Config.Disk']);
+ }
+ PVE::QemuServer::vmconfig_register_unused_drive($storecfg, $vmid, $conf, PVE::QemuServer::parse_drive($opt, $conf->{pending}->{$opt}))
+ if defined($conf->{pending}->{$opt});
- if (PVE::QemuServer::valid_drivename($opt)) {
+ &$create_disks($rpcenv, $authuser, $conf->{pending}, $storecfg, $vmid, undef, {$opt => $param->{$opt}});
+ } else {
+ $conf->{pending}->{$opt} = $param->{$opt};
+ }
+ PVE::QemuServer::vmconfig_undelete_pending_option($conf, $opt);
+ PVE::QemuConfig->write_config($vmid, $conf);
+ }
- &$vmconfig_update_disk($rpcenv, $authuser, $conf, $storecfg, $vmid,
- $opt, $param->{$opt}, $force);
+ # remove pending changes when nothing changed
+ $conf = PVE::QemuConfig->load_config($vmid); # update/reload
+ my $changes = PVE::QemuServer::vmconfig_cleanup_pending($conf);
+ PVE::QemuConfig->write_config($vmid, $conf) if $changes;
- } elsif ($opt =~ m/^net(\d+)$/) { #nics
+ return if !scalar(keys %{$conf->{pending}});
- &$vmconfig_update_net($rpcenv, $authuser, $conf, $storecfg, $vmid,
- $opt, $param->{$opt});
+ my $running = PVE::QemuServer::check_running($vmid);
- } else {
+ # apply pending changes
- if($opt eq 'tablet' && $param->{$opt} == 1){
- PVE::QemuServer::vm_deviceplug(undef, $conf, $vmid, $opt);
- } elsif($opt eq 'tablet' && $param->{$opt} == 0){
- PVE::QemuServer::vm_deviceunplug($vmid, $conf, $opt);
- }
+ $conf = PVE::QemuConfig->load_config($vmid); # update/reload
- $conf->{$opt} = $param->{$opt};
- PVE::QemuServer::update_config_nolock($vmid, $conf, 1);
- }
+ if ($running) {
+ my $errors = {};
+ PVE::QemuServer::vmconfig_hotplug_pending($vmid, $conf, $storecfg, $modified, $errors);
+ raise_param_exc($errors) if scalar(keys %$errors);
+ } else {
+ PVE::QemuServer::vmconfig_apply_pending($vmid, $conf, $storecfg, $running);
}
- # allow manual ballooning if shares is set to zero
- if ($running && defined($param->{balloon}) &&
- defined($conf->{shares}) && ($conf->{shares} == 0)) {
- my $balloon = $param->{'balloon'} || $conf->{memory} || $defaults->{memory};
- PVE::QemuServer::vm_mon_cmd($vmid, "balloon", value => $balloon*1024*1024);
- }
+ return;
};
if ($sync) {
}
};
- return PVE::QemuServer::lock_config($vmid, $updatefn);
+ return PVE::QemuConfig->lock_config($vmid, $updatefn);
};
my $vm_config_perm_list = [
description => "A list of settings you want to delete.",
optional => 1,
},
+ revert => {
+ type => 'string', format => 'pve-configid-list',
+ description => "Revert a pending change.",
+ optional => 1,
+ },
force => {
type => 'boolean',
description => $opt_force_description,
properties => PVE::QemuServer::json_config_properties(
{
node => get_standard_option('pve-node'),
- vmid => get_standard_option('pve-vmid'),
+ vmid => get_standard_option('pve-vmid', { completion => \&PVE::QemuServer::complete_vmid }),
skiplock => get_standard_option('skiplock'),
delete => {
type => 'string', format => 'pve-configid-list',
description => "A list of settings you want to delete.",
optional => 1,
},
+ revert => {
+ type => 'string', format => 'pve-configid-list',
+ description => "Revert a pending change.",
+ optional => 1,
+ },
force => {
type => 'boolean',
description => $opt_force_description,
additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
- vmid => get_standard_option('pve-vmid'),
+ vmid => get_standard_option('pve-vmid', { completion => \&PVE::QemuServer::complete_vmid_stopped }),
skiplock => get_standard_option('skiplock'),
},
},
if $skiplock && $authuser ne 'root@pam';
# test if VM exists
- my $conf = PVE::QemuServer::load_config($vmid);
+ my $conf = PVE::QemuConfig->load_config($vmid);
my $storecfg = PVE::Storage::config();
- my $delVMfromPoolFn = sub {
- my $usercfg = cfs_read_file("user.cfg");
- if (my $pool = $usercfg->{vms}->{$vmid}) {
- if (my $data = $usercfg->{pools}->{$pool}) {
- delete $data->{vms}->{$vmid};
- delete $usercfg->{vms}->{$vmid};
- cfs_write_file("user.cfg", $usercfg);
- }
- }
- };
+ PVE::QemuConfig->check_protection($conf, "can't remove VM $vmid");
+
+ die "unable to remove VM $vmid - used in HA resources\n"
+ if PVE::HA::Config::vm_is_ha_managed($vmid);
+
+ # early tests (repeat after locking)
+ die "VM $vmid is running - destroy failed\n"
+ if PVE::QemuServer::check_running($vmid);
my $realcmd = sub {
my $upid = shift;
PVE::QemuServer::vm_destroy($storecfg, $vmid, $skiplock);
- PVE::AccessControl::remove_vm_from_pool($vmid);
+ PVE::AccessControl::remove_vm_access($vmid);
+
+ PVE::Firewall::remove_vmfw_conf($vmid);
};
return $rpcenv->fork_worker('qmdestroy', $vmid, $authuser, $realcmd);
additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
- vmid => get_standard_option('pve-vmid'),
+ vmid => get_standard_option('pve-vmid', { completion => \&PVE::QemuServer::complete_vmid }),
idlist => {
type => 'string', format => 'pve-configid-list',
description => "A list of disk IDs you want to delete.",
properties => {
node => get_standard_option('pve-node'),
vmid => get_standard_option('pve-vmid'),
+ websocket => {
+ optional => 1,
+ type => 'boolean',
+ description => "Starts websockify instead of vncproxy.",
+ },
},
},
returns => {
my $vmid = $param->{vmid};
my $node = $param->{node};
+ my $websocket = $param->{websocket};
- my $conf = PVE::QemuServer::load_config($vmid, $node); # check if VM exists
+ my $conf = PVE::QemuConfig->load_config($vmid, $node); # check if VM exists
my $authpath = "/vms/$vmid";
$sslcert = PVE::Tools::file_get_contents("/etc/pve/pve-root-ca.pem", 8192)
if !$sslcert;
- my $port = PVE::Tools::next_vnc_port();
-
- my $remip;
+ my ($remip, $family);
my $remcmd = [];
if ($node ne 'localhost' && $node ne PVE::INotify::nodename()) {
- $remip = PVE::Cluster::remote_node_ip($node);
- # NOTE: kvm VNC traffic is already TLS encrypted
+ ($remip, $family) = PVE::Cluster::remote_node_ip($node);
+ # NOTE: kvm VNC traffic is already TLS encrypted or is known unsecure
$remcmd = ['/usr/bin/ssh', '-T', '-o', 'BatchMode=yes', $remip];
+ } else {
+ $family = PVE::Tools::get_host_address_family($node);
}
+ my $port = PVE::Tools::next_vnc_port($family);
+
my $timeout = 10;
my $realcmd = sub {
if ($conf->{vga} && ($conf->{vga} =~ m/^serial\d+$/)) {
+ die "Websocket mode is not supported in vga serial mode!" if $websocket;
+
my $termcmd = [ '/usr/sbin/qm', 'terminal', $vmid, '-iface', $conf->{vga} ];
#my $termcmd = "/usr/bin/qm terminal -iface $conf->{vga}";
$cmd = ['/usr/bin/vncterm', '-rfbport', $port,
'-perm', 'Sys.Console', '-c', @$remcmd, @$termcmd];
} else {
+ $ENV{LC_PVE_TICKET} = $ticket if $websocket; # set ticket with "qm vncproxy"
+
my $qmcmd = [@$remcmd, "/usr/sbin/qm", 'vncproxy', $vmid];
my $qmstr = join(' ', @$qmcmd);
# also redirect stderr (else we get RFB protocol errors)
- $cmd = ['/bin/nc', '-l', '-p', $port, '-w', $timeout, '-c', "$qmstr 2>/dev/null"];
+ $cmd = ['/bin/nc6', '-l', '-p', $port, '-w', $timeout, '-e', "$qmstr 2>/dev/null"];
}
PVE::Tools::run_command($cmd);
}});
__PACKAGE__->register_method({
- name => 'spiceproxy',
- path => '{vmid}/spiceproxy',
+ name => 'vncwebsocket',
+ path => '{vmid}/vncwebsocket',
method => 'GET',
- protected => 1,
- proxyto => 'node', # fixme: use direct connections or ssh tunnel?
permissions => {
+ description => "You also need to pass a valid ticket (vncticket).",
check => ['perm', '/vms/{vmid}', [ 'VM.Console' ]],
},
- description => "Returns a SPICE configuration to connect to the VM.",
+ description => "Opens a websocket for VNC traffic.",
parameters => {
additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
vmid => get_standard_option('pve-vmid'),
- proxy => {
- description => "This can be used by the client to specify the proxy server. All nodes in a cluster runs 'spiceproxy', so it is up to the client to choose one. By default, we return the node where the VM is currently running. As resonable setting is to use same node you use to connect to the API (This is window.location.hostname for the JS GUI).",
- type => 'string', format => 'dns-name',
- optional => 1,
+ vncticket => {
+ description => "Ticket from previous call to vncproxy.",
+ type => 'string',
+ maxLength => 512,
+ },
+ port => {
+ description => "Port number returned by previous vncproxy call.",
+ type => 'integer',
+ minimum => 5900,
+ maximum => 5999,
},
},
},
returns => {
- description => "Returned values can be directly passed to the 'remote-viewer' application.",
- additionalProperties => 1,
+ type => "object",
properties => {
- type => { type => 'string' },
- password => { type => 'string' },
- proxy => { type => 'string' },
- host => { type => 'string' },
- 'tls-port' => { type => 'integer' },
+ port => { type => 'string' },
},
},
code => sub {
my $vmid = $param->{vmid};
my $node = $param->{node};
- my $proxy = $param->{proxy};
- my ($ticket, $proxyticket) = PVE::AccessControl::assemble_spice_ticket($authuser, $vmid, $node);
+ my $authpath = "/vms/$vmid";
- my $timeout = 10;
+ PVE::AccessControl::verify_vnc_ticket($param->{vncticket}, $authuser, $authpath);
- my $port = PVE::QemuServer::spice_port($vmid);
- PVE::QemuServer::vm_mon_cmd($vmid, "set_password", protocol => 'spice', password => $ticket);
- PVE::QemuServer::vm_mon_cmd($vmid, "expire_password", protocol => 'spice', time => "+30");
+ my $conf = PVE::QemuConfig->load_config($vmid, $node); # VM exists ?
- if (!$proxy) {
- my $host = `hostname -f` || PVE::INotify::nodename();
- chomp $host;
- $proxy = $host;
- }
+ # Note: VNC ports are accessible from outside, so we do not gain any
+ # security if we verify that $param->{port} belongs to VM $vmid. This
+ # check is done by verifying the VNC ticket (inside VNC protocol).
- my $filename = "/etc/pve/local/pve-ssl.pem";
- my $subject = PVE::QemuServer::read_x509_subject_spice($filename);
+ my $port = $param->{port};
- my $cacert = PVE::Tools::file_get_contents("/etc/pve/pve-root-ca.pem", 8192);
- $cacert =~ s/\n/\\n/g;
+ return { port => $port };
+ }});
- return {
- type => 'spice',
- title => "VM $vmid",
- host => $proxyticket, # this break tls hostname verification, so we need to use 'host-subject'
- proxy => "http://$proxy:3128",
- 'tls-port' => $port,
- 'host-subject' => $subject,
- ca => $cacert,
- password => $ticket,
- 'delete-this-file' => 1,
- };
+__PACKAGE__->register_method({
+ name => 'spiceproxy',
+ path => '{vmid}/spiceproxy',
+ method => 'POST',
+ protected => 1,
+ proxyto => 'node',
+ permissions => {
+ check => ['perm', '/vms/{vmid}', [ 'VM.Console' ]],
+ },
+ description => "Returns a SPICE configuration to connect to the VM.",
+ parameters => {
+ additionalProperties => 0,
+ properties => {
+ node => get_standard_option('pve-node'),
+ vmid => get_standard_option('pve-vmid'),
+ proxy => get_standard_option('spice-proxy', { optional => 1 }),
+ },
+ },
+ returns => get_standard_option('remote-viewer-config'),
+ code => sub {
+ my ($param) = @_;
+
+ my $rpcenv = PVE::RPCEnvironment::get();
+
+ my $authuser = $rpcenv->get_user();
+
+ my $vmid = $param->{vmid};
+ my $node = $param->{node};
+ my $proxy = $param->{proxy};
+
+ my $conf = PVE::QemuConfig->load_config($vmid, $node);
+ my $title = "VM $vmid";
+ $title .= " - ". $conf->{name} if $conf->{name};
+
+ my $port = PVE::QemuServer::spice_port($vmid);
+
+ my ($ticket, undef, $remote_viewer_config) =
+ PVE::AccessControl::remote_viewer_config($authuser, $vmid, $node, $proxy, $title, $port);
+
+ PVE::QemuServer::vm_mon_cmd($vmid, "set_password", protocol => 'spice', password => $ticket);
+ PVE::QemuServer::vm_mon_cmd($vmid, "expire_password", protocol => 'spice', time => "+30");
+
+ return $remote_viewer_config;
}});
__PACKAGE__->register_method({
my ($param) = @_;
# test if VM exists
- my $conf = PVE::QemuServer::load_config($param->{vmid});
+ my $conf = PVE::QemuConfig->load_config($param->{vmid});
my $res = [
{ subdir => 'current' },
return $res;
}});
-my $vm_is_ha_managed = sub {
- my ($vmid) = @_;
-
- my $cc = PVE::Cluster::cfs_read_file('cluster.conf');
- if (PVE::Cluster::cluster_conf_lookup_pvevm($cc, 0, $vmid, 1)) {
- return 1;
- }
- return 0;
-};
-
__PACKAGE__->register_method({
name => 'vm_status',
path => '{vmid}/status/current',
my ($param) = @_;
# test if VM exists
- my $conf = PVE::QemuServer::load_config($param->{vmid});
+ my $conf = PVE::QemuConfig->load_config($param->{vmid});
my $vmstatus = PVE::QemuServer::vmstatus($param->{vmid}, 1);
my $status = $vmstatus->{$param->{vmid}};
- $status->{ha} = &$vm_is_ha_managed($param->{vmid});
+ $status->{ha} = PVE::HA::Config::vm_is_ha_managed($param->{vmid});
$status->{spice} = 1 if PVE::QemuServer::vga_conf_has_spice($conf->{vga});
additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
- vmid => get_standard_option('pve-vmid'),
+ vmid => get_standard_option('pve-vmid',
+ { completion => \&PVE::QemuServer::complete_vmid_stopped }),
skiplock => get_standard_option('skiplock'),
stateuri => get_standard_option('pve-qm-stateuri'),
migratedfrom => get_standard_option('pve-node',{ optional => 1 }),
}
}
+ PVE::Cluster::check_cfs_quorum();
+
my $storecfg = PVE::Storage::config();
- if (&$vm_is_ha_managed($vmid) && !$stateuri &&
+ if (PVE::HA::Config::vm_is_ha_managed($vmid) && !$stateuri &&
$rpcenv->{type} ne 'ha') {
my $hacmd = sub {
my $upid = shift;
- my $service = "pvevm:$vmid";
+ my $service = "vm:$vmid";
- my $cmd = ['clusvcadm', '-e', $service, '-m', $node];
+ my $cmd = ['ha-manager', 'enable', $service];
print "Executing HA start for VM $vmid\n";
method => 'POST',
protected => 1,
proxyto => 'node',
- description => "Stop virtual machine.",
+ description => "Stop virtual machine. The qemu process will exit immediately. This " .
+ "is akin to pulling the power plug of a running computer and may damage the VM data",
permissions => {
check => ['perm', '/vms/{vmid}', [ 'VM.PowerMgmt' ]],
},
additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
- vmid => get_standard_option('pve-vmid'),
+ vmid => get_standard_option('pve-vmid',
+ { completion => \&PVE::QemuServer::complete_vmid_running }),
skiplock => get_standard_option('skiplock'),
- migratedfrom => get_standard_option('pve-node',{ optional => 1 }),
+ migratedfrom => get_standard_option('pve-node', { optional => 1 }),
timeout => {
description => "Wait maximal timeout seconds.",
type => 'integer',
my $storecfg = PVE::Storage::config();
- if (&$vm_is_ha_managed($vmid) && $rpcenv->{type} ne 'ha') {
+ if (PVE::HA::Config::vm_is_ha_managed($vmid) && ($rpcenv->{type} ne 'ha') && !defined($migratedfrom)) {
my $hacmd = sub {
my $upid = shift;
- my $service = "pvevm:$vmid";
+ my $service = "vm:$vmid";
- my $cmd = ['clusvcadm', '-d', $service];
+ my $cmd = ['ha-manager', 'disable', $service];
print "Executing HA stop for VM $vmid\n";
additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
- vmid => get_standard_option('pve-vmid'),
+ vmid => get_standard_option('pve-vmid',
+ { completion => \&PVE::QemuServer::complete_vmid_running }),
skiplock => get_standard_option('skiplock'),
},
},
method => 'POST',
protected => 1,
proxyto => 'node',
- description => "Shutdown virtual machine.",
+ description => "Shutdown virtual machine. This is similar to pressing the power button on a physical machine. " .
+ "This will send an ACPI event for the guest OS, which should then proceed to a clean shutdown.",
permissions => {
check => ['perm', '/vms/{vmid}', [ 'VM.PowerMgmt' ]],
},
additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
- vmid => get_standard_option('pve-vmid'),
+ vmid => get_standard_option('pve-vmid',
+ { completion => \&PVE::QemuServer::complete_vmid_running }),
skiplock => get_standard_option('skiplock'),
timeout => {
description => "Wait maximal timeout seconds.",
my $storecfg = PVE::Storage::config();
+ my $shutdown = 1;
+
+ # if vm is paused, do not shutdown (but stop if forceStop = 1)
+ # otherwise, we will infer a shutdown command, but run into the timeout,
+ # then when the vm is resumed, it will instantly shutdown
+ #
+ # checking the qmp status here to get feedback to the gui/cli/api
+ # and the status query should not take too long
+ my $qmpstatus;
+ eval {
+ $qmpstatus = PVE::QemuServer::vm_qmp_command($vmid, { execute => "query-status" }, 0);
+ };
+ my $err = $@ if $@;
+
+ if (!$err && $qmpstatus->{status} eq "paused") {
+ if ($param->{forceStop}) {
+ warn "VM is paused - stop instead of shutdown\n";
+ $shutdown = 0;
+ } else {
+ die "VM is paused - cannot shutdown\n";
+ }
+ }
+
my $realcmd = sub {
my $upid = shift;
syslog('info', "shutdown VM $vmid: $upid\n");
PVE::QemuServer::vm_stop($storecfg, $vmid, $skiplock, 0, $param->{timeout},
- 1, $param->{forceStop}, $keepActive);
+ $shutdown, $param->{forceStop}, $keepActive);
return;
};
additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
- vmid => get_standard_option('pve-vmid'),
+ vmid => get_standard_option('pve-vmid',
+ { completion => \&PVE::QemuServer::complete_vmid_running }),
skiplock => get_standard_option('skiplock'),
},
},
additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
- vmid => get_standard_option('pve-vmid'),
+ vmid => get_standard_option('pve-vmid',
+ { completion => \&PVE::QemuServer::complete_vmid_running }),
skiplock => get_standard_option('skiplock'),
+ nocheck => { type => 'boolean', optional => 1 },
+
},
},
returns => {
raise_param_exc({ skiplock => "Only root may use this option." })
if $skiplock && $authuser ne 'root@pam';
- die "VM $vmid not running\n" if !PVE::QemuServer::check_running($vmid);
+ my $nocheck = extract_param($param, 'nocheck');
+
+ die "VM $vmid not running\n" if !PVE::QemuServer::check_running($vmid, $nocheck);
my $realcmd = sub {
my $upid = shift;
syslog('info', "resume VM $vmid: $upid\n");
- PVE::QemuServer::vm_resume($vmid, $skiplock);
+ PVE::QemuServer::vm_resume($vmid, $skiplock, $nocheck);
return;
};
additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
- vmid => get_standard_option('pve-vmid'),
+ vmid => get_standard_option('pve-vmid',
+ { completion => \&PVE::QemuServer::complete_vmid_running }),
skiplock => get_standard_option('skiplock'),
key => {
description => "The key (qemu monitor encoding).",
my $running = PVE::QemuServer::check_running($vmid);
- my $conf = PVE::QemuServer::load_config($vmid);
+ my $conf = PVE::QemuConfig->load_config($vmid);
if($snapname){
my $snap = $conf->{snapshots}->{$snapname};
my $storecfg = PVE::Storage::config();
my $nodelist = PVE::QemuServer::shared_nodes($conf, $storecfg);
- my $hasFeature = PVE::QemuServer::has_feature($feature, $conf, $storecfg, $snapname, $running);
+ my $hasFeature = PVE::QemuConfig->has_feature($feature, $conf, $storecfg, $snapname, $running);
return {
hasFeature => $hasFeature,
additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
- vmid => get_standard_option('pve-vmid'),
+ vmid => get_standard_option('pve-vmid', { completion => \&PVE::QemuServer::complete_vmid }),
newid => get_standard_option('pve-vmid', { description => 'VMID for the clone.' }),
name => {
optional => 1,
description => "Add the new VM to the specified pool.",
},
snapname => get_standard_option('pve-snapshot-name', {
- requires => 'full',
optional => 1,
}),
storage => get_standard_option('pve-storage-id', {
# do all tests after lock
# we also try to do all tests before we fork the worker
- my $conf = PVE::QemuServer::load_config($vmid);
+ my $conf = PVE::QemuConfig->load_config($vmid);
- PVE::QemuServer::check_lock($conf);
+ PVE::QemuConfig->check_lock($conf);
my $verify_running = PVE::QemuServer::check_running($vmid) || 0;
die "can't clone VM to node '$target' (VM uses local storage)\n" if $target && !$sharedvm;
- my $conffile = PVE::QemuServer::config_file($newid);
+ my $conffile = PVE::QemuConfig->config_file($newid);
die "unable to create VM $newid: config file already exists\n"
if -f $conffile;
my $newconf = { lock => 'clone' };
my $drives = {};
+ my $fullclone = {};
my $vollist = [];
foreach my $opt (keys %$oldconf) {
next if $opt eq 'snapshots' || $opt eq 'parent' || $opt eq 'snaptime' ||
$opt eq 'vmstate' || $opt eq 'snapstate';
+ # no need to copy unused images, because VMID(owner) changes anyways
+ next if $opt =~ m/^unused\d+$/;
+
# always change MAC! address
if ($opt =~ m/^net(\d+)$/) {
my $net = PVE::QemuServer::parse_net($value);
$net->{macaddr} = PVE::Tools::random_ether_addr();
$newconf->{$opt} = PVE::QemuServer::print_net($net);
- } elsif (PVE::QemuServer::valid_drivename($opt)) {
+ } elsif (PVE::QemuServer::is_valid_drivename($opt)) {
my $drive = PVE::QemuServer::parse_drive($opt, $value);
die "unable to parse drive options for '$opt'\n" if !$drive;
if (PVE::QemuServer::drive_is_cdrom($drive)) {
$newconf->{$opt} = $value; # simply copy configuration
} else {
- if ($param->{full} || !PVE::Storage::volume_is_base($storecfg, $drive->{file})) {
+ if ($param->{full}) {
die "Full clone feature is not available"
if !PVE::Storage::volume_has_feature($storecfg, 'copy', $drive->{file}, $snapname, $running);
- $drive->{full} = 1;
+ $fullclone->{$opt} = 1;
+ } else {
+ # not full means clone instead of copy
+ die "Linked clone feature is not available"
+ if !PVE::Storage::volume_has_feature($storecfg, 'clone', $drive->{file}, $snapname, $running);
}
$drives->{$opt} = $drive;
push @$vollist, $drive->{file};
}
}
+ # auto generate a new uuid
+ my ($uuid, $uuid_str);
+ UUID::generate($uuid);
+ UUID::unparse($uuid, $uuid_str);
+ my $smbios1 = PVE::QemuServer::parse_smbios1($newconf->{smbios1} || '');
+ $smbios1->{uuid} = $uuid_str;
+ $newconf->{smbios1} = PVE::QemuServer::print_smbios1($smbios1);
+
delete $newconf->{template};
if ($param->{name}) {
eval {
local $SIG{INT} = $SIG{TERM} = $SIG{QUIT} = $SIG{HUP} = sub { die "interrupted by signal\n"; };
- PVE::Storage::activate_volumes($storecfg, $vollist);
+ PVE::Storage::activate_volumes($storecfg, $vollist, $snapname);
foreach my $opt (keys %$drives) {
my $drive = $drives->{$opt};
my $newdrive = PVE::QemuServer::clone_disk($storecfg, $vmid, $running, $opt, $drive, $snapname,
- $newid, $storage, $format, $drive->{full}, $newvollist);
+ $newid, $storage, $format, $fullclone->{$opt}, $newvollist);
$newconf->{$opt} = PVE::QemuServer::print_drive($vmid, $newdrive);
- PVE::QemuServer::update_config_nolock($newid, $newconf, 1);
+ PVE::QemuConfig->write_config($newid, $newconf);
}
delete $newconf->{lock};
- PVE::QemuServer::update_config_nolock($newid, $newconf, 1);
+ PVE::QemuConfig->write_config($newid, $newconf);
if ($target) {
# always deactivate volumes - avoid lvm LVs to be active on several nodes
- PVE::Storage::deactivate_volumes($storecfg, $vollist);
+ PVE::Storage::deactivate_volumes($storecfg, $vollist, $snapname) if !$running;
- my $newconffile = PVE::QemuServer::config_file($newid, $target);
+ my $newconffile = PVE::QemuConfig->config_file($newid, $target);
die "Failed to move config to node '$target' - rename failed: $!\n"
if !rename($conffile, $newconffile);
}
return;
};
+ PVE::Firewall::clone_vmfw_conf($vmid, $newid);
+
return $rpcenv->fork_worker('qmclone', $vmid, $authuser, $realcmd);
};
- return PVE::QemuServer::lock_config_mode($vmid, 1, $shared_lock, sub {
+ return PVE::QemuConfig->lock_config_mode($vmid, 1, $shared_lock, sub {
# Aquire exclusive lock lock for $newid
- return PVE::QemuServer::lock_config_full($newid, 1, $clonefn);
+ return PVE::QemuConfig->lock_config_full($newid, 1, $clonefn);
});
}});
additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
- vmid => get_standard_option('pve-vmid'),
+ vmid => get_standard_option('pve-vmid', { completion => \&PVE::QemuServer::complete_vmid }),
disk => {
type => 'string',
description => "The disk you want to move.",
- enum => [ PVE::QemuServer::disknames() ],
+ enum => [ PVE::QemuServer::valid_drive_names() ],
},
- storage => get_standard_option('pve-storage-id', { description => "Target Storage." }),
+ storage => get_standard_option('pve-storage-id', {
+ description => "Target storage.",
+ completion => \&PVE::QemuServer::complete_storage,
+ }),
'format' => {
type => 'string',
description => "Target Format.",
my $updatefn = sub {
- my $conf = PVE::QemuServer::load_config($vmid);
+ my $conf = PVE::QemuConfig->load_config($vmid);
die "checksum missmatch (file change by other user?)\n"
if $digest && $digest ne $conf->{digest};
$conf->{$disk} = PVE::QemuServer::print_drive($vmid, $newdrive);
- PVE::QemuServer::add_unused_volume($conf, $old_volid) if !$param->{delete};
+ PVE::QemuConfig->add_unused_volume($conf, $old_volid) if !$param->{delete};
+
+ PVE::QemuConfig->write_config($vmid, $conf);
- PVE::QemuServer::update_config_nolock($vmid, $conf, 1);
+ eval {
+ # try to deactivate volumes - avoid lvm LVs to be active on several nodes
+ PVE::Storage::deactivate_volumes($storecfg, [ $newdrive->{file} ])
+ if !$running;
+ };
+ warn $@ if $@;
};
if (my $err = $@) {
}
if ($param->{delete}) {
- eval { PVE::Storage::vdisk_free($storecfg, $old_volid); };
- warn $@ if $@;
+ if (PVE::QemuServer::is_volume_in_use($storecfg, $conf, undef, $old_volid)) {
+ warn "volume $old_volid still has snapshots, can't delete it\n";
+ PVE::QemuConfig->add_unused_volume($conf, $old_volid);
+ PVE::QemuConfig->write_config($vmid, $conf);
+ } else {
+ eval { PVE::Storage::vdisk_free($storecfg, $old_volid); };
+ warn $@ if $@;
+ }
}
};
return $rpcenv->fork_worker('qmmove', $vmid, $authuser, $realcmd);
};
- return PVE::QemuServer::lock_config($vmid, $updatefn);
+ return PVE::QemuConfig->lock_config($vmid, $updatefn);
}});
__PACKAGE__->register_method({
additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
- vmid => get_standard_option('pve-vmid'),
- target => get_standard_option('pve-node', { description => "Target node." }),
+ vmid => get_standard_option('pve-vmid', { completion => \&PVE::QemuServer::complete_vmid }),
+ target => get_standard_option('pve-node', {
+ description => "Target node.",
+ completion => \&PVE::Cluster::complete_migration_target,
+ }),
online => {
type => 'boolean',
description => "Use online/live migration.",
if $param->{force} && $authuser ne 'root@pam';
# test if VM exists
- my $conf = PVE::QemuServer::load_config($vmid);
+ my $conf = PVE::QemuConfig->load_config($vmid);
# try to detect errors early
- PVE::QemuServer::check_lock($conf);
+ PVE::QemuConfig->check_lock($conf);
if (PVE::QemuServer::check_running($vmid)) {
die "cant migrate running VM without --online\n"
my $storecfg = PVE::Storage::config();
PVE::QemuServer::check_storage_availability($storecfg, $conf, $target);
- if (&$vm_is_ha_managed($vmid) && $rpcenv->{type} ne 'ha') {
+ if (PVE::HA::Config::vm_is_ha_managed($vmid) && $rpcenv->{type} ne 'ha') {
my $hacmd = sub {
my $upid = shift;
- my $service = "pvevm:$vmid";
+ my $service = "vm:$vmid";
- my $cmd = ['clusvcadm', '-M', $service, '-m', $target];
+ my $cmd = ['ha-manager', 'migrate', $service, $target];
print "Executing HA migrate for VM $vmid to node $target\n";
my $vmid = $param->{vmid};
- my $conf = PVE::QemuServer::load_config ($vmid); # check if VM exists
+ my $conf = PVE::QemuConfig->load_config ($vmid); # check if VM exists
my $res = '';
eval {
additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
- vmid => get_standard_option('pve-vmid'),
+ vmid => get_standard_option('pve-vmid', { completion => \&PVE::QemuServer::complete_vmid }),
skiplock => get_standard_option('skiplock'),
disk => {
type => 'string',
description => "The disk you want to resize.",
- enum => [PVE::QemuServer::disknames()],
+ enum => [PVE::QemuServer::valid_drive_names()],
},
size => {
type => 'string',
my $updatefn = sub {
- my $conf = PVE::QemuServer::load_config($vmid);
+ my $conf = PVE::QemuConfig->load_config($vmid);
die "checksum missmatch (file change by other user?)\n"
if $digest && $digest ne $conf->{digest};
- PVE::QemuServer::check_lock($conf) if !$skiplock;
+ PVE::QemuConfig->check_lock($conf) if !$skiplock;
die "disk '$disk' does not exist\n" if !$conf->{$disk};
my $drive = PVE::QemuServer::parse_drive($disk, $conf->{$disk});
+ my (undef, undef, undef, undef, undef, undef, $format) =
+ PVE::Storage::parse_volname($storecfg, $drive->{file});
+
+ die "can't resize volume: $disk if snapshot exists\n"
+ if %{$conf->{snapshots}} && $format eq 'qcow2';
+
my $volid = $drive->{file};
die "disk '$disk' has no associated volume\n" if !$volid;
die "you can't resize a cdrom\n" if PVE::QemuServer::drive_is_cdrom($drive);
- die "you can't online resize a virtio windows bootdisk\n"
- if PVE::QemuServer::check_running($vmid) && $conf->{bootdisk} eq $disk && $conf->{ostype} =~ m/^w/ && $disk =~ m/^virtio/;
-
my ($storeid, $volname) = PVE::Storage::parse_volume_id($volid);
$rpcenv->check($authuser, "/storage/$storeid", ['Datastore.AllocateSpace']);
$drive->{size} = $newsize;
$conf->{$disk} = PVE::QemuServer::print_drive($vmid, $drive);
- PVE::QemuServer::update_config_nolock($vmid, $conf, 1);
+ PVE::QemuConfig->write_config($vmid, $conf);
};
- PVE::QemuServer::lock_config($vmid, $updatefn);
+ PVE::QemuConfig->lock_config($vmid, $updatefn);
return undef;
}});
my $vmid = $param->{vmid};
- my $conf = PVE::QemuServer::load_config($vmid);
+ my $conf = PVE::QemuConfig->load_config($vmid);
my $snaphash = $conf->{snapshots} || {};
my $res = [];
additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
- vmid => get_standard_option('pve-vmid'),
+ vmid => get_standard_option('pve-vmid', { completion => \&PVE::QemuServer::complete_vmid }),
snapname => get_standard_option('pve-snapshot-name'),
vmstate => {
optional => 1,
type => 'boolean',
description => "Save the vmstate",
},
- freezefs => {
- optional => 1,
- type => 'boolean',
- description => "Freeze the filesystem",
- },
description => {
optional => 1,
type => 'string',
my $realcmd = sub {
PVE::Cluster::log_msg('info', $authuser, "snapshot VM $vmid: $snapname");
- PVE::QemuServer::snapshot_create($vmid, $snapname, $param->{vmstate},
- $param->{freezefs}, $param->{description});
+ PVE::QemuConfig->snapshot_create($vmid, $snapname, $param->{vmstate},
+ $param->{description});
};
return $rpcenv->fork_worker('qmsnapshot', $vmid, $authuser, $realcmd);
my $updatefn = sub {
- my $conf = PVE::QemuServer::load_config($vmid);
+ my $conf = PVE::QemuConfig->load_config($vmid);
- PVE::QemuServer::check_lock($conf);
+ PVE::QemuConfig->check_lock($conf);
my $snap = $conf->{snapshots}->{$snapname};
$snap->{description} = $param->{description} if defined($param->{description});
- PVE::QemuServer::update_config_nolock($vmid, $conf, 1);
+ PVE::QemuConfig->write_config($vmid, $conf);
};
- PVE::QemuServer::lock_config($vmid, $updatefn);
+ PVE::QemuConfig->lock_config($vmid, $updatefn);
return undef;
}});
my $snapname = extract_param($param, 'snapname');
- my $conf = PVE::QemuServer::load_config($vmid);
+ my $conf = PVE::QemuConfig->load_config($vmid);
my $snap = $conf->{snapshots}->{$snapname};
additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
- vmid => get_standard_option('pve-vmid'),
+ vmid => get_standard_option('pve-vmid', { completion => \&PVE::QemuServer::complete_vmid }),
snapname => get_standard_option('pve-snapshot-name'),
},
},
my $realcmd = sub {
PVE::Cluster::log_msg('info', $authuser, "rollback snapshot VM $vmid: $snapname");
- PVE::QemuServer::snapshot_rollback($vmid, $snapname);
+ PVE::QemuConfig->snapshot_rollback($vmid, $snapname);
};
return $rpcenv->fork_worker('qmrollback', $vmid, $authuser, $realcmd);
additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
- vmid => get_standard_option('pve-vmid'),
+ vmid => get_standard_option('pve-vmid', { completion => \&PVE::QemuServer::complete_vmid }),
snapname => get_standard_option('pve-snapshot-name'),
force => {
optional => 1,
my $realcmd = sub {
PVE::Cluster::log_msg('info', $authuser, "delete snapshot VM $vmid: $snapname");
- PVE::QemuServer::snapshot_delete($vmid, $snapname, $param->{force});
+ PVE::QemuConfig->snapshot_delete($vmid, $snapname, $param->{force});
};
return $rpcenv->fork_worker('qmdelsnapshot', $vmid, $authuser, $realcmd);
additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
- vmid => get_standard_option('pve-vmid'),
+ vmid => get_standard_option('pve-vmid', { completion => \&PVE::QemuServer::complete_vmid_stopped }),
disk => {
optional => 1,
type => 'string',
description => "If you want to convert only 1 disk to base image.",
- enum => [PVE::QemuServer::disknames()],
+ enum => [PVE::QemuServer::valid_drive_names()],
},
},
my $updatefn = sub {
- my $conf = PVE::QemuServer::load_config($vmid);
+ my $conf = PVE::QemuConfig->load_config($vmid);
- PVE::QemuServer::check_lock($conf);
+ PVE::QemuConfig->check_lock($conf);
die "unable to create template, because VM contains snapshots\n"
if $conf->{snapshots} && scalar(keys %{$conf->{snapshots}});
die "you can't convert a template to a template\n"
- if PVE::QemuServer::is_template($conf) && !$disk;
+ if PVE::QemuConfig->is_template($conf) && !$disk;
die "you can't convert a VM to template if VM is running\n"
if PVE::QemuServer::check_running($vmid);
};
$conf->{template} = 1;
- PVE::QemuServer::update_config_nolock($vmid, $conf, 1);
+ PVE::QemuConfig->write_config($vmid, $conf);
return $rpcenv->fork_worker('qmtemplate', $vmid, $authuser, $realcmd);
};
- PVE::QemuServer::lock_config($vmid, $updatefn);
+ PVE::QemuConfig->lock_config($vmid, $updatefn);
return undef;
}});