use strict;
use warnings;
use Cwd 'abs_path';
+use Net::SSLeay;
+use UUID;
+use POSIX;
+use IO::Socket::IP;
use PVE::Cluster qw (cfs_read_file cfs_write_file);;
use PVE::SafeSyslog;
use PVE::Storage;
use PVE::JSONSchema qw(get_standard_option);
use PVE::RESTHandler;
+use PVE::ReplicationConfig;
+use PVE::GuestHelpers;
+use PVE::QemuConfig;
use PVE::QemuServer;
use PVE::QemuMigrate;
use PVE::RPCEnvironment;
use PVE::AccessControl;
use PVE::INotify;
use PVE::Network;
+use PVE::Firewall;
+use PVE::API2::Firewall::VM;
+
+BEGIN {
+ if (!$ENV{PVE_GENERATING_DOCS}) {
+ require PVE::HA::Env::PVE2;
+ import PVE::HA::Env::PVE2;
+ require PVE::HA::Config;
+ import PVE::HA::Config;
+ }
+}
use Data::Dumper; # fixme: remove
}
};
-
+my $NEW_DISK_RE = qr!^(([^/:\s]+):)?(\d+(\.\d+)?)$!;
my $check_storage_access = sub {
my ($rpcenv, $authuser, $storecfg, $vmid, $settings, $default_storage) = @_;
# nothing to check
} elsif ($isCDROM && ($volid eq 'cdrom')) {
$rpcenv->check($authuser, "/", ['Sys.Console']);
- } elsif (!$isCDROM && ($volid =~ m/^(([^:\s]+):)?(\d+(\.\d+)?)$/)) {
+ } elsif (!$isCDROM && ($volid =~ $NEW_DISK_RE)) {
my ($storeid, $size) = ($2 || $default_storage, $3);
die "no storage ID specified (and no default storage)\n" if !$storeid;
$rpcenv->check($authuser, "/storage/$storeid", ['Datastore.AllocateSpace']);
+ my $scfg = PVE::Storage::storage_config($storecfg, $storeid);
+ raise_param_exc({ storage => "storage '$storeid' does not support vm images"})
+ if !$scfg->{content}->{images};
} else {
- $rpcenv->check_volume_access($authuser, $storecfg, $vmid, $volid);
+ PVE::Storage::check_volume_access($rpcenv, $authuser, $storecfg, $vmid, $volid);
}
});
};
my $vollist = [];
my $res = {};
- PVE::QemuServer::foreach_drive($settings, sub {
+
+ my $code = sub {
my ($ds, $disk) = @_;
my $volid = $disk->{file};
if (!$volid || $volid eq 'none' || $volid eq 'cdrom') {
delete $disk->{size};
$res->{$ds} = PVE::QemuServer::print_drive($vmid, $disk);
- } elsif ($volid =~ m/^(([^:\s]+):)?(\d+(\.\d+)?)$/) {
+ } elsif ($volid =~ $NEW_DISK_RE) {
my ($storeid, $size) = ($2 || $default_storage, $3);
die "no storage ID specified (and no default storage)\n" if !$storeid;
my $defformat = PVE::Storage::storage_default_format($storecfg, $storeid);
my $fmt = $disk->{format} || $defformat;
- my $volid = PVE::Storage::vdisk_alloc($storecfg, $storeid, $vmid,
- $fmt, undef, $size*1024*1024);
- $disk->{file} = $volid;
- $disk->{size} = $size*1024*1024*1024;
+
+ $size = PVE::Tools::convert_size($size, 'gb' => 'kb'); # vdisk_alloc uses kb
+
+ my $volid;
+ if ($ds eq 'efidisk0') {
+ ($volid, $size) = PVE::QemuServer::create_efidisk($storecfg, $storeid, $vmid, $fmt);
+ } else {
+ $volid = PVE::Storage::vdisk_alloc($storecfg, $storeid, $vmid, $fmt, undef, $size);
+ }
push @$vollist, $volid;
+ $disk->{file} = $volid;
+ $disk->{size} = PVE::Tools::convert_size($size, 'kb' => 'b');
delete $disk->{format}; # no longer needed
$res->{$ds} = PVE::QemuServer::print_drive($vmid, $disk);
} else {
- my $path = $rpcenv->check_volume_access($authuser, $storecfg, $vmid, $volid);
+ PVE::Storage::check_volume_access($rpcenv, $authuser, $storecfg, $vmid, $volid);
my $volid_is_new = 1;
$res->{$ds} = PVE::QemuServer::print_drive($vmid, $disk);
}
- });
+ };
+
+ eval { PVE::QemuServer::foreach_drive($settings, $code); };
# free allocated images on error
if (my $err = $@) {
return $vollist;
};
+my $cpuoptions = {
+ 'cores' => 1,
+ 'cpu' => 1,
+ 'cpulimit' => 1,
+ 'cpuunits' => 1,
+ 'numa' => 1,
+ 'smp' => 1,
+ 'sockets' => 1,
+ 'vcpus' => 1,
+};
+
+my $memoryoptions = {
+ 'memory' => 1,
+ 'balloon' => 1,
+ 'shares' => 1,
+};
+
+my $hwtypeoptions = {
+ 'acpi' => 1,
+ 'hotplug' => 1,
+ 'kvm' => 1,
+ 'machine' => 1,
+ 'scsihw' => 1,
+ 'smbios1' => 1,
+ 'tablet' => 1,
+ 'vga' => 1,
+ 'watchdog' => 1,
+};
+
+my $generaloptions = {
+ 'agent' => 1,
+ 'autostart' => 1,
+ 'bios' => 1,
+ 'description' => 1,
+ 'keyboard' => 1,
+ 'localtime' => 1,
+ 'migrate_downtime' => 1,
+ 'migrate_speed' => 1,
+ 'name' => 1,
+ 'onboot' => 1,
+ 'ostype' => 1,
+ 'protection' => 1,
+ 'reboot' => 1,
+ 'startdate' => 1,
+ 'startup' => 1,
+ 'tdf' => 1,
+ 'template' => 1,
+};
+
+my $vmpoweroptions = {
+ 'freeze' => 1,
+};
+
+my $diskoptions = {
+ 'boot' => 1,
+ 'bootdisk' => 1,
+};
+
my $check_vm_modify_config_perm = sub {
my ($rpcenv, $authuser, $vmid, $pool, $key_list) = @_;
foreach my $opt (@$key_list) {
# disk checks need to be done somewhere else
- next if PVE::QemuServer::valid_drivename($opt);
+ next if PVE::QemuServer::is_valid_drivename($opt);
+ next if $opt eq 'cdrom';
+ next if $opt =~ m/^unused\d+$/;
- if ($opt eq 'sockets' || $opt eq 'cores' ||
- $opt eq 'cpu' || $opt eq 'smp' ||
- $opt eq 'cpulimit' || $opt eq 'cpuunits') {
+ if ($cpuoptions->{$opt} || $opt =~ m/^numa\d+$/) {
$rpcenv->check_vm_perm($authuser, $vmid, $pool, ['VM.Config.CPU']);
- } elsif ($opt eq 'boot' || $opt eq 'bootdisk') {
- $rpcenv->check_vm_perm($authuser, $vmid, $pool, ['VM.Config.Disk']);
- } elsif ($opt eq 'memory' || $opt eq 'balloon' || $opt eq 'shares') {
+ } elsif ($memoryoptions->{$opt}) {
$rpcenv->check_vm_perm($authuser, $vmid, $pool, ['VM.Config.Memory']);
- } elsif ($opt eq 'args' || $opt eq 'lock') {
- die "only root can set '$opt' config\n";
- } elsif ($opt eq 'cpu' || $opt eq 'kvm' || $opt eq 'acpi' || $opt eq 'machine' ||
- $opt eq 'vga' || $opt eq 'watchdog' || $opt eq 'tablet') {
+ } elsif ($hwtypeoptions->{$opt}) {
$rpcenv->check_vm_perm($authuser, $vmid, $pool, ['VM.Config.HWType']);
+ } elsif ($generaloptions->{$opt}) {
+ $rpcenv->check_vm_perm($authuser, $vmid, $pool, ['VM.Config.Options']);
+ # special case for startup since it changes host behaviour
+ if ($opt eq 'startup') {
+ $rpcenv->check_full($authuser, "/", ['Sys.Modify']);
+ }
+ } elsif ($vmpoweroptions->{$opt}) {
+ $rpcenv->check_vm_perm($authuser, $vmid, $pool, ['VM.PowerMgmt']);
+ } elsif ($diskoptions->{$opt}) {
+ $rpcenv->check_vm_perm($authuser, $vmid, $pool, ['VM.Config.Disk']);
} elsif ($opt =~ m/^net\d+$/) {
$rpcenv->check_vm_perm($authuser, $vmid, $pool, ['VM.Config.Network']);
} else {
- $rpcenv->check_vm_perm($authuser, $vmid, $pool, ['VM.Config.Options']);
+ # catches usb\d+, hostpci\d+, args, lock, etc.
+ # new options will be checked here
+ die "only root can set '$opt' config\n";
}
}
additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
+ full => {
+ type => 'boolean',
+ optional => 1,
+ description => "Determine the full status of active VMs.",
+ },
},
},
returns => {
my $rpcenv = PVE::RPCEnvironment::get();
my $authuser = $rpcenv->get_user();
- my $vmstatus = PVE::QemuServer::vmstatus();
+ my $vmstatus = PVE::QemuServer::vmstatus(undef, $param->{full});
my $res = [];
foreach my $vmid (keys %$vmstatus) {
next if !$rpcenv->check($authuser, "/vms/$vmid", [ 'VM.Audit' ], 1);
my $data = $vmstatus->{$vmid};
- $data->{vmid} = $vmid;
+ $data->{vmid} = int($vmid);
push @$res, $data;
}
properties => PVE::QemuServer::json_config_properties(
{
node => get_standard_option('pve-node'),
- vmid => get_standard_option('pve-vmid'),
+ vmid => get_standard_option('pve-vmid', { completion => \&PVE::Cluster::complete_next_vmid }),
archive => {
description => "The backup file.",
type => 'string',
optional => 1,
maxLength => 255,
+ completion => \&PVE::QemuServer::complete_backup_archives,
},
storage => get_standard_option('pve-storage-id', {
description => "Default storage.",
optional => 1,
+ completion => \&PVE::QemuServer::complete_storage,
}),
force => {
optional => 1,
my $pool = extract_param($param, 'pool');
- my $filename = PVE::QemuServer::config_file($vmid);
+ my $filename = PVE::QemuConfig->config_file($vmid);
my $storecfg = PVE::Storage::config();
&$check_vm_modify_config_perm($rpcenv, $authuser, $vmid, $pool, [ keys %$param]);
foreach my $opt (keys %$param) {
- if (PVE::QemuServer::valid_drivename($opt)) {
+ if (PVE::QemuServer::is_valid_drivename($opt)) {
my $drive = PVE::QemuServer::parse_drive($opt, $param->{$opt});
raise_param_exc({ $opt => "unable to parse drive options" }) if !$drive;
die "pipe requires cli environment\n"
if $rpcenv->{type} ne 'cli';
} else {
- my $path = $rpcenv->check_volume_access($authuser, $storecfg, $vmid, $archive);
-
- PVE::Storage::activate_volumes($storecfg, [ $archive ])
- if PVE::Storage::parse_volume_id ($archive, 1);
-
- die "can't find archive file '$archive'\n" if !($path && -f $path);
- $archive = $path;
+ PVE::Storage::check_volume_access($rpcenv, $authuser, $storecfg, $vmid, $archive);
+ $archive = PVE::Storage::abs_filesystem_path($storecfg, $archive);
}
}
my $restorefn = sub {
+ my $vmlist = PVE::Cluster::get_vmlist();
+ if ($vmlist->{ids}->{$vmid}) {
+ my $current_node = $vmlist->{ids}->{$vmid}->{node};
+ if ($current_node eq $node) {
+ my $conf = PVE::QemuConfig->load_config($vmid);
- # fixme: this test does not work if VM exists on other node!
- if (-f $filename) {
- die "unable to restore vm $vmid: config file already exists\n"
- if !$force;
+ PVE::QemuConfig->check_protection($conf, "unable to restore VM $vmid");
- die "unable to restore vm $vmid: vm is running\n"
- if PVE::QemuServer::check_running($vmid);
+ die "unable to restore vm $vmid - config file already exists\n"
+ if !$force;
+
+ die "unable to restore vm $vmid - vm is running\n"
+ if PVE::QemuServer::check_running($vmid);
+
+ die "unable to restore vm $vmid - vm is a template\n"
+ if PVE::QemuConfig->is_template($conf);
+
+ } else {
+ die "unable to restore vm $vmid - already existing on cluster node '$current_node'\n";
+ }
}
my $realcmd = sub {
PVE::AccessControl::add_vm_to_pool($vmid, $pool) if $pool;
};
+ # ensure no old replication state exists
+ PVE::ReplicationState::delete_guest_states($vmid);
+
return $rpcenv->fork_worker('qmrestore', $vmid, $authuser, $realcmd);
};
my $createfn = sub {
# test after locking
- die "unable to create vm $vmid: config file already exists\n"
- if -f $filename;
+ PVE::Cluster::check_vmid_unused($vmid);
+
+ # ensure no old replication state exists
+ PVE::ReplicationState::delete_guest_states($vmid);
my $realcmd = sub {
$vollist = &$create_disks($rpcenv, $authuser, $conf, $storecfg, $vmid, $pool, $param, $storage);
# try to be smart about bootdisk
- my @disks = PVE::QemuServer::disknames();
+ my @disks = PVE::QemuServer::valid_drive_names();
my $firstdisk;
foreach my $ds (reverse @disks) {
next if !$conf->{$ds};
$conf->{bootdisk} = $firstdisk;
}
- PVE::QemuServer::update_config_nolock($vmid, $conf);
+ # auto-generate a UUID if the user did not specify the smbios1 option
+ if (!$conf->{smbios1}) {
+ my ($uuid, $uuid_str);
+ UUID::generate($uuid);
+ UUID::unparse($uuid, $uuid_str);
+ $conf->{smbios1} = "uuid=$uuid_str";
+ }
+
+ PVE::QemuConfig->write_config($vmid, $conf);
};
my $err = $@;
return $rpcenv->fork_worker('qmcreate', $vmid, $authuser, $realcmd);
};
- return PVE::QemuServer::lock_config_full($vmid, 1, $archive ? $restorefn : $createfn);
+ return PVE::QemuConfig->lock_config_full($vmid, 1, $archive ? $restorefn : $createfn);
}});
__PACKAGE__->register_method({
my $res = [
{ subdir => 'config' },
+ { subdir => 'pending' },
{ subdir => 'status' },
{ subdir => 'unlink' },
{ subdir => 'vncproxy' },
{ subdir => 'rrd' },
{ subdir => 'rrddata' },
{ subdir => 'monitor' },
+ { subdir => 'agent' },
{ subdir => 'snapshot' },
+ { subdir => 'spiceproxy' },
+ { subdir => 'sendkey' },
+ { subdir => 'firewall' },
];
return $res;
}});
+__PACKAGE__->register_method ({
+ subclass => "PVE::API2::Firewall::VM",
+ path => '{vmid}/firewall',
+});
+
__PACKAGE__->register_method({
name => 'rrd',
path => '{vmid}/rrd',
path => '{vmid}/config',
method => 'GET',
proxyto => 'node',
- description => "Get virtual machine configuration.",
+ description => "Get current virtual machine configuration. This does not include pending configuration changes (see 'pending' API).",
permissions => {
check => ['perm', '/vms/{vmid}', [ 'VM.Audit' ]],
},
additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
- vmid => get_standard_option('pve-vmid'),
+ vmid => get_standard_option('pve-vmid', { completion => \&PVE::QemuServer::complete_vmid }),
+ current => {
+ description => "Get current values (instead of pending values).",
+ optional => 1,
+ default => 0,
+ type => 'boolean',
+ },
},
},
returns => {
code => sub {
my ($param) = @_;
- my $conf = PVE::QemuServer::load_config($param->{vmid});
+ my $conf = PVE::QemuConfig->load_config($param->{vmid});
delete $conf->{snapshots};
- return $conf;
- }});
-
-my $vm_is_volid_owner = sub {
- my ($storecfg, $vmid, $volid) =@_;
-
- if ($volid !~ m|^/|) {
- my ($path, $owner);
- eval { ($path, $owner) = PVE::Storage::path($storecfg, $volid); };
- if ($owner && ($owner == $vmid)) {
- return 1;
- }
- }
-
- return undef;
-};
-
-my $test_deallocate_drive = sub {
- my ($storecfg, $vmid, $key, $drive, $force) = @_;
-
- if (!PVE::QemuServer::drive_is_cdrom($drive)) {
- my $volid = $drive->{file};
- if (&$vm_is_volid_owner($storecfg, $vmid, $volid)) {
- if ($force || $key =~ m/^unused/) {
- my $sid = PVE::Storage::parse_volume_id($volid);
- return $sid;
+ if (!$param->{current}) {
+ foreach my $opt (keys %{$conf->{pending}}) {
+ next if $opt eq 'delete';
+ my $value = $conf->{pending}->{$opt};
+ next if ref($value); # just to be sure
+ $conf->{$opt} = $value;
}
- }
- }
-
- return undef;
-};
-
-my $delete_drive = sub {
- my ($conf, $storecfg, $vmid, $key, $drive, $force) = @_;
-
- if (!PVE::QemuServer::drive_is_cdrom($drive)) {
- my $volid = $drive->{file};
-
- if (&$vm_is_volid_owner($storecfg, $vmid, $volid)) {
- if ($force || $key =~ m/^unused/) {
- eval {
- # check if the disk is really unused
- my $used_paths = PVE::QemuServer::get_used_paths($vmid, $storecfg, $conf, 1, $key);
- my $path = PVE::Storage::path($storecfg, $volid);
-
- die "unable to delete '$volid' - volume is still in use (snapshot?)\n"
- if $used_paths->{$path};
-
- PVE::Storage::vdisk_free($storecfg, $volid);
- };
- die $@ if $@;
- } else {
- PVE::QemuServer::add_unused_volume($conf, $volid, $vmid);
+ my $pending_delete_hash = PVE::QemuServer::split_flagged_list($conf->{pending}->{delete});
+ foreach my $opt (keys %$pending_delete_hash) {
+ delete $conf->{$opt} if $conf->{$opt};
}
}
- }
-
- delete $conf->{$key};
-};
-
-my $vmconfig_delete_option = sub {
- my ($rpcenv, $authuser, $conf, $storecfg, $vmid, $opt, $force) = @_;
-
- return if !defined($conf->{$opt});
-
- my $isDisk = PVE::QemuServer::valid_drivename($opt)|| ($opt =~ m/^unused/);
-
- if ($isDisk) {
- $rpcenv->check_vm_perm($authuser, $vmid, undef, ['VM.Config.Disk']);
-
- my $drive = PVE::QemuServer::parse_drive($opt, $conf->{$opt});
- if (my $sid = &$test_deallocate_drive($storecfg, $vmid, $opt, $drive, $force)) {
- $rpcenv->check($authuser, "/storage/$sid", ['Datastore.Allocate']);
- }
- }
- my $unplugwarning = "";
- if($conf->{ostype} && $conf->{ostype} eq 'l26'){
- $unplugwarning = "<br>verify that you have acpiphp && pci_hotplug modules loaded in your guest VM";
- }elsif($conf->{ostype} && $conf->{ostype} eq 'l24'){
- $unplugwarning = "<br>kernel 2.4 don't support hotplug, please disable hotplug in options";
- }elsif(!$conf->{ostype} || ($conf->{ostype} && $conf->{ostype} eq 'other')){
- $unplugwarning = "<br>verify that your guest support acpi hotplug";
- }
-
- if($opt eq 'tablet'){
- PVE::QemuServer::vm_deviceplug(undef, $conf, $vmid, $opt);
- }else{
- die "error hot-unplug $opt $unplugwarning" if !PVE::QemuServer::vm_deviceunplug($vmid, $conf, $opt);
- }
-
- if ($isDisk) {
- my $drive = PVE::QemuServer::parse_drive($opt, $conf->{$opt});
- &$delete_drive($conf, $storecfg, $vmid, $opt, $drive, $force);
- } else {
- delete $conf->{$opt};
- }
-
- PVE::QemuServer::update_config_nolock($vmid, $conf, 1);
-};
-
-my $safe_num_ne = sub {
- my ($a, $b) = @_;
-
- return 0 if !defined($a) && !defined($b);
- return 1 if !defined($a);
- return 1 if !defined($b);
-
- return $a != $b;
-};
-
-my $vmconfig_update_disk = sub {
- my ($rpcenv, $authuser, $conf, $storecfg, $vmid, $opt, $value, $force) = @_;
-
- my $drive = PVE::QemuServer::parse_drive($opt, $value);
-
- if (PVE::QemuServer::drive_is_cdrom($drive)) { #cdrom
- $rpcenv->check_vm_perm($authuser, $vmid, undef, ['VM.Config.CDROM']);
- } else {
- $rpcenv->check_vm_perm($authuser, $vmid, undef, ['VM.Config.Disk']);
- }
+ delete $conf->{pending};
- if ($conf->{$opt}) {
+ return $conf;
+ }});
- if (my $old_drive = PVE::QemuServer::parse_drive($opt, $conf->{$opt})) {
+__PACKAGE__->register_method({
+ name => 'vm_pending',
+ path => '{vmid}/pending',
+ method => 'GET',
+ proxyto => 'node',
+ description => "Get virtual machine configuration, including pending changes.",
+ permissions => {
+ check => ['perm', '/vms/{vmid}', [ 'VM.Audit' ]],
+ },
+ parameters => {
+ additionalProperties => 0,
+ properties => {
+ node => get_standard_option('pve-node'),
+ vmid => get_standard_option('pve-vmid', { completion => \&PVE::QemuServer::complete_vmid }),
+ },
+ },
+ returns => {
+ type => "array",
+ items => {
+ type => "object",
+ properties => {
+ key => {
+ description => "Configuration option name.",
+ type => 'string',
+ },
+ value => {
+ description => "Current value.",
+ type => 'string',
+ optional => 1,
+ },
+ pending => {
+ description => "Pending value.",
+ type => 'string',
+ optional => 1,
+ },
+ delete => {
+ description => "Indicates a pending delete request if present and not 0. " .
+ "The value 2 indicates a force-delete request.",
+ type => 'integer',
+ minimum => 0,
+ maximum => 2,
+ optional => 1,
+ },
+ },
+ },
+ },
+ code => sub {
+ my ($param) = @_;
- my $media = $drive->{media} || 'disk';
- my $oldmedia = $old_drive->{media} || 'disk';
- die "unable to change media type\n" if $media ne $oldmedia;
+ my $conf = PVE::QemuConfig->load_config($param->{vmid});
- if (!PVE::QemuServer::drive_is_cdrom($old_drive) &&
- ($drive->{file} ne $old_drive->{file})) { # delete old disks
+ my $pending_delete_hash = PVE::QemuServer::split_flagged_list($conf->{pending}->{delete});
- &$vmconfig_delete_option($rpcenv, $authuser, $conf, $storecfg, $vmid, $opt, $force);
- $conf = PVE::QemuServer::load_config($vmid); # update/reload
- }
+ my $res = [];
- if(&$safe_num_ne($drive->{mbps}, $old_drive->{mbps}) ||
- &$safe_num_ne($drive->{mbps_rd}, $old_drive->{mbps_rd}) ||
- &$safe_num_ne($drive->{mbps_wr}, $old_drive->{mbps_wr}) ||
- &$safe_num_ne($drive->{iops}, $old_drive->{iops}) ||
- &$safe_num_ne($drive->{iops_rd}, $old_drive->{iops_rd}) ||
- &$safe_num_ne($drive->{iops_wr}, $old_drive->{iops_wr})) {
- PVE::QemuServer::qemu_block_set_io_throttle($vmid,"drive-$opt", $drive->{mbps}*1024*1024,
- $drive->{mbps_rd}*1024*1024, $drive->{mbps_wr}*1024*1024,
- $drive->{iops}, $drive->{iops_rd}, $drive->{iops_wr})
- if !PVE::QemuServer::drive_is_cdrom($drive);
- }
+ foreach my $opt (keys %$conf) {
+ next if ref($conf->{$opt});
+ my $item = { key => $opt };
+ $item->{value} = $conf->{$opt} if defined($conf->{$opt});
+ $item->{pending} = $conf->{pending}->{$opt} if defined($conf->{pending}->{$opt});
+ $item->{delete} = ($pending_delete_hash->{$opt} ? 2 : 1) if exists $pending_delete_hash->{$opt};
+ push @$res, $item;
}
- }
-
- &$create_disks($rpcenv, $authuser, $conf, $storecfg, $vmid, undef, {$opt => $value});
- PVE::QemuServer::update_config_nolock($vmid, $conf, 1);
- $conf = PVE::QemuServer::load_config($vmid); # update/reload
- $drive = PVE::QemuServer::parse_drive($opt, $conf->{$opt});
-
- if (PVE::QemuServer::drive_is_cdrom($drive)) { # cdrom
-
- if (PVE::QemuServer::check_running($vmid)) {
- if ($drive->{file} eq 'none') {
- PVE::QemuServer::vm_mon_cmd($vmid, "eject",force => JSON::true,device => "drive-$opt");
- } else {
- my $path = PVE::QemuServer::get_iso_path($storecfg, $vmid, $drive->{file});
- PVE::QemuServer::vm_mon_cmd($vmid, "eject",force => JSON::true,device => "drive-$opt"); #force eject if locked
- PVE::QemuServer::vm_mon_cmd($vmid, "change",device => "drive-$opt",target => "$path") if $path;
- }
+ foreach my $opt (keys %{$conf->{pending}}) {
+ next if $opt eq 'delete';
+ next if ref($conf->{pending}->{$opt}); # just to be sure
+ next if defined($conf->{$opt});
+ my $item = { key => $opt };
+ $item->{pending} = $conf->{pending}->{$opt};
+ push @$res, $item;
}
- } else { # hotplug new disks
-
- die "error hotplug $opt" if !PVE::QemuServer::vm_deviceplug($storecfg, $conf, $vmid, $opt, $drive);
- }
-};
-
-my $vmconfig_update_net = sub {
- my ($rpcenv, $authuser, $conf, $storecfg, $vmid, $opt, $value) = @_;
-
- if ($conf->{$opt} && PVE::QemuServer::check_running($vmid)) {
- my $oldnet = PVE::QemuServer::parse_net($conf->{$opt});
- my $newnet = PVE::QemuServer::parse_net($value);
-
- if($oldnet->{model} ne $newnet->{model}){
- #if model change, we try to hot-unplug
- die "error hot-unplug $opt for update" if !PVE::QemuServer::vm_deviceunplug($vmid, $conf, $opt);
- }else{
-
- if($newnet->{bridge} && $oldnet->{bridge}){
- my $iface = "tap".$vmid."i".$1 if $opt =~ m/net(\d+)/;
-
- if($newnet->{rate} ne $oldnet->{rate}){
- PVE::Network::tap_rate_limit($iface, $newnet->{rate});
- }
-
- if(($newnet->{bridge} ne $oldnet->{bridge}) || ($newnet->{tag} ne $oldnet->{tag})){
- eval{PVE::Network::tap_unplug($iface, $oldnet->{bridge}, $oldnet->{tag});};
- PVE::Network::tap_plug($iface, $newnet->{bridge}, $newnet->{tag});
- }
-
- }else{
- #if bridge/nat mode change, we try to hot-unplug
- die "error hot-unplug $opt for update" if !PVE::QemuServer::vm_deviceunplug($vmid, $conf, $opt);
- }
+ while (my ($opt, $force) = each %$pending_delete_hash) {
+ next if $conf->{pending}->{$opt}; # just to be sure
+ next if $conf->{$opt};
+ my $item = { key => $opt, delete => ($force ? 2 : 1)};
+ push @$res, $item;
}
- }
- $conf->{$opt} = $value;
- PVE::QemuServer::update_config_nolock($vmid, $conf, 1);
- $conf = PVE::QemuServer::load_config($vmid); # update/reload
-
- my $net = PVE::QemuServer::parse_net($conf->{$opt});
-
- die "error hotplug $opt" if !PVE::QemuServer::vm_deviceplug($storecfg, $conf, $vmid, $opt, $net);
-};
+ return $res;
+ }});
# POST/PUT {vmid}/config implementation
#
my $background_delay = extract_param($param, 'background_delay');
my @paramarr = (); # used for log message
- foreach my $key (keys %$param) {
+ foreach my $key (sort keys %$param) {
push @paramarr, "-$key", $param->{$key};
}
my $delete_str = extract_param($param, 'delete');
+ my $revert_str = extract_param($param, 'revert');
+
my $force = extract_param($param, 'force');
- die "no options specified\n" if !$delete_str && !scalar(keys %$param);
+ die "no options specified\n" if !$delete_str && !$revert_str && !scalar(keys %$param);
my $storecfg = PVE::Storage::config();
# now try to verify all parameters
+ my $revert = {};
+ foreach my $opt (PVE::Tools::split_list($revert_str)) {
+ if (!PVE::QemuServer::option_exists($opt)) {
+ raise_param_exc({ revert => "unknown option '$opt'" });
+ }
+
+ raise_param_exc({ delete => "you can't use '-$opt' and " .
+ "-revert $opt' at the same time" })
+ if defined($param->{$opt});
+
+ $revert->{$opt} = 1;
+ }
+
my @delete = ();
foreach my $opt (PVE::Tools::split_list($delete_str)) {
$opt = 'ide2' if $opt eq 'cdrom';
+
raise_param_exc({ delete => "you can't use '-$opt' and " .
"-delete $opt' at the same time" })
if defined($param->{$opt});
+ raise_param_exc({ revert => "you can't use '-delete $opt' and " .
+ "-revert $opt' at the same time" })
+ if $revert->{$opt};
+
if (!PVE::QemuServer::option_exists($opt)) {
raise_param_exc({ delete => "unknown option '$opt'" });
}
push @delete, $opt;
}
+ my $repl_conf = PVE::ReplicationConfig->new();
+ my $is_replicated = $repl_conf->check_for_existing_jobs($vmid, 1);
+ my $check_replication = sub {
+ my ($drive) = @_;
+ return if !$is_replicated;
+ my $volid = $drive->{file};
+ return if !$volid || !($drive->{replicate}//1);
+ return if PVE::QemuServer::drive_is_cdrom($drive);
+ my ($storeid, $format);
+ if ($volid =~ $NEW_DISK_RE) {
+ $storeid = $2;
+ $format = $drive->{format} || PVE::Storage::storage_default_format($storecfg, $storeid);
+ } else {
+ ($storeid, undef) = PVE::Storage::parse_volume_id($volid, 1);
+ $format = (PVE::Storage::parse_volname($storecfg, $volid))[6];
+ }
+ return if PVE::Storage::storage_can_replicate($storecfg, $storeid, $format);
+ my $scfg = PVE::Storage::storage_config($storecfg, $storeid);
+ return if $scfg->{shared};
+ die "cannot add non-replicatable volume to a replicated VM\n";
+ };
+
foreach my $opt (keys %$param) {
- if (PVE::QemuServer::valid_drivename($opt)) {
+ if (PVE::QemuServer::is_valid_drivename($opt)) {
# cleanup drive path
my $drive = PVE::QemuServer::parse_drive($opt, $param->{$opt});
+ raise_param_exc({ $opt => "unable to parse drive options" }) if !$drive;
PVE::QemuServer::cleanup_drive_path($opt, $storecfg, $drive);
+ $check_replication->($drive);
$param->{$opt} = PVE::QemuServer::print_drive($vmid, $drive);
} elsif ($opt =~ m/^net(\d+)$/) {
# add macaddr
my $updatefn = sub {
- my $conf = PVE::QemuServer::load_config($vmid);
+ my $conf = PVE::QemuConfig->load_config($vmid);
die "checksum missmatch (file change by other user?)\n"
if $digest && $digest ne $conf->{digest};
- PVE::QemuServer::check_lock($conf) if !$skiplock;
+ PVE::QemuConfig->check_lock($conf) if !$skiplock;
+
+ foreach my $opt (keys %$revert) {
+ if (defined($conf->{$opt})) {
+ $param->{$opt} = $conf->{$opt};
+ } elsif (defined($conf->{pending}->{$opt})) {
+ push @delete, $opt;
+ }
+ }
if ($param->{memory} || defined($param->{balloon})) {
- my $maxmem = $param->{memory} || $conf->{memory} || $defaults->{memory};
- my $balloon = defined($param->{balloon}) ? $param->{balloon} : $conf->{balloon};
+ my $maxmem = $param->{memory} || $conf->{pending}->{memory} || $conf->{memory} || $defaults->{memory};
+ my $balloon = defined($param->{balloon}) ? $param->{balloon} : $conf->{pending}->{balloon} || $conf->{balloon};
die "balloon value too large (must be smaller than assigned memory)\n"
if $balloon && $balloon > $maxmem;
print "update VM $vmid: " . join (' ', @paramarr) . "\n";
- foreach my $opt (@delete) { # delete
- $conf = PVE::QemuServer::load_config($vmid); # update/reload
- &$vmconfig_delete_option($rpcenv, $authuser, $conf, $storecfg, $vmid, $opt, $force);
- }
+ # write updates to pending section
- my $running = PVE::QemuServer::check_running($vmid);
+ my $modified = {}; # record what $option we modify
- foreach my $opt (keys %$param) { # add/change
+ foreach my $opt (@delete) {
+ $modified->{$opt} = 1;
+ $conf = PVE::QemuConfig->load_config($vmid); # update/reload
+ if (!defined($conf->{$opt}) && !defined($conf->{pending}->{$opt})) {
+ warn "cannot delete '$opt' - not set in current configuration!\n";
+ $modified->{$opt} = 0;
+ next;
+ }
- $conf = PVE::QemuServer::load_config($vmid); # update/reload
+ if ($opt =~ m/^unused/) {
+ my $drive = PVE::QemuServer::parse_drive($opt, $conf->{$opt});
+ PVE::QemuConfig->check_protection($conf, "can't remove unused disk '$drive->{file}'");
+ $rpcenv->check_vm_perm($authuser, $vmid, undef, ['VM.Config.Disk']);
+ if (PVE::QemuServer::try_deallocate_drive($storecfg, $vmid, $conf, $opt, $drive, $rpcenv, $authuser)) {
+ delete $conf->{$opt};
+ PVE::QemuConfig->write_config($vmid, $conf);
+ }
+ } elsif (PVE::QemuServer::is_valid_drivename($opt)) {
+ PVE::QemuConfig->check_protection($conf, "can't remove drive '$opt'");
+ $rpcenv->check_vm_perm($authuser, $vmid, undef, ['VM.Config.Disk']);
+ PVE::QemuServer::vmconfig_register_unused_drive($storecfg, $vmid, $conf, PVE::QemuServer::parse_drive($opt, $conf->{pending}->{$opt}))
+ if defined($conf->{pending}->{$opt});
+ PVE::QemuServer::vmconfig_delete_pending_option($conf, $opt, $force);
+ PVE::QemuConfig->write_config($vmid, $conf);
+ } else {
+ PVE::QemuServer::vmconfig_delete_pending_option($conf, $opt, $force);
+ PVE::QemuConfig->write_config($vmid, $conf);
+ }
+ }
- next if $conf->{$opt} && ($param->{$opt} eq $conf->{$opt}); # skip if nothing changed
+ foreach my $opt (keys %$param) { # add/change
+ $modified->{$opt} = 1;
+ $conf = PVE::QemuConfig->load_config($vmid); # update/reload
+ next if defined($conf->{pending}->{$opt}) && ($param->{$opt} eq $conf->{pending}->{$opt}); # skip if nothing changed
- if (PVE::QemuServer::valid_drivename($opt)) {
+ if (PVE::QemuServer::is_valid_drivename($opt)) {
+ my $drive = PVE::QemuServer::parse_drive($opt, $param->{$opt});
+ if (PVE::QemuServer::drive_is_cdrom($drive)) { # CDROM
+ $rpcenv->check_vm_perm($authuser, $vmid, undef, ['VM.Config.CDROM']);
+ } else {
+ $rpcenv->check_vm_perm($authuser, $vmid, undef, ['VM.Config.Disk']);
+ }
+ PVE::QemuServer::vmconfig_register_unused_drive($storecfg, $vmid, $conf, PVE::QemuServer::parse_drive($opt, $conf->{pending}->{$opt}))
+ if defined($conf->{pending}->{$opt});
- &$vmconfig_update_disk($rpcenv, $authuser, $conf, $storecfg, $vmid,
- $opt, $param->{$opt}, $force);
+ &$create_disks($rpcenv, $authuser, $conf->{pending}, $storecfg, $vmid, undef, {$opt => $param->{$opt}});
+ } else {
+ $conf->{pending}->{$opt} = $param->{$opt};
+ }
+ PVE::QemuServer::vmconfig_undelete_pending_option($conf, $opt);
+ PVE::QemuConfig->write_config($vmid, $conf);
+ }
- } elsif ($opt =~ m/^net(\d+)$/) { #nics
+ # remove pending changes when nothing changed
+ $conf = PVE::QemuConfig->load_config($vmid); # update/reload
+ my $changes = PVE::QemuServer::vmconfig_cleanup_pending($conf);
+ PVE::QemuConfig->write_config($vmid, $conf) if $changes;
- &$vmconfig_update_net($rpcenv, $authuser, $conf, $storecfg, $vmid,
- $opt, $param->{$opt});
+ return if !scalar(keys %{$conf->{pending}});
- } else {
+ my $running = PVE::QemuServer::check_running($vmid);
- if($opt eq 'tablet' && $param->{$opt} == 1){
- PVE::QemuServer::vm_deviceplug(undef, $conf, $vmid, $opt);
- } elsif($opt eq 'tablet' && $param->{$opt} == 0){
- PVE::QemuServer::vm_deviceunplug($vmid, $conf, $opt);
- }
+ # apply pending changes
- $conf->{$opt} = $param->{$opt};
- PVE::QemuServer::update_config_nolock($vmid, $conf, 1);
- }
- }
+ $conf = PVE::QemuConfig->load_config($vmid); # update/reload
- # allow manual ballooning if shares is set to zero
- if ($running && defined($param->{balloon}) &&
- defined($conf->{shares}) && ($conf->{shares} == 0)) {
- my $balloon = $param->{'balloon'} || $conf->{memory} || $defaults->{memory};
- PVE::QemuServer::vm_mon_cmd($vmid, "balloon", value => $balloon*1024*1024);
+ if ($running) {
+ my $errors = {};
+ PVE::QemuServer::vmconfig_hotplug_pending($vmid, $conf, $storecfg, $modified, $errors);
+ raise_param_exc($errors) if scalar(keys %$errors);
+ } else {
+ PVE::QemuServer::vmconfig_apply_pending($vmid, $conf, $storecfg, $running);
}
+
+ return;
};
if ($sync) {
}
};
- return PVE::QemuServer::lock_config($vmid, $updatefn);
+ return PVE::QemuConfig->lock_config($vmid, $updatefn);
};
my $vm_config_perm_list = [
description => "A list of settings you want to delete.",
optional => 1,
},
+ revert => {
+ type => 'string', format => 'pve-configid-list',
+ description => "Revert a pending change.",
+ optional => 1,
+ },
force => {
type => 'boolean',
description => $opt_force_description,
properties => PVE::QemuServer::json_config_properties(
{
node => get_standard_option('pve-node'),
- vmid => get_standard_option('pve-vmid'),
+ vmid => get_standard_option('pve-vmid', { completion => \&PVE::QemuServer::complete_vmid }),
skiplock => get_standard_option('skiplock'),
delete => {
type => 'string', format => 'pve-configid-list',
description => "A list of settings you want to delete.",
optional => 1,
},
+ revert => {
+ type => 'string', format => 'pve-configid-list',
+ description => "Revert a pending change.",
+ optional => 1,
+ },
force => {
type => 'boolean',
description => $opt_force_description,
additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
- vmid => get_standard_option('pve-vmid'),
+ vmid => get_standard_option('pve-vmid', { completion => \&PVE::QemuServer::complete_vmid_stopped }),
skiplock => get_standard_option('skiplock'),
},
},
if $skiplock && $authuser ne 'root@pam';
# test if VM exists
- my $conf = PVE::QemuServer::load_config($vmid);
+ my $conf = PVE::QemuConfig->load_config($vmid);
my $storecfg = PVE::Storage::config();
- my $delVMfromPoolFn = sub {
- my $usercfg = cfs_read_file("user.cfg");
- if (my $pool = $usercfg->{vms}->{$vmid}) {
- if (my $data = $usercfg->{pools}->{$pool}) {
- delete $data->{vms}->{$vmid};
- delete $usercfg->{vms}->{$vmid};
- cfs_write_file("user.cfg", $usercfg);
- }
- }
- };
+ PVE::QemuConfig->check_protection($conf, "can't remove VM $vmid");
+
+ die "unable to remove VM $vmid - used in HA resources\n"
+ if PVE::HA::Config::vm_is_ha_managed($vmid);
+
+ # do not allow destroy if there are replication jobs
+ my $repl_conf = PVE::ReplicationConfig->new();
+ $repl_conf->check_for_existing_jobs($vmid);
+
+ # early tests (repeat after locking)
+ die "VM $vmid is running - destroy failed\n"
+ if PVE::QemuServer::check_running($vmid);
my $realcmd = sub {
my $upid = shift;
PVE::QemuServer::vm_destroy($storecfg, $vmid, $skiplock);
- PVE::AccessControl::remove_vm_from_pool($vmid);
+ PVE::AccessControl::remove_vm_access($vmid);
+
+ PVE::Firewall::remove_vmfw_conf($vmid);
};
return $rpcenv->fork_worker('qmdestroy', $vmid, $authuser, $realcmd);
additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
- vmid => get_standard_option('pve-vmid'),
+ vmid => get_standard_option('pve-vmid', { completion => \&PVE::QemuServer::complete_vmid }),
idlist => {
type => 'string', format => 'pve-configid-list',
description => "A list of disk IDs you want to delete.",
properties => {
node => get_standard_option('pve-node'),
vmid => get_standard_option('pve-vmid'),
+ websocket => {
+ optional => 1,
+ type => 'boolean',
+ description => "starts websockify instead of vncproxy",
+ },
},
},
returns => {
my $vmid = $param->{vmid};
my $node = $param->{node};
+ my $websocket = $param->{websocket};
+
+ my $conf = PVE::QemuConfig->load_config($vmid, $node); # check if VM exists
my $authpath = "/vms/$vmid";
$sslcert = PVE::Tools::file_get_contents("/etc/pve/pve-root-ca.pem", 8192)
if !$sslcert;
- my $port = PVE::Tools::next_vnc_port();
-
- my $remip;
+ my ($remip, $family);
+ my $remcmd = [];
if ($node ne 'localhost' && $node ne PVE::INotify::nodename()) {
- $remip = PVE::Cluster::remote_node_ip($node);
+ ($remip, $family) = PVE::Cluster::remote_node_ip($node);
+ # NOTE: kvm VNC traffic is already TLS encrypted or is known unsecure
+ $remcmd = ['/usr/bin/ssh', '-T', '-o', 'BatchMode=yes', $remip];
+ } else {
+ $family = PVE::Tools::get_host_address_family($node);
}
- # NOTE: kvm VNC traffic is already TLS encrypted
- my $remcmd = $remip ? ['/usr/bin/ssh', '-T', '-o', 'BatchMode=yes', $remip] : [];
+ my $port = PVE::Tools::next_vnc_port($family);
my $timeout = 10;
syslog('info', "starting vnc proxy $upid\n");
- my $qmcmd = [@$remcmd, "/usr/sbin/qm", 'vncproxy', $vmid];
+ my $cmd;
- my $qmstr = join(' ', @$qmcmd);
+ if ($conf->{vga} && ($conf->{vga} =~ m/^serial\d+$/)) {
- # also redirect stderr (else we get RFB protocol errors)
- my $cmd = ['/bin/nc', '-l', '-p', $port, '-w', $timeout, '-c', "$qmstr 2>/dev/null"];
+ die "Websocket mode is not supported in vga serial mode!" if $websocket;
- PVE::Tools::run_command($cmd);
+ my $termcmd = [ '/usr/sbin/qm', 'terminal', $vmid, '-iface', $conf->{vga} ];
+ #my $termcmd = "/usr/bin/qm terminal -iface $conf->{vga}";
+ $cmd = ['/usr/bin/vncterm', '-rfbport', $port,
+ '-timeout', $timeout, '-authpath', $authpath,
+ '-perm', 'Sys.Console', '-c', @$remcmd, @$termcmd];
+ PVE::Tools::run_command($cmd);
+ } else {
+
+ $ENV{LC_PVE_TICKET} = $ticket if $websocket; # set ticket with "qm vncproxy"
+
+ $cmd = [@$remcmd, "/usr/sbin/qm", 'vncproxy', $vmid];
+
+ my $sock = IO::Socket::IP->new(
+ ReuseAddr => 1,
+ Listen => 1,
+ LocalPort => $port,
+ Proto => 'tcp',
+ GetAddrInfoFlags => 0,
+ ) or die "failed to create socket: $!\n";
+ # Inside the worker we shouldn't have any previous alarms
+ # running anyway...:
+ alarm(0);
+ local $SIG{ALRM} = sub { die "connection timed out\n" };
+ alarm $timeout;
+ accept(my $cli, $sock) or die "connection failed: $!\n";
+ alarm(0);
+ close($sock);
+ if (PVE::Tools::run_command($cmd,
+ output => '>&'.fileno($cli),
+ input => '<&'.fileno($cli),
+ noerr => 1) != 0)
+ {
+ die "Failed to run vncproxy.\n";
+ }
+ }
return;
};
- my $upid = $rpcenv->fork_worker('vncproxy', $vmid, $authuser, $realcmd);
+ my $upid = $rpcenv->fork_worker('vncproxy', $vmid, $authuser, $realcmd, 1);
PVE::Tools::wait_for_vnc_port($port);
};
}});
+# API endpoint: GET /nodes/{node}/qemu/{vmid}/vncwebsocket
+# Validates a previously-issued VNC ticket and echoes back the proxy port;
+# the actual websocket upgrade is handled by the HTTP server layer.
+__PACKAGE__->register_method({
+ name => 'vncwebsocket',
+ path => '{vmid}/vncwebsocket',
+ method => 'GET',
+ permissions => {
+ description => "You also need to pass a valid ticket (vncticket).",
+ check => ['perm', '/vms/{vmid}', [ 'VM.Console' ]],
+ },
+ description => "Opens a websocket for VNC traffic.",
+ parameters => {
+ additionalProperties => 0,
+ properties => {
+ node => get_standard_option('pve-node'),
+ vmid => get_standard_option('pve-vmid'),
+ vncticket => {
+ description => "Ticket from previous call to vncproxy.",
+ type => 'string',
+ maxLength => 512,
+ },
+ port => {
+ description => "Port number returned by previous vncproxy call.",
+ type => 'integer',
+ minimum => 5900,
+ maximum => 5999,
+ },
+ },
+ },
+ returns => {
+ type => "object",
+ properties => {
+ port => { type => 'string' },
+ },
+ },
+ code => sub {
+ my ($param) = @_;
+
+ my $rpcenv = PVE::RPCEnvironment::get();
+
+ my $authuser = $rpcenv->get_user();
+
+ my $vmid = $param->{vmid};
+ my $node = $param->{node};
+
+ my $authpath = "/vms/$vmid";
+
+ # dies if the ticket does not match ($authuser, $authpath)
+ PVE::AccessControl::verify_vnc_ticket($param->{vncticket}, $authuser, $authpath);
+
+ my $conf = PVE::QemuConfig->load_config($vmid, $node); # VM exists ?
+
+ # Note: VNC ports are accessible from outside, so we do not gain any
+ # security if we verify that $param->{port} belongs to VM $vmid. This
+ # check is done by verifying the VNC ticket (inside VNC protocol).
+
+ my $port = $param->{port};
+
+ return { port => $port };
+ }});
+
+# API endpoint: POST /nodes/{node}/qemu/{vmid}/spiceproxy
+# Builds a remote-viewer (.vv) configuration for a SPICE connection to the VM
+# and arms the running qemu instance with a short-lived one-time password.
+__PACKAGE__->register_method({
+ name => 'spiceproxy',
+ path => '{vmid}/spiceproxy',
+ method => 'POST',
+ protected => 1,
+ proxyto => 'node',
+ permissions => {
+ check => ['perm', '/vms/{vmid}', [ 'VM.Console' ]],
+ },
+ description => "Returns a SPICE configuration to connect to the VM.",
+ parameters => {
+ additionalProperties => 0,
+ properties => {
+ node => get_standard_option('pve-node'),
+ vmid => get_standard_option('pve-vmid'),
+ proxy => get_standard_option('spice-proxy', { optional => 1 }),
+ },
+ },
+ returns => get_standard_option('remote-viewer-config'),
+ code => sub {
+ my ($param) = @_;
+
+ my $rpcenv = PVE::RPCEnvironment::get();
+
+ my $authuser = $rpcenv->get_user();
+
+ my $vmid = $param->{vmid};
+ my $node = $param->{node};
+ my $proxy = $param->{proxy};
+
+ # load_config also verifies the VM exists on the given node
+ my $conf = PVE::QemuConfig->load_config($vmid, $node);
+ my $title = "VM $vmid";
+ $title .= " - ". $conf->{name} if $conf->{name};
+
+ my $port = PVE::QemuServer::spice_port($vmid);
+
+ # $ticket is the SPICE password; $remote_viewer_config is the client config blob
+ my ($ticket, undef, $remote_viewer_config) =
+ PVE::AccessControl::remote_viewer_config($authuser, $vmid, $node, $proxy, $title, $port);
+
+ # hand the ticket to qemu and let it expire 30 seconds from now,
+ # so the returned config is only usable for a single, prompt connect
+ PVE::QemuServer::vm_mon_cmd($vmid, "set_password", protocol => 'spice', password => $ticket);
+ PVE::QemuServer::vm_mon_cmd($vmid, "expire_password", protocol => 'spice', time => "+30");
+
+ return $remote_viewer_config;
+ }});
+
__PACKAGE__->register_method({
name => 'vmcmdidx',
path => '{vmid}/status',
my ($param) = @_;
# test if VM exists
- my $conf = PVE::QemuServer::load_config($param->{vmid});
+ my $conf = PVE::QemuConfig->load_config($param->{vmid});
my $res = [
{ subdir => 'current' },
return $res;
}});
-my $vm_is_ha_managed = sub {
- my ($vmid) = @_;
-
- my $cc = PVE::Cluster::cfs_read_file('cluster.conf');
- if (PVE::Cluster::cluster_conf_lookup_pvevm($cc, 0, $vmid, 1)) {
- return 1;
- }
- return 0;
-};
-
__PACKAGE__->register_method({
name => 'vm_status',
path => '{vmid}/status/current',
my ($param) = @_;
# test if VM exists
- my $conf = PVE::QemuServer::load_config($param->{vmid});
+ my $conf = PVE::QemuConfig->load_config($param->{vmid});
my $vmstatus = PVE::QemuServer::vmstatus($param->{vmid}, 1);
my $status = $vmstatus->{$param->{vmid}};
- $status->{ha} = &$vm_is_ha_managed($param->{vmid});
+ $status->{ha} = PVE::HA::Config::get_service_status("vm:$param->{vmid}");
+
+ $status->{spice} = 1 if PVE::QemuServer::vga_conf_has_spice($conf->{vga});
return $status;
}});
additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
- vmid => get_standard_option('pve-vmid'),
+ vmid => get_standard_option('pve-vmid',
+ { completion => \&PVE::QemuServer::complete_vmid_stopped }),
skiplock => get_standard_option('skiplock'),
stateuri => get_standard_option('pve-qm-stateuri'),
migratedfrom => get_standard_option('pve-node',{ optional => 1 }),
+ migration_type => {
+ type => 'string',
+ enum => ['secure', 'insecure'],
+ description => "Migration traffic is encrypted using an SSH " .
+ "tunnel by default. On secure, completely private networks " .
+ "this can be disabled to increase performance.",
+ optional => 1,
+ },
+ migration_network => {
+ type => 'string', format => 'CIDR',
+ description => "CIDR of the (sub) network that is used for migration.",
+ optional => 1,
+ },
machine => get_standard_option('pve-qm-machine'),
+ targetstorage => {
+ description => "Target storage for the migration. (Can be '1' to use the same storage id as on the source node.)",
+ type => 'string',
+ optional => 1
+ }
},
},
returns => {
raise_param_exc({ migratedfrom => "Only root may use this option." })
if $migratedfrom && $authuser ne 'root@pam';
+ my $migration_type = extract_param($param, 'migration_type');
+ raise_param_exc({ migration_type => "Only root may use this option." })
+ if $migration_type && $authuser ne 'root@pam';
+
+ my $migration_network = extract_param($param, 'migration_network');
+ raise_param_exc({ migration_network => "Only root may use this option." })
+ if $migration_network && $authuser ne 'root@pam';
+
+ my $targetstorage = extract_param($param, 'targetstorage');
+ raise_param_exc({ targetstorage => "Only root may use this option." })
+ if $targetstorage && $authuser ne 'root@pam';
+
+ raise_param_exc({ targetstorage => "targetstorage can only by used with migratedfrom." })
+ if $targetstorage && !$migratedfrom;
+
+ # read spice ticket from STDIN
+ my $spice_ticket;
+ if ($stateuri && ($stateuri eq 'tcp') && $migratedfrom && ($rpcenv->{type} eq 'cli')) {
+ if (defined(my $line = <>)) {
+ chomp $line;
+ $spice_ticket = $line;
+ }
+ }
+
+ PVE::Cluster::check_cfs_quorum();
+
my $storecfg = PVE::Storage::config();
- if (&$vm_is_ha_managed($vmid) && !$stateuri &&
+ if (PVE::HA::Config::vm_is_ha_managed($vmid) && !$stateuri &&
$rpcenv->{type} ne 'ha') {
my $hacmd = sub {
my $upid = shift;
- my $service = "pvevm:$vmid";
+ my $service = "vm:$vmid";
- my $cmd = ['clusvcadm', '-e', $service, '-m', $node];
+ my $cmd = ['ha-manager', 'set', $service, '--state', 'started'];
- print "Executing HA start for VM $vmid\n";
+ print "Requesting HA start for VM $vmid\n";
PVE::Tools::run_command($cmd);
syslog('info', "start VM $vmid: $upid\n");
- PVE::QemuServer::vm_start($storecfg, $vmid, $stateuri, $skiplock, $migratedfrom, undef, $machine);
+ PVE::QemuServer::vm_start($storecfg, $vmid, $stateuri, $skiplock, $migratedfrom, undef,
+ $machine, $spice_ticket, $migration_network, $migration_type, $targetstorage);
return;
};
method => 'POST',
protected => 1,
proxyto => 'node',
- description => "Stop virtual machine.",
+ description => "Stop virtual machine. The qemu process will exit immediately. This " .
+ "is akin to pulling the power plug of a running computer and may damage the VM data",
permissions => {
check => ['perm', '/vms/{vmid}', [ 'VM.PowerMgmt' ]],
},
additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
- vmid => get_standard_option('pve-vmid'),
+ vmid => get_standard_option('pve-vmid',
+ { completion => \&PVE::QemuServer::complete_vmid_running }),
skiplock => get_standard_option('skiplock'),
- migratedfrom => get_standard_option('pve-node',{ optional => 1 }),
+ migratedfrom => get_standard_option('pve-node', { optional => 1 }),
timeout => {
description => "Wait maximal timeout seconds.",
type => 'integer',
optional => 1,
},
keepActive => {
- description => "Do not decativate storage volumes.",
+ description => "Do not deactivate storage volumes.",
type => 'boolean',
optional => 1,
default => 0,
my $storecfg = PVE::Storage::config();
- if (&$vm_is_ha_managed($vmid) && $rpcenv->{type} ne 'ha') {
+ if (PVE::HA::Config::vm_is_ha_managed($vmid) && ($rpcenv->{type} ne 'ha') && !defined($migratedfrom)) {
my $hacmd = sub {
my $upid = shift;
- my $service = "pvevm:$vmid";
+ my $service = "vm:$vmid";
- my $cmd = ['clusvcadm', '-d', $service];
+ my $cmd = ['ha-manager', 'set', $service, '--state', 'stopped'];
- print "Executing HA stop for VM $vmid\n";
+ print "Requesting HA stop for VM $vmid\n";
PVE::Tools::run_command($cmd);
additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
- vmid => get_standard_option('pve-vmid'),
+ vmid => get_standard_option('pve-vmid',
+ { completion => \&PVE::QemuServer::complete_vmid_running }),
skiplock => get_standard_option('skiplock'),
},
},
method => 'POST',
protected => 1,
proxyto => 'node',
- description => "Shutdown virtual machine.",
+ description => "Shutdown virtual machine. This is similar to pressing the power button on a physical machine. " .
+ "This will send an ACPI event for the guest OS, which should then proceed to a clean shutdown.",
permissions => {
check => ['perm', '/vms/{vmid}', [ 'VM.PowerMgmt' ]],
},
additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
- vmid => get_standard_option('pve-vmid'),
+ vmid => get_standard_option('pve-vmid',
+ { completion => \&PVE::QemuServer::complete_vmid_running }),
skiplock => get_standard_option('skiplock'),
timeout => {
description => "Wait maximal timeout seconds.",
default => 0,
},
keepActive => {
- description => "Do not decativate storage volumes.",
+ description => "Do not deactivate storage volumes.",
type => 'boolean',
optional => 1,
default => 0,
my $storecfg = PVE::Storage::config();
- my $realcmd = sub {
- my $upid = shift;
+ my $shutdown = 1;
- syslog('info', "shutdown VM $vmid: $upid\n");
+ # if vm is paused, do not shutdown (but stop if forceStop = 1)
+ # otherwise, we will infer a shutdown command, but run into the timeout,
+ # then when the vm is resumed, it will instantly shutdown
+ #
+ # checking the qmp status here to get feedback to the gui/cli/api
+ # and the status query should not take too long
+ my $qmpstatus;
+ eval {
+ $qmpstatus = PVE::QemuServer::vm_qmp_command($vmid, { execute => "query-status" }, 0);
+ };
+ my $err = $@ if $@;
- PVE::QemuServer::vm_stop($storecfg, $vmid, $skiplock, 0, $param->{timeout},
- 1, $param->{forceStop}, $keepActive);
+ if (!$err && $qmpstatus->{status} eq "paused") {
+ if ($param->{forceStop}) {
+ warn "VM is paused - stop instead of shutdown\n";
+ $shutdown = 0;
+ } else {
+ die "VM is paused - cannot shutdown\n";
+ }
+ }
- return;
- };
+ if (PVE::HA::Config::vm_is_ha_managed($vmid) &&
+ ($rpcenv->{type} ne 'ha')) {
+
+ my $hacmd = sub {
+ my $upid = shift;
+
+ my $service = "vm:$vmid";
+
+ my $cmd = ['ha-manager', 'set', $service, '--state', 'stopped'];
+
+ print "Requesting HA stop for VM $vmid\n";
+
+ PVE::Tools::run_command($cmd);
- return $rpcenv->fork_worker('qmshutdown', $vmid, $authuser, $realcmd);
+ return;
+ };
+
+ return $rpcenv->fork_worker('hastop', $vmid, $authuser, $hacmd);
+
+ } else {
+
+ my $realcmd = sub {
+ my $upid = shift;
+
+ syslog('info', "shutdown VM $vmid: $upid\n");
+
+ PVE::QemuServer::vm_stop($storecfg, $vmid, $skiplock, 0, $param->{timeout},
+ $shutdown, $param->{forceStop}, $keepActive);
+
+ return;
+ };
+
+ return $rpcenv->fork_worker('qmshutdown', $vmid, $authuser, $realcmd);
+ }
}});
__PACKAGE__->register_method({
additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
- vmid => get_standard_option('pve-vmid'),
+ vmid => get_standard_option('pve-vmid',
+ { completion => \&PVE::QemuServer::complete_vmid_running }),
skiplock => get_standard_option('skiplock'),
},
},
additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
- vmid => get_standard_option('pve-vmid'),
+ vmid => get_standard_option('pve-vmid',
+ { completion => \&PVE::QemuServer::complete_vmid_running }),
skiplock => get_standard_option('skiplock'),
+ nocheck => { type => 'boolean', optional => 1 },
+
},
},
returns => {
raise_param_exc({ skiplock => "Only root may use this option." })
if $skiplock && $authuser ne 'root@pam';
- die "VM $vmid not running\n" if !PVE::QemuServer::check_running($vmid);
+ my $nocheck = extract_param($param, 'nocheck');
+
+ die "VM $vmid not running\n" if !PVE::QemuServer::check_running($vmid, $nocheck);
my $realcmd = sub {
my $upid = shift;
syslog('info', "resume VM $vmid: $upid\n");
- PVE::QemuServer::vm_resume($vmid, $skiplock);
+ PVE::QemuServer::vm_resume($vmid, $skiplock, $nocheck);
return;
};
additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
- vmid => get_standard_option('pve-vmid'),
+ vmid => get_standard_option('pve-vmid',
+ { completion => \&PVE::QemuServer::complete_vmid_running }),
skiplock => get_standard_option('skiplock'),
key => {
description => "The key (qemu monitor encoding).",
my $running = PVE::QemuServer::check_running($vmid);
- my $conf = PVE::QemuServer::load_config($vmid);
+ my $conf = PVE::QemuConfig->load_config($vmid);
if($snapname){
my $snap = $conf->{snapshots}->{$snapname};
my $storecfg = PVE::Storage::config();
my $nodelist = PVE::QemuServer::shared_nodes($conf, $storecfg);
- my $hasFeature = PVE::QemuServer::has_feature($feature, $conf, $storecfg, $snapname, $running);
+ my $hasFeature = PVE::QemuConfig->has_feature($feature, $conf, $storecfg, $snapname, $running);
return {
hasFeature => $hasFeature,
additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
- vmid => get_standard_option('pve-vmid'),
+ vmid => get_standard_option('pve-vmid', { completion => \&PVE::QemuServer::complete_vmid }),
newid => get_standard_option('pve-vmid', { description => 'VMID for the clone.' }),
name => {
optional => 1,
description => "Add the new VM to the specified pool.",
},
snapname => get_standard_option('pve-snapshot-name', {
- requires => 'full',
optional => 1,
}),
storage => get_standard_option('pve-storage-id', {
# do all tests after lock
# we also try to do all tests before we fork the worker
- my $conf = PVE::QemuServer::load_config($vmid);
+ my $conf = PVE::QemuConfig->load_config($vmid);
- PVE::QemuServer::check_lock($conf);
+ PVE::QemuConfig->check_lock($conf);
my $verify_running = PVE::QemuServer::check_running($vmid) || 0;
die "can't clone VM to node '$target' (VM uses local storage)\n" if $target && !$sharedvm;
- my $conffile = PVE::QemuServer::config_file($newid);
+ my $conffile = PVE::QemuConfig->config_file($newid);
die "unable to create VM $newid: config file already exists\n"
if -f $conffile;
my $newconf = { lock => 'clone' };
my $drives = {};
+ my $fullclone = {};
my $vollist = [];
foreach my $opt (keys %$oldconf) {
next if $opt eq 'snapshots' || $opt eq 'parent' || $opt eq 'snaptime' ||
$opt eq 'vmstate' || $opt eq 'snapstate';
+ # no need to copy unused images, because VMID(owner) changes anyways
+ next if $opt =~ m/^unused\d+$/;
+
# always change MAC! address
if ($opt =~ m/^net(\d+)$/) {
my $net = PVE::QemuServer::parse_net($value);
- $net->{macaddr} = PVE::Tools::random_ether_addr();
+ my $dc = PVE::Cluster::cfs_read_file('datacenter.cfg');
+ $net->{macaddr} = PVE::Tools::random_ether_addr($dc->{mac_prefix});
$newconf->{$opt} = PVE::QemuServer::print_net($net);
- } elsif (my $drive = PVE::QemuServer::parse_drive($opt, $value)) {
+ } elsif (PVE::QemuServer::is_valid_drivename($opt)) {
+ my $drive = PVE::QemuServer::parse_drive($opt, $value);
+ die "unable to parse drive options for '$opt'\n" if !$drive;
if (PVE::QemuServer::drive_is_cdrom($drive)) {
$newconf->{$opt} = $value; # simply copy configuration
} else {
- if ($param->{full} || !PVE::Storage::volume_is_base($storecfg, $drive->{file})) {
- die "Full clone feature is not available"
+ if ($param->{full}) {
+ die "Full clone feature is not supported for drive '$opt'\n"
if !PVE::Storage::volume_has_feature($storecfg, 'copy', $drive->{file}, $snapname, $running);
- $drive->{full} = 1;
+ $fullclone->{$opt} = 1;
+ } else {
+ # not full means clone instead of copy
+ die "Linked clone feature is not supported for drive '$opt'\n"
+ if !PVE::Storage::volume_has_feature($storecfg, 'clone', $drive->{file}, $snapname, $running);
}
$drives->{$opt} = $drive;
push @$vollist, $drive->{file};
}
}
+ # auto generate a new uuid
+ my ($uuid, $uuid_str);
+ UUID::generate($uuid);
+ UUID::unparse($uuid, $uuid_str);
+ my $smbios1 = PVE::QemuServer::parse_smbios1($newconf->{smbios1} || '');
+ $smbios1->{uuid} = $uuid_str;
+ $newconf->{smbios1} = PVE::QemuServer::print_smbios1($smbios1);
+
delete $newconf->{template};
if ($param->{name}) {
my $upid = shift;
my $newvollist = [];
+ my $jobs = {};
eval {
- local $SIG{INT} = $SIG{TERM} = $SIG{QUIT} = $SIG{HUP} = sub { die "interrupted by signal\n"; };
+ local $SIG{INT} =
+ local $SIG{TERM} =
+ local $SIG{QUIT} =
+ local $SIG{HUP} = sub { die "interrupted by signal\n"; };
- PVE::Storage::activate_volumes($storecfg, $vollist);
+ PVE::Storage::activate_volumes($storecfg, $vollist, $snapname);
+
+ my $total_jobs = scalar(keys %{$drives});
+ my $i = 1;
foreach my $opt (keys %$drives) {
my $drive = $drives->{$opt};
+ my $skipcomplete = ($total_jobs != $i); # finish after last drive
my $newdrive = PVE::QemuServer::clone_disk($storecfg, $vmid, $running, $opt, $drive, $snapname,
- $newid, $storage, $format, $drive->{full}, $newvollist);
+ $newid, $storage, $format, $fullclone->{$opt}, $newvollist,
+ $jobs, $skipcomplete, $oldconf->{agent});
$newconf->{$opt} = PVE::QemuServer::print_drive($vmid, $newdrive);
- PVE::QemuServer::update_config_nolock($newid, $newconf, 1);
+ PVE::QemuConfig->write_config($newid, $newconf);
+ $i++;
}
delete $newconf->{lock};
- PVE::QemuServer::update_config_nolock($newid, $newconf, 1);
+ PVE::QemuConfig->write_config($newid, $newconf);
if ($target) {
- my $newconffile = PVE::QemuServer::config_file($newid, $target);
+ # always deactivate volumes - avoid lvm LVs to be active on several nodes
+ PVE::Storage::deactivate_volumes($storecfg, $vollist, $snapname) if !$running;
+ PVE::Storage::deactivate_volumes($storecfg, $newvollist);
+
+ my $newconffile = PVE::QemuConfig->config_file($newid, $target);
die "Failed to move config to node '$target' - rename failed: $!\n"
if !rename($conffile, $newconffile);
}
if (my $err = $@) {
unlink $conffile;
+ eval { PVE::QemuServer::qemu_blockjobs_cancel($vmid, $jobs) };
+
sleep 1; # some storage like rbd need to wait before release volume - really?
foreach my $volid (@$newvollist) {
return;
};
+ PVE::Firewall::clone_vmfw_conf($vmid, $newid);
+
return $rpcenv->fork_worker('qmclone', $vmid, $authuser, $realcmd);
};
- return PVE::QemuServer::lock_config_mode($vmid, 1, $shared_lock, sub {
+ return PVE::QemuConfig->lock_config_mode($vmid, 1, $shared_lock, sub {
# Aquire exclusive lock lock for $newid
- return PVE::QemuServer::lock_config_full($newid, 1, $clonefn);
+ return PVE::QemuConfig->lock_config_full($newid, 1, $clonefn);
});
}});
proxyto => 'node',
description => "Move volume to different storage.",
permissions => {
- description => "You need 'VM.Config.Disk' permissions on /vms/{vmid}, " .
- "and 'Datastore.AllocateSpace' permissions on the storage.",
- check =>
- [ 'and',
- ['perm', '/vms/{vmid}', [ 'VM.Config.Disk' ]],
- ['perm', '/storage/{storage}', [ 'Datastore.AllocateSpace' ]],
- ],
+ description => "You need 'VM.Config.Disk' permissions on /vms/{vmid}, and 'Datastore.AllocateSpace' permissions on the storage.",
+ check => [ 'and',
+ ['perm', '/vms/{vmid}', [ 'VM.Config.Disk' ]],
+ ['perm', '/storage/{storage}', [ 'Datastore.AllocateSpace' ]],
+ ],
},
parameters => {
additionalProperties => 0,
- properties => {
+ properties => {
node => get_standard_option('pve-node'),
- vmid => get_standard_option('pve-vmid'),
+ vmid => get_standard_option('pve-vmid', { completion => \&PVE::QemuServer::complete_vmid }),
disk => {
type => 'string',
description => "The disk you want to move.",
- enum => [ PVE::QemuServer::disknames() ],
+ enum => [ PVE::QemuServer::valid_drive_names() ],
},
- storage => get_standard_option('pve-storage-id', { description => "Target Storage." }),
+ storage => get_standard_option('pve-storage-id', {
+ description => "Target storage.",
+ completion => \&PVE::QemuServer::complete_storage,
+ }),
'format' => {
type => 'string',
description => "Target Format.",
my $updatefn = sub {
- my $conf = PVE::QemuServer::load_config($vmid);
+ my $conf = PVE::QemuConfig->load_config($vmid);
+
+ PVE::QemuConfig->check_lock($conf);
die "checksum missmatch (file change by other user?)\n"
if $digest && $digest ne $conf->{digest};
die "you can't move on the same storage with same format\n" if $oldstoreid eq $storeid &&
(!$format || !$oldfmt || $oldfmt eq $format);
+ # this only checks snapshots because $disk is passed!
+ my $snapshotted = PVE::QemuServer::is_volume_in_use($storecfg, $conf, $disk, $old_volid);
+ die "you can't move a disk with snapshots and delete the source\n"
+ if $snapshotted && $param->{delete};
+
PVE::Cluster::log_msg('info', $authuser, "move disk VM $vmid: move --disk $disk --storage $storeid");
my $running = PVE::QemuServer::check_running($vmid);
my $newvollist = [];
eval {
- local $SIG{INT} = $SIG{TERM} = $SIG{QUIT} = $SIG{HUP} = sub { die "interrupted by signal\n"; };
+ local $SIG{INT} =
+ local $SIG{TERM} =
+ local $SIG{QUIT} =
+ local $SIG{HUP} = sub { die "interrupted by signal\n"; };
+
+ warn "moving disk with snapshots, snapshots will not be moved!\n"
+ if $snapshotted;
my $newdrive = PVE::QemuServer::clone_disk($storecfg, $vmid, $running, $disk, $drive, undef,
$vmid, $storeid, $format, 1, $newvollist);
$conf->{$disk} = PVE::QemuServer::print_drive($vmid, $newdrive);
- PVE::QemuServer::add_unused_volume($conf, $old_volid) if !$param->{delete};
+ PVE::QemuConfig->add_unused_volume($conf, $old_volid) if !$param->{delete};
- PVE::QemuServer::update_config_nolock($vmid, $conf, 1);
+ # convert moved disk to base if part of template
+ PVE::QemuServer::template_create($vmid, $conf, $disk)
+ if PVE::QemuConfig->is_template($conf);
+
+ PVE::QemuConfig->write_config($vmid, $conf);
+
+ eval {
+ # try to deactivate volumes - avoid lvm LVs to be active on several nodes
+ PVE::Storage::deactivate_volumes($storecfg, [ $newdrive->{file} ])
+ if !$running;
+ };
+ warn $@ if $@;
};
if (my $err = $@) {
}
if ($param->{delete}) {
- eval { PVE::Storage::vdisk_free($storecfg, $old_volid); };
+ eval {
+ PVE::Storage::deactivate_volumes($storecfg, [$old_volid]);
+ PVE::Storage::vdisk_free($storecfg, $old_volid);
+ };
warn $@ if $@;
}
};
return $rpcenv->fork_worker('qmmove', $vmid, $authuser, $realcmd);
};
- return PVE::QemuServer::lock_config($vmid, $updatefn);
+ return PVE::QemuConfig->lock_config($vmid, $updatefn);
}});
__PACKAGE__->register_method({
additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
- vmid => get_standard_option('pve-vmid'),
- target => get_standard_option('pve-node', { description => "Target node." }),
+ vmid => get_standard_option('pve-vmid', { completion => \&PVE::QemuServer::complete_vmid }),
+ target => get_standard_option('pve-node', {
+ description => "Target node.",
+ completion => \&PVE::Cluster::complete_migration_target,
+ }),
online => {
type => 'boolean',
description => "Use online/live migration.",
description => "Allow to migrate VMs which use local devices. Only root may use this option.",
optional => 1,
},
+ migration_type => {
+ type => 'string',
+ enum => ['secure', 'insecure'],
+ description => "Migration traffic is encrypted using an SSH tunnel by default. On secure, completely private networks this can be disabled to increase performance.",
+ optional => 1,
+ },
+ migration_network => {
+ type => 'string', format => 'CIDR',
+ description => "CIDR of the (sub) network that is used for migration.",
+ optional => 1,
+ },
+ "with-local-disks" => {
+ type => 'boolean',
+ description => "Enable live storage migration for local disk",
+ optional => 1,
+ },
+ targetstorage => get_standard_option('pve-storage-id', {
+ description => "Default target storage.",
+ optional => 1,
+ completion => \&PVE::QemuServer::complete_storage,
+ }),
},
},
returns => {
my $vmid = extract_param($param, 'vmid');
+ raise_param_exc({ targetstorage => "Live storage migration can only be done online." })
+ if !$param->{online} && $param->{targetstorage};
+
raise_param_exc({ force => "Only root may use this option." })
if $param->{force} && $authuser ne 'root@pam';
+ raise_param_exc({ migration_type => "Only root may use this option." })
+ if $param->{migration_type} && $authuser ne 'root@pam';
+
+ # allow root only until better network permissions are available
+ raise_param_exc({ migration_network => "Only root may use this option." })
+ if $param->{migration_network} && $authuser ne 'root@pam';
+
# test if VM exists
- my $conf = PVE::QemuServer::load_config($vmid);
+ my $conf = PVE::QemuConfig->load_config($vmid);
# try to detect errors early
- PVE::QemuServer::check_lock($conf);
+ PVE::QemuConfig->check_lock($conf);
if (PVE::QemuServer::check_running($vmid)) {
die "cant migrate running VM without --online\n"
}
my $storecfg = PVE::Storage::config();
- PVE::QemuServer::check_storage_availability($storecfg, $conf, $target);
- if (&$vm_is_ha_managed($vmid) && $rpcenv->{type} ne 'ha') {
+ if( $param->{targetstorage}) {
+ PVE::Storage::storage_check_node($storecfg, $param->{targetstorage}, $target);
+ } else {
+ PVE::QemuServer::check_storage_availability($storecfg, $conf, $target);
+ }
+
+ if (PVE::HA::Config::vm_is_ha_managed($vmid) && $rpcenv->{type} ne 'ha') {
my $hacmd = sub {
my $upid = shift;
- my $service = "pvevm:$vmid";
+ my $service = "vm:$vmid";
- my $cmd = ['clusvcadm', '-M', $service, '-m', $target];
+ my $cmd = ['ha-manager', 'migrate', $service, $target];
- print "Executing HA migrate for VM $vmid to node $target\n";
+ print "Requesting HA migration for VM $vmid to node $target\n";
PVE::Tools::run_command($cmd);
} else {
my $realcmd = sub {
- my $upid = shift;
-
PVE::QemuMigrate->migrate($target, $targetip, $vmid, $param);
};
- return $rpcenv->fork_worker('qmigrate', $vmid, $authuser, $realcmd);
+ my $worker = sub {
+ return PVE::GuestHelpers::guest_migration_lock($vmid, 10, $realcmd);
+ };
+
+ return $rpcenv->fork_worker('qmigrate', $vmid, $authuser, $worker);
}
}});
proxyto => 'node',
description => "Execute Qemu monitor commands.",
permissions => {
- check => ['perm', '/vms/{vmid}', [ 'VM.Monitor' ]],
+ description => "Sys.Modify is required for (sub)commands which are not read-only ('info *' and 'help')",
+ check => ['perm', '/vms/{vmid}', [ 'VM.Monitor' ]],
},
parameters => {
additionalProperties => 0,
code => sub {
my ($param) = @_;
+ my $rpcenv = PVE::RPCEnvironment::get();
+ my $authuser = $rpcenv->get_user();
+
+ my $is_ro = sub {
+ my $command = shift;
+ return $command =~ m/^\s*info(\s+|$)/
+ || $command =~ m/^\s*help\s*$/;
+ };
+
+ $rpcenv->check_full($authuser, "/", ['Sys.Modify'])
+ if !&$is_ro($param->{command});
+
my $vmid = $param->{vmid};
- my $conf = PVE::QemuServer::load_config ($vmid); # check if VM exists
+ my $conf = PVE::QemuConfig->load_config ($vmid); # check if VM exists
my $res = '';
eval {
return $res;
}});
+# whitelist of QGA commands exposed through the API; each maps to the
+# qemu guest agent command "guest-<name>"
+my $guest_agent_commands = [
+ 'ping',
+ 'get-time',
+ 'info',
+ 'fsfreeze-status',
+ 'fsfreeze-freeze',
+ 'fsfreeze-thaw',
+ 'fstrim',
+ 'network-get-interfaces',
+ 'get-vcpus',
+ 'get-fsinfo',
+ 'get-memory-blocks',
+ 'get-memory-block-info',
+ 'suspend-hybrid',
+ 'suspend-ram',
+ 'suspend-disk',
+ 'shutdown',
+ ];
+
+# API endpoint: POST /nodes/{node}/qemu/{vmid}/agent
+# Forwards a whitelisted command to the VM's qemu guest agent.
+__PACKAGE__->register_method({
+ name => 'agent',
+ path => '{vmid}/agent',
+ method => 'POST',
+ protected => 1,
+ proxyto => 'node',
+ description => "Execute Qemu Guest Agent commands.",
+ permissions => {
+ check => ['perm', '/vms/{vmid}', [ 'VM.Monitor' ]],
+ },
+ parameters => {
+ additionalProperties => 0,
+ properties => {
+ node => get_standard_option('pve-node'),
+ vmid => get_standard_option('pve-vmid', {
+ completion => \&PVE::QemuServer::complete_vmid_running }),
+ command => {
+ type => 'string',
+ description => "The QGA command.",
+ enum => $guest_agent_commands,
+ },
+ },
+ },
+ returns => {
+ type => 'object',
+ description => "Returns an object with a single `result` property. The type of that
+property depends on the executed command.",
+ },
+ code => sub {
+ my ($param) = @_;
+
+ my $vmid = $param->{vmid};
+
+ my $conf = PVE::QemuConfig->load_config ($vmid); # check if VM exists
+
+ # the agent must be enabled in the config and the VM must be running,
+ # otherwise the monitor command would hang or fail obscurely
+ die "No Qemu Guest Agent\n" if !defined($conf->{agent});
+ die "VM $vmid is not running\n" if !PVE::QemuServer::check_running($vmid);
+
+ my $cmd = $param->{command};
+
+ # QGA commands are namespaced with a "guest-" prefix on the QMP socket
+ my $res = PVE::QemuServer::vm_mon_cmd($vmid, "guest-$cmd");
+
+ return { result => $res };
+ }});
+
__PACKAGE__->register_method({
name => 'resize_vm',
path => '{vmid}/resize',
additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
- vmid => get_standard_option('pve-vmid'),
+ vmid => get_standard_option('pve-vmid', { completion => \&PVE::QemuServer::complete_vmid }),
skiplock => get_standard_option('skiplock'),
disk => {
type => 'string',
description => "The disk you want to resize.",
- enum => [PVE::QemuServer::disknames()],
+ enum => [PVE::QemuServer::valid_drive_names()],
},
size => {
type => 'string',
pattern => '\+?\d+(\.\d+)?[KMGT]?',
- description => "The new size. With the '+' sign the value is added to the actual size of the volume and without it, the value is taken as an absolute one. Shrinking disk size is not supported.",
+ description => "The new size. With the `+` sign the value is added to the actual size of the volume and without it, the value is taken as an absolute one. Shrinking disk size is not supported.",
},
digest => {
type => 'string',
my $updatefn = sub {
- my $conf = PVE::QemuServer::load_config($vmid);
+ my $conf = PVE::QemuConfig->load_config($vmid);
die "checksum missmatch (file change by other user?)\n"
if $digest && $digest ne $conf->{digest};
- PVE::QemuServer::check_lock($conf) if !$skiplock;
+ PVE::QemuConfig->check_lock($conf) if !$skiplock;
die "disk '$disk' does not exist\n" if !$conf->{$disk};
my $drive = PVE::QemuServer::parse_drive($disk, $conf->{$disk});
+ # only the volume format is needed here, the other six return
+ # values of parse_volname are ignored
+ my (undef, undef, undef, undef, undef, undef, $format) =
+ PVE::Storage::parse_volname($storecfg, $drive->{file});
+
+ # refuse to resize qcow2 volumes while the VM has snapshots
+ # (presumably because internal snapshots still reference the old
+ # size — TODO confirm against qemu-img semantics)
+ die "can't resize volume: $disk if snapshot exists\n"
+ if %{$conf->{snapshots}} && $format eq 'qcow2';
+
my $volid = $drive->{file};
die "disk '$disk' has no associated volume\n" if !$volid;
die "you can't resize a cdrom\n" if PVE::QemuServer::drive_is_cdrom($drive);
- die "you can't online resize a virtio windows bootdisk\n"
- if PVE::QemuServer::check_running($vmid) && $conf->{bootdisk} eq $disk && $conf->{ostype} =~ m/^w/ && $disk =~ m/^virtio/;
-
my ($storeid, $volname) = PVE::Storage::parse_volume_id($volid);
$rpcenv->check($authuser, "/storage/$storeid", ['Datastore.AllocateSpace']);
+ PVE::Storage::activate_volumes($storecfg, [$volid]);
my $size = PVE::Storage::volume_size_info($storecfg, $volid, 5);
die "internal error" if $sizestr !~ m/^(\+)?(\d+(\.\d+)?)([KMGT])?$/;
$newsize += $size if $ext;
$newsize = int($newsize);
- die "unable to skrink disk size\n" if $newsize < $size;
+ die "shrinking disks is not supported\n" if $newsize < $size;
return if $size == $newsize;
$drive->{size} = $newsize;
$conf->{$disk} = PVE::QemuServer::print_drive($vmid, $drive);
- PVE::QemuServer::update_config_nolock($vmid, $conf, 1);
+ PVE::QemuConfig->write_config($vmid, $conf);
};
- PVE::QemuServer::lock_config($vmid, $updatefn);
+ PVE::QemuConfig->lock_config($vmid, $updatefn);
return undef;
}});
parameters => {
additionalProperties => 0,
properties => {
- vmid => get_standard_option('pve-vmid'),
+ vmid => get_standard_option('pve-vmid', { completion => \&PVE::QemuServer::complete_vmid }),
node => get_standard_option('pve-node'),
},
},
my $vmid = $param->{vmid};
- my $conf = PVE::QemuServer::load_config($vmid);
+ my $conf = PVE::QemuConfig->load_config($vmid);
my $snaphash = $conf->{snapshots} || {};
my $res = [];
additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
- vmid => get_standard_option('pve-vmid'),
+ vmid => get_standard_option('pve-vmid', { completion => \&PVE::QemuServer::complete_vmid }),
snapname => get_standard_option('pve-snapshot-name'),
vmstate => {
optional => 1,
type => 'boolean',
description => "Save the vmstate",
},
- freezefs => {
- optional => 1,
- type => 'boolean',
- description => "Freeze the filesystem",
- },
description => {
optional => 1,
type => 'string',
my $realcmd = sub {
PVE::Cluster::log_msg('info', $authuser, "snapshot VM $vmid: $snapname");
- PVE::QemuServer::snapshot_create($vmid, $snapname, $param->{vmstate},
- $param->{freezefs}, $param->{description});
+ PVE::QemuConfig->snapshot_create($vmid, $snapname, $param->{vmstate},
+ $param->{description});
};
return $rpcenv->fork_worker('qmsnapshot', $vmid, $authuser, $realcmd);
my $updatefn = sub {
- my $conf = PVE::QemuServer::load_config($vmid);
+ my $conf = PVE::QemuConfig->load_config($vmid);
- PVE::QemuServer::check_lock($conf);
+ PVE::QemuConfig->check_lock($conf);
my $snap = $conf->{snapshots}->{$snapname};
$snap->{description} = $param->{description} if defined($param->{description});
- PVE::QemuServer::update_config_nolock($vmid, $conf, 1);
+ PVE::QemuConfig->write_config($vmid, $conf);
};
- PVE::QemuServer::lock_config($vmid, $updatefn);
+ PVE::QemuConfig->lock_config($vmid, $updatefn);
return undef;
}});
proxyto => 'node',
description => "Get snapshot configuration",
permissions => {
- check => ['perm', '/vms/{vmid}', [ 'VM.Snapshot' ]],
+ check => ['perm', '/vms/{vmid}', [ 'VM.Snapshot', 'VM.Snapshot.Rollback' ], any => 1],
},
parameters => {
additionalProperties => 0,
my $snapname = extract_param($param, 'snapname');
- my $conf = PVE::QemuServer::load_config($vmid);
+ my $conf = PVE::QemuConfig->load_config($vmid);
my $snap = $conf->{snapshots}->{$snapname};
proxyto => 'node',
description => "Rollback VM state to specified snapshot.",
permissions => {
- check => ['perm', '/vms/{vmid}', [ 'VM.Snapshot' ]],
+ check => ['perm', '/vms/{vmid}', [ 'VM.Snapshot', 'VM.Snapshot.Rollback' ], any => 1],
},
parameters => {
additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
- vmid => get_standard_option('pve-vmid'),
+ vmid => get_standard_option('pve-vmid', { completion => \&PVE::QemuServer::complete_vmid }),
snapname => get_standard_option('pve-snapshot-name'),
},
},
my $realcmd = sub {
PVE::Cluster::log_msg('info', $authuser, "rollback snapshot VM $vmid: $snapname");
- PVE::QemuServer::snapshot_rollback($vmid, $snapname);
+ PVE::QemuConfig->snapshot_rollback($vmid, $snapname);
};
- return $rpcenv->fork_worker('qmrollback', $vmid, $authuser, $realcmd);
+ my $worker = sub {
+ # hold the guest migration lock (10s timeout) so that nobody can
+ # create replication snapshots while the rollback is running
+ return PVE::GuestHelpers::guest_migration_lock($vmid, 10, $realcmd);
+ };
+
+ return $rpcenv->fork_worker('qmrollback', $vmid, $authuser, $worker);
}});
__PACKAGE__->register_method({
additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
- vmid => get_standard_option('pve-vmid'),
+ vmid => get_standard_option('pve-vmid', { completion => \&PVE::QemuServer::complete_vmid }),
snapname => get_standard_option('pve-snapshot-name'),
force => {
optional => 1,
my $realcmd = sub {
PVE::Cluster::log_msg('info', $authuser, "delete snapshot VM $vmid: $snapname");
- PVE::QemuServer::snapshot_delete($vmid, $snapname, $param->{force});
+ PVE::QemuConfig->snapshot_delete($vmid, $snapname, $param->{force});
};
return $rpcenv->fork_worker('qmdelsnapshot', $vmid, $authuser, $realcmd);
additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
- vmid => get_standard_option('pve-vmid'),
+ vmid => get_standard_option('pve-vmid', { completion => \&PVE::QemuServer::complete_vmid_stopped }),
disk => {
optional => 1,
type => 'string',
description => "If you want to convert only 1 disk to base image.",
- enum => [PVE::QemuServer::disknames()],
+ enum => [PVE::QemuServer::valid_drive_names()],
},
},
my $updatefn = sub {
- my $conf = PVE::QemuServer::load_config($vmid);
+ my $conf = PVE::QemuConfig->load_config($vmid);
- PVE::QemuServer::check_lock($conf);
+ PVE::QemuConfig->check_lock($conf);
die "unable to create template, because VM contains snapshots\n"
if $conf->{snapshots} && scalar(keys %{$conf->{snapshots}});
die "you can't convert a template to a template\n"
- if PVE::QemuServer::is_template($conf) && !$disk;
+ if PVE::QemuConfig->is_template($conf) && !$disk;
die "you can't convert a VM to template if VM is running\n"
if PVE::QemuServer::check_running($vmid);
};
$conf->{template} = 1;
- PVE::QemuServer::update_config_nolock($vmid, $conf, 1);
+ PVE::QemuConfig->write_config($vmid, $conf);
return $rpcenv->fork_worker('qmtemplate', $vmid, $authuser, $realcmd);
};
- PVE::QemuServer::lock_config($vmid, $updatefn);
+ PVE::QemuConfig->lock_config($vmid, $updatefn);
return undef;
}});