use strict;
use warnings;
use Cwd 'abs_path';
+use Net::SSLeay;
use PVE::Cluster qw (cfs_read_file cfs_write_file);;
use PVE::SafeSyslog;
$res->{$ds} = PVE::QemuServer::print_drive($vmid, $disk);
} else {
- my $path = $rpcenv->check_volume_access($authuser, $storecfg, $vmid, $volid);
-
- my ($storeid, $volname) = PVE::Storage::parse_volume_id($volid, 1);
+ $rpcenv->check_volume_access($authuser, $storecfg, $vmid, $volid);
- my $volid_is_new = 1;
+ my $volid_is_new = 1;
- if ($conf->{$ds}) {
- my $olddrive = PVE::QemuServer::parse_drive($ds, $conf->{$ds});
- $volid_is_new = undef if $olddrive->{file} && $olddrive->{file} eq $volid;
+ if ($conf->{$ds}) {
+ my $olddrive = PVE::QemuServer::parse_drive($ds, $conf->{$ds});
+ $volid_is_new = undef if $olddrive->{file} && $olddrive->{file} eq $volid;
}
if ($volid_is_new) {
+ my ($storeid, $volname) = PVE::Storage::parse_volume_id($volid, 1);
+
PVE::Storage::activate_volumes($storecfg, [ $volid ]) if $storeid;
my $size = PVE::Storage::volume_size_info($storecfg, $volid);
die "pipe requires cli environment\n"
if $rpcenv->{type} ne 'cli';
} else {
- my $path = $rpcenv->check_volume_access($authuser, $storecfg, $vmid, $archive);
-
- PVE::Storage::activate_volumes($storecfg, [ $archive ])
- if PVE::Storage::parse_volume_id ($archive, 1);
-
- die "can't find archive file '$archive'\n" if !($path && -f $path);
- $archive = $path;
+ $rpcenv->check_volume_access($authuser, $storecfg, $vmid, $archive);
+ $archive = PVE::Storage::abs_filesystem_path($storecfg, $archive);
}
}
{ subdir => 'rrddata' },
{ subdir => 'monitor' },
{ subdir => 'snapshot' },
+ { subdir => 'spiceproxy' },
+ { subdir => 'sendkey' },
];
return $res;
my $volid = $drive->{file};
if (&$vm_is_volid_owner($storecfg, $vmid, $volid)) {
- if ($force || $key =~ m/^unused/) {
- eval {
- # check if the disk is really unused
+ if ($force || $key =~ m/^unused/) {
+ eval {
+ # check if the disk is really unused
my $used_paths = PVE::QemuServer::get_used_paths($vmid, $storecfg, $conf, 1, $key);
- my $path = PVE::Storage::path($storecfg, $volid);
+ my $path = PVE::Storage::path($storecfg, $volid);
die "unable to delete '$volid' - volume is still in use (snapshot?)\n"
if $used_paths->{$path};
- PVE::Storage::vdisk_free($storecfg, $volid);
+ PVE::Storage::vdisk_free($storecfg, $volid);
};
die $@ if $@;
} else {
my $drive = PVE::QemuServer::parse_drive($opt, $conf->{$opt});
if (my $sid = &$test_deallocate_drive($storecfg, $vmid, $opt, $drive, $force)) {
- $rpcenv->check($authuser, "/storage/$sid", ['Datastore.Allocate']);
+ $rpcenv->check($authuser, "/storage/$sid", ['Datastore.AllocateSpace']);
}
}
my $unplugwarning = "";
- if($conf->{ostype} && $conf->{ostype} eq 'l26'){
+ if ($conf->{ostype} && $conf->{ostype} eq 'l26') {
$unplugwarning = "<br>verify that you have acpiphp && pci_hotplug modules loaded in your guest VM";
- }elsif($conf->{ostype} && $conf->{ostype} eq 'l24'){
+ } elsif ($conf->{ostype} && $conf->{ostype} eq 'l24') {
$unplugwarning = "<br>kernel 2.4 don't support hotplug, please disable hotplug in options";
- }elsif(!$conf->{ostype} || ($conf->{ostype} && $conf->{ostype} eq 'other')){
+ } elsif (!$conf->{ostype} || ($conf->{ostype} && $conf->{ostype} eq 'other')) {
$unplugwarning = "<br>verify that your guest support acpi hotplug";
}
- if($opt eq 'tablet'){
+ if ($opt eq 'tablet') {
PVE::QemuServer::vm_deviceplug(undef, $conf, $vmid, $opt);
- }else{
+ } else {
die "error hot-unplug $opt $unplugwarning" if !PVE::QemuServer::vm_deviceunplug($vmid, $conf, $opt);
}
&$safe_num_ne($drive->{mbps_wr}, $old_drive->{mbps_wr}) ||
&$safe_num_ne($drive->{iops}, $old_drive->{iops}) ||
&$safe_num_ne($drive->{iops_rd}, $old_drive->{iops_rd}) ||
- &$safe_num_ne($drive->{iops_wr}, $old_drive->{iops_wr})) {
- PVE::QemuServer::qemu_block_set_io_throttle($vmid,"drive-$opt", $drive->{mbps}*1024*1024,
- $drive->{mbps_rd}*1024*1024, $drive->{mbps_wr}*1024*1024,
- $drive->{iops}, $drive->{iops_rd}, $drive->{iops_wr})
+ &$safe_num_ne($drive->{iops_wr}, $old_drive->{iops_wr}) ||
+ &$safe_num_ne($drive->{mbps_max}, $old_drive->{mbps_max}) ||
+ &$safe_num_ne($drive->{mbps_rd_max}, $old_drive->{mbps_rd_max}) ||
+ &$safe_num_ne($drive->{mbps_wr_max}, $old_drive->{mbps_wr_max}) ||
+ &$safe_num_ne($drive->{iops_max}, $old_drive->{iops_max}) ||
+ &$safe_num_ne($drive->{iops_rd_max}, $old_drive->{iops_rd_max}) ||
+ &$safe_num_ne($drive->{iops_wr_max}, $old_drive->{iops_wr_max})) {
+ PVE::QemuServer::qemu_block_set_io_throttle($vmid,"drive-$opt",
+ ($drive->{mbps} || 0)*1024*1024,
+ ($drive->{mbps_rd} || 0)*1024*1024,
+ ($drive->{mbps_wr} || 0)*1024*1024,
+ $drive->{iops} || 0,
+ $drive->{iops_rd} || 0,
+ $drive->{iops_wr} || 0,
+ ($drive->{mbps_max} || 0)*1024*1024,
+ ($drive->{mbps_rd_max} || 0)*1024*1024,
+ ($drive->{mbps_wr_max} || 0)*1024*1024,
+ $drive->{iops_max} || 0,
+ $drive->{iops_rd_max} || 0,
+ $drive->{iops_wr_max} || 0)
if !PVE::QemuServer::drive_is_cdrom($drive);
}
}
# involve hot-plug actions, or disk alloc/free. Such actions can take long
# time to complete and have side effects (not idempotent).
#
-# The new implementation uses POST and forks a worker process. We added
+# The new implementation uses POST and forks a worker process. We added
# a new option 'background_delay'. If specified we wait up to
-# 'background_delay' second for the worker task to complete. It returns null
+# 'background_delay' second for the worker task to complete. It returns null
# if the task is finished within that time, else we return the UPID.
-
+
my $update_vm_api = sub {
my ($param, $sync) = @_;
raise_param_exc({ delete => "you can't use '-$opt' and " .
"-delete $opt' at the same time" })
if defined($param->{$opt});
-
+
if (!PVE::QemuServer::option_exists($opt)) {
raise_param_exc({ delete => "unknown option '$opt'" });
}
if $digest && $digest ne $conf->{digest};
PVE::QemuServer::check_lock($conf) if !$skiplock;
-
+
if ($param->{memory} || defined($param->{balloon})) {
my $maxmem = $param->{memory} || $conf->{memory} || $defaults->{memory};
my $balloon = defined($param->{balloon}) ? $param->{balloon} : $conf->{balloon};
-
+
die "balloon value too large (must be smaller than assigned memory)\n"
if $balloon && $balloon > $maxmem;
}
}
my $running = PVE::QemuServer::check_running($vmid);
-
+
foreach my $opt (keys %$param) { # add/change
-
+
$conf = PVE::QemuServer::load_config($vmid); # update/reload
-
+
next if $conf->{$opt} && ($param->{$opt} eq $conf->{$opt}); # skip if nothing changed
if (PVE::QemuServer::valid_drivename($opt)) {
} elsif($opt eq 'tablet' && $param->{$opt} == 0){
PVE::QemuServer::vm_deviceunplug($vmid, $conf, $opt);
}
+
+ if($opt eq 'cores' && $conf->{maxcpus}){
+ PVE::QemuServer::qemu_cpu_hotplug($vmid, $conf, $param->{$opt});
+ }
$conf->{$opt} = $param->{$opt};
PVE::QemuServer::update_config_nolock($vmid, $conf, 1);
if ($background_delay) {
# Note: It would be better to do that in the Event based HTTPServer
- # to avoid blocking call to sleep.
+ # to avoid blocking call to sleep.
my $end_time = time() + $background_delay;
return undef if $status eq 'OK';
die $status;
}
- }
+ }
return $upid;
}
my $vmid = $param->{vmid};
my $node = $param->{node};
+ my $conf = PVE::QemuServer::load_config($vmid, $node); # check if VM exists
+
my $authpath = "/vms/$vmid";
my $ticket = PVE::AccessControl::assemble_vnc_ticket($authuser, $authpath);
my $port = PVE::Tools::next_vnc_port();
my $remip;
+ my $remcmd = [];
if ($node ne 'localhost' && $node ne PVE::INotify::nodename()) {
$remip = PVE::Cluster::remote_node_ip($node);
+ # NOTE: kvm VNC traffic is already TLS encrypted
+ $remcmd = ['/usr/bin/ssh', '-T', '-o', 'BatchMode=yes', $remip];
}
- # NOTE: kvm VNC traffic is already TLS encrypted
- my $remcmd = $remip ? ['/usr/bin/ssh', '-T', '-o', 'BatchMode=yes', $remip] : [];
-
my $timeout = 10;
my $realcmd = sub {
syslog('info', "starting vnc proxy $upid\n");
- my $qmcmd = [@$remcmd, "/usr/sbin/qm", 'vncproxy', $vmid];
+ my $cmd;
+
+ if ($conf->{vga} && ($conf->{vga} =~ m/^serial\d+$/)) {
- my $qmstr = join(' ', @$qmcmd);
+ my $termcmd = [ '/usr/sbin/qm', 'terminal', $vmid, '-iface', $conf->{vga} ];
+ #my $termcmd = "/usr/bin/qm terminal -iface $conf->{vga}";
+ $cmd = ['/usr/bin/vncterm', '-rfbport', $port,
+ '-timeout', $timeout, '-authpath', $authpath,
+ '-perm', 'Sys.Console', '-c', @$remcmd, @$termcmd];
+ } else {
- # also redirect stderr (else we get RFB protocol errors)
- my $cmd = ['/bin/nc', '-l', '-p', $port, '-w', $timeout, '-c', "$qmstr 2>/dev/null"];
+ my $qmcmd = [@$remcmd, "/usr/sbin/qm", 'vncproxy', $vmid];
+
+ my $qmstr = join(' ', @$qmcmd);
+
+ # also redirect stderr (else we get RFB protocol errors)
+ $cmd = ['/bin/nc', '-l', '-p', $port, '-w', $timeout, '-c', "$qmstr 2>/dev/null"];
+ }
PVE::Tools::run_command($cmd);
};
}});
+__PACKAGE__->register_method({
+    name => 'spiceproxy',
+    path => '{vmid}/spiceproxy',
+    method => 'POST',
+    protected => 1,
+    proxyto => 'node',
+    permissions => {
+	check => ['perm', '/vms/{vmid}', [ 'VM.Console' ]],
+    },
+    description => "Returns a SPICE configuration to connect to the VM.",
+    parameters => {
+	additionalProperties => 0,
+	properties => {
+	    node => get_standard_option('pve-node'),
+	    vmid => get_standard_option('pve-vmid'),
+	    proxy => get_standard_option('spice-proxy', { optional => 1 }),
+	},
+    },
+    returns => get_standard_option('remote-viewer-config'),
+    code => sub {
+	my ($param) = @_;
+
+	my $rpcenv = PVE::RPCEnvironment::get();
+
+	my $authuser = $rpcenv->get_user();
+
+	my $vmid = $param->{vmid};
+	my $node = $param->{node};
+	my $proxy = $param->{proxy};
+
+	# also acts as an existence check for the VM on this node
+	my $conf = PVE::QemuServer::load_config($vmid, $node);
+
+	# NOTE: must end with ';' - a trailing ',' would silently chain this
+	# declaration into the next statement via the comma operator.
+	my $title = "VM $vmid - $conf->{'name'}";
+
+	my $port = PVE::QemuServer::spice_port($vmid);
+
+	my ($ticket, undef, $remote_viewer_config) =
+	    PVE::AccessControl::remote_viewer_config($authuser, $vmid, $node, $proxy, $title, $port);
+
+	# set a one-shot SPICE password and expire it after 30 seconds, so the
+	# returned remote-viewer config is only usable for a short window
+	PVE::QemuServer::vm_mon_cmd($vmid, "set_password", protocol => 'spice', password => $ticket);
+	PVE::QemuServer::vm_mon_cmd($vmid, "expire_password", protocol => 'spice', time => "+30");
+
+	return $remote_viewer_config;
+    }});
+
__PACKAGE__->register_method({
name => 'vmcmdidx',
path => '{vmid}/status',
$status->{ha} = &$vm_is_ha_managed($param->{vmid});
+ $status->{spice} = 1 if PVE::QemuServer::vga_conf_has_spice($conf->{vga});
+
return $status;
}});
raise_param_exc({ migratedfrom => "Only root may use this option." })
if $migratedfrom && $authuser ne 'root@pam';
+ # read spice ticket from STDIN
+ my $spice_ticket;
+ if ($stateuri && ($stateuri eq 'tcp') && $migratedfrom && ($rpcenv->{type} eq 'cli')) {
+ if (defined(my $line = <>)) {
+ chomp $line;
+ $spice_ticket = $line;
+ }
+ }
+
my $storecfg = PVE::Storage::config();
if (&$vm_is_ha_managed($vmid) && !$stateuri &&
syslog('info', "start VM $vmid: $upid\n");
- PVE::QemuServer::vm_start($storecfg, $vmid, $stateuri, $skiplock, $migratedfrom, undef, $machine);
+ PVE::QemuServer::vm_start($storecfg, $vmid, $stateuri, $skiplock, $migratedfrom, undef,
+ $machine, $spice_ticket);
return;
};
type => "object",
properties => {
hasFeature => { type => 'boolean' },
- nodes => {
+ nodes => {
type => 'array',
items => { type => 'string' },
}
my $nodelist = PVE::QemuServer::shared_nodes($conf, $storecfg);
my $hasFeature = PVE::QemuServer::has_feature($feature, $conf, $storecfg, $snapname, $running);
-
+
return {
hasFeature => $hasFeature,
nodes => [ keys %$nodelist ],
- };
+ };
}});
__PACKAGE__->register_method({
my $net = PVE::QemuServer::parse_net($value);
$net->{macaddr} = PVE::Tools::random_ether_addr();
$newconf->{$opt} = PVE::QemuServer::print_net($net);
- } elsif (my $drive = PVE::QemuServer::parse_drive($opt, $value)) {
+ } elsif (PVE::QemuServer::valid_drivename($opt)) {
+ my $drive = PVE::QemuServer::parse_drive($opt, $value);
+ die "unable to parse drive options for '$opt'\n" if !$drive;
if (PVE::QemuServer::drive_is_cdrom($drive)) {
$newconf->{$opt} = $value; # simply copy configuration
} else {
PVE::QemuServer::update_config_nolock($newid, $newconf, 1);
if ($target) {
+ # always deactivate volumes - avoid lvm LVs to be active on several nodes
+ PVE::Storage::deactivate_volumes($storecfg, $vollist);
+
my $newconffile = PVE::QemuServer::config_file($newid, $target);
die "Failed to move config to node '$target' - rename failed: $!\n"
if !rename($conffile, $newconffile);
permissions => {
description => "You need 'VM.Config.Disk' permissions on /vms/{vmid}, " .
"and 'Datastore.AllocateSpace' permissions on the storage.",
- check =>
+ check =>
[ 'and',
['perm', '/vms/{vmid}', [ 'VM.Config.Disk' ]],
['perm', '/storage/{storage}', [ 'Datastore.AllocateSpace' ]],
$oldfmt = $1;
}
- die "you can't move on the same storage with same format\n" if $oldstoreid eq $storeid &&
+ die "you can't move on the same storage with same format\n" if $oldstoreid eq $storeid &&
(!$format || !$oldfmt || $oldfmt eq $format);
PVE::Cluster::log_msg('info', $authuser, "move disk VM $vmid: move --disk $disk --storage $storeid");
$conf->{$disk} = PVE::QemuServer::print_drive($vmid, $newdrive);
PVE::QemuServer::add_unused_volume($conf, $old_volid) if !$param->{delete};
-
+
PVE::QemuServer::update_config_nolock($vmid, $conf, 1);
+
+ eval {
+ # try to deactivate volumes - avoid lvm LVs to be active on several nodes
+ PVE::Storage::deactivate_volumes($storecfg, [ $newdrive->{file} ])
+ if !$running;
+ };
+ warn $@ if $@;
};
if (my $err = $@) {
}
if ($param->{delete}) {
- eval { PVE::Storage::vdisk_free($storecfg, $old_volid); };
- warn $@ if $@;
+ my $used_paths = PVE::QemuServer::get_used_paths($vmid, $storecfg, $conf, 1, 1);
+ my $path = PVE::Storage::path($storecfg, $old_volid);
+ if ($used_paths->{$path}){
+ warn "volume $old_volid have snapshots. Can't delete it\n";
+ PVE::QemuServer::add_unused_volume($conf, $old_volid);
+ PVE::QemuServer::update_config_nolock($vmid, $conf, 1);
+ } else {
+ eval { PVE::Storage::vdisk_free($storecfg, $old_volid); };
+ warn $@ if $@;
+ }
}
};