use Cwd 'abs_path';
use Net::SSLeay;
use UUID;
+use POSIX;
+use IO::Socket::IP;
+use URI::Escape;
use PVE::Cluster qw (cfs_read_file cfs_write_file);;
use PVE::SafeSyslog;
use PVE::Storage;
use PVE::JSONSchema qw(get_standard_option);
use PVE::RESTHandler;
+use PVE::ReplicationConfig;
+use PVE::GuestHelpers;
use PVE::QemuConfig;
use PVE::QemuServer;
use PVE::QemuMigrate;
use PVE::Network;
use PVE::Firewall;
use PVE::API2::Firewall::VM;
-use PVE::HA::Env::PVE2;
-use PVE::HA::Config;
+use PVE::API2::Qemu::Agent;
+
+BEGIN {
+ if (!$ENV{PVE_GENERATING_DOCS}) {
+ require PVE::HA::Env::PVE2;
+ import PVE::HA::Env::PVE2;
+ require PVE::HA::Config;
+ import PVE::HA::Config;
+ }
+}
use Data::Dumper; # fixme: remove
}
};
+my $NEW_DISK_RE = qr!^(([^/:\s]+):)?(\d+(\.\d+)?)$!;
my $check_storage_access = sub {
my ($rpcenv, $authuser, $storecfg, $vmid, $settings, $default_storage) = @_;
my $volid = $drive->{file};
- if (!$volid || $volid eq 'none') {
+ if (!$volid || ($volid eq 'none' || $volid eq 'cloudinit')) {
+ # nothing to check
+ } elsif ($volid =~ m/^(([^:\s]+):)?(cloudinit)$/) {
# nothing to check
} elsif ($isCDROM && ($volid eq 'cdrom')) {
$rpcenv->check($authuser, "/", ['Sys.Console']);
- } elsif (!$isCDROM && ($volid =~ m/^(([^:\s]+):)?(\d+(\.\d+)?)$/)) {
+ } elsif (!$isCDROM && ($volid =~ $NEW_DISK_RE)) {
my ($storeid, $size) = ($2 || $default_storage, $3);
die "no storage ID specified (and no default storage)\n" if !$storeid;
$rpcenv->check($authuser, "/storage/$storeid", ['Datastore.AllocateSpace']);
+ my $scfg = PVE::Storage::storage_config($storecfg, $storeid);
+ raise_param_exc({ storage => "storage '$storeid' does not support vm images"})
+ if !$scfg->{content}->{images};
} else {
- $rpcenv->check_volume_access($authuser, $storecfg, $vmid, $volid);
+ PVE::Storage::check_volume_access($rpcenv, $authuser, $storecfg, $vmid, $volid);
}
});
+
+ $rpcenv->check($authuser, "/storage/$settings->{vmstatestorage}", ['Datastore.AllocateSpace'])
+ if defined($settings->{vmstatestorage});
};
my $check_storage_access_clone = sub {
}
});
+ $rpcenv->check($authuser, "/storage/$conf->{vmstatestorage}", ['Datastore.AllocateSpace'])
+ if defined($conf->{vmstatestorage});
+
return $sharedvm;
};
my $vollist = [];
my $res = {};
- PVE::QemuServer::foreach_drive($settings, sub {
+
+ my $code = sub {
my ($ds, $disk) = @_;
my $volid = $disk->{file};
if (!$volid || $volid eq 'none' || $volid eq 'cdrom') {
delete $disk->{size};
$res->{$ds} = PVE::QemuServer::print_drive($vmid, $disk);
- } elsif ($volid =~ m/^(([^:\s]+):)?(\d+(\.\d+)?)$/) {
+ } elsif ($volid =~ m!^(?:([^/:\s]+):)?cloudinit$!) {
+ my $storeid = $1 || $default_storage;
+ die "no storage ID specified (and no default storage)\n" if !$storeid;
+ my $scfg = PVE::Storage::storage_config($storecfg, $storeid);
+ my $name = "vm-$vmid-cloudinit";
+ my $fmt = undef;
+ if ($scfg->{path}) {
+ $name .= ".qcow2";
+ $fmt = 'qcow2';
+ }else{
+ $fmt = 'raw';
+ }
+ # FIXME: Reasonable size? qcow2 shouldn't grow if the space isn't used anyway?
+ my $cloudinit_iso_size = 5; # in MB
+ my $volid = PVE::Storage::vdisk_alloc($storecfg, $storeid, $vmid,
+ $fmt, $name, $cloudinit_iso_size*1024);
+ $disk->{file} = $volid;
+ $disk->{media} = 'cdrom';
+ push @$vollist, $volid;
+ delete $disk->{format}; # no longer needed
+ $res->{$ds} = PVE::QemuServer::print_drive($vmid, $disk);
+ } elsif ($volid =~ $NEW_DISK_RE) {
my ($storeid, $size) = ($2 || $default_storage, $3);
die "no storage ID specified (and no default storage)\n" if !$storeid;
my $defformat = PVE::Storage::storage_default_format($storecfg, $storeid);
my $fmt = $disk->{format} || $defformat;
+ $size = PVE::Tools::convert_size($size, 'gb' => 'kb'); # vdisk_alloc uses kb
+
my $volid;
if ($ds eq 'efidisk0') {
- # handle efidisk
- my $ovmfvars = '/usr/share/kvm/OVMF_VARS-pure-efi.fd';
- die "uefi vars image not found\n" if ! -f $ovmfvars;
- $volid = PVE::Storage::vdisk_alloc($storecfg, $storeid, $vmid,
- $fmt, undef, 128);
- $disk->{file} = $volid;
- $disk->{size} = 128*1024;
- my ($storeid, $volname) = PVE::Storage::parse_volume_id($volid);
- my $scfg = PVE::Storage::storage_config($storecfg, $storeid);
- my $qemufmt = PVE::QemuServer::qemu_img_format($scfg, $volname);
- my $path = PVE::Storage::path($storecfg, $volid);
- my $efidiskcmd = ['/usr/bin/qemu-img', 'convert', '-n', '-f', 'raw', '-O', $qemufmt];
- push @$efidiskcmd, $ovmfvars;
- push @$efidiskcmd, $path;
-
- PVE::Storage::activate_volumes($storecfg, [$volid]);
-
- eval { PVE::Tools::run_command($efidiskcmd); };
- my $err = $@;
- die "Copying of EFI Vars image failed: $err" if $err;
+ ($volid, $size) = PVE::QemuServer::create_efidisk($storecfg, $storeid, $vmid, $fmt);
} else {
- $volid = PVE::Storage::vdisk_alloc($storecfg, $storeid, $vmid,
- $fmt, undef, $size*1024*1024);
- $disk->{file} = $volid;
- $disk->{size} = $size*1024*1024*1024;
+ $volid = PVE::Storage::vdisk_alloc($storecfg, $storeid, $vmid, $fmt, undef, $size);
}
push @$vollist, $volid;
+ $disk->{file} = $volid;
+ $disk->{size} = PVE::Tools::convert_size($size, 'kb' => 'b');
delete $disk->{format}; # no longer needed
$res->{$ds} = PVE::QemuServer::print_drive($vmid, $disk);
} else {
- $rpcenv->check_volume_access($authuser, $storecfg, $vmid, $volid);
+ PVE::Storage::check_volume_access($rpcenv, $authuser, $storecfg, $vmid, $volid);
my $volid_is_new = 1;
$res->{$ds} = PVE::QemuServer::print_drive($vmid, $disk);
}
- });
+ };
+
+ eval { PVE::QemuServer::foreach_drive($settings, $code); };
# free allocated images on error
if (my $err = $@) {
my $diskoptions = {
'boot' => 1,
'bootdisk' => 1,
+ 'vmstatestorage' => 1,
};
my $check_vm_modify_config_perm = sub {
$rpcenv->check_vm_perm($authuser, $vmid, $pool, ['VM.PowerMgmt']);
} elsif ($diskoptions->{$opt}) {
$rpcenv->check_vm_perm($authuser, $vmid, $pool, ['VM.Config.Disk']);
- } elsif ($opt =~ m/^net\d+$/) {
+ } elsif ($opt =~ m/^(?:net|ipconfig)\d+$/) {
$rpcenv->check_vm_perm($authuser, $vmid, $pool, ['VM.Config.Network']);
} else {
# catches usb\d+, hostpci\d+, args, lock, etc.
my $storecfg = PVE::Storage::config();
+ if (defined(my $ssh_keys = $param->{sshkeys})) {
+ $ssh_keys = URI::Escape::uri_unescape($ssh_keys);
+ PVE::Tools::validate_ssh_public_keys($ssh_keys);
+ }
+
PVE::Cluster::check_cfs_quorum();
if (defined($pool)) {
die "pipe requires cli environment\n"
if $rpcenv->{type} ne 'cli';
} else {
- $rpcenv->check_volume_access($authuser, $storecfg, $vmid, $archive);
+ PVE::Storage::check_volume_access($rpcenv, $authuser, $storecfg, $vmid, $archive);
$archive = PVE::Storage::abs_filesystem_path($storecfg, $archive);
}
}
PVE::AccessControl::add_vm_to_pool($vmid, $pool) if $pool;
};
+ # ensure that no stale replication state exists for this VM ID
+ PVE::ReplicationState::delete_guest_states($vmid);
+
return $rpcenv->fork_worker('qmrestore', $vmid, $authuser, $realcmd);
};
# test after locking
PVE::Cluster::check_vmid_unused($vmid);
+ # ensure that no stale replication state exists for this VM ID
+ PVE::ReplicationState::delete_guest_states($vmid);
+
my $realcmd = sub {
my $vollist = [];
$vollist = &$create_disks($rpcenv, $authuser, $conf, $storecfg, $vmid, $pool, $param, $storage);
- # try to be smart about bootdisk
- my @disks = PVE::QemuServer::valid_drive_names();
- my $firstdisk;
- foreach my $ds (reverse @disks) {
- next if !$conf->{$ds};
- my $disk = PVE::QemuServer::parse_drive($ds, $conf->{$ds});
- next if PVE::QemuServer::drive_is_cdrom($disk);
- $firstdisk = $ds;
- }
-
- if (!$conf->{bootdisk} && $firstdisk) {
- $conf->{bootdisk} = $firstdisk;
+ if (!$conf->{bootdisk}) {
+ my $firstdisk = PVE::QemuServer::resolve_first_disk($conf);
+ $conf->{bootdisk} = $firstdisk if $firstdisk;
}
# auto generate uuid if user did not specify smbios1 option
if (!$conf->{smbios1}) {
- my ($uuid, $uuid_str);
- UUID::generate($uuid);
- UUID::unparse($uuid, $uuid_str);
- $conf->{smbios1} = "uuid=$uuid_str";
+ $conf->{smbios1} = PVE::QemuServer::generate_smbios1_uuid();
}
PVE::QemuConfig->write_config($vmid, $conf);
{ subdir => 'status' },
{ subdir => 'unlink' },
{ subdir => 'vncproxy' },
+ { subdir => 'termproxy' },
{ subdir => 'migrate' },
{ subdir => 'resize' },
{ subdir => 'move' },
{ subdir => 'rrd' },
{ subdir => 'rrddata' },
{ subdir => 'monitor' },
+ { subdir => 'agent' },
{ subdir => 'snapshot' },
{ subdir => 'spiceproxy' },
{ subdir => 'sendkey' },
path => '{vmid}/firewall',
});
+__PACKAGE__->register_method ({
+ subclass => "PVE::API2::Qemu::Agent",
+ path => '{vmid}/agent',
+});
+
__PACKAGE__->register_method({
name => 'rrd',
path => '{vmid}/rrd',
delete $conf->{pending};
+ # hide cloudinit password
+ if ($conf->{cipassword}) {
+ $conf->{cipassword} = '**********';
+ }
+
return $conf;
}});
$item->{value} = $conf->{$opt} if defined($conf->{$opt});
$item->{pending} = $conf->{pending}->{$opt} if defined($conf->{pending}->{$opt});
$item->{delete} = ($pending_delete_hash->{$opt} ? 2 : 1) if exists $pending_delete_hash->{$opt};
+
+ # hide cloudinit password
+ if ($opt eq 'cipassword') {
+ $item->{value} = '**********' if defined($item->{value});
+ # append a trailing space so the (hidden) pending value still differs from the (hidden) current value
+ $item->{pending} = '********** ' if defined($item->{pending});
+ }
push @$res, $item;
}
next if defined($conf->{$opt});
my $item = { key => $opt };
$item->{pending} = $conf->{pending}->{$opt};
+
+ # hide cloudinit password
+ if ($opt eq 'cipassword') {
+ $item->{pending} = '**********' if defined($item->{pending});
+ }
push @$res, $item;
}
my $background_delay = extract_param($param, 'background_delay');
+ if (defined(my $cipassword = $param->{cipassword})) {
+ # Same logic as in cloud-init (but with the regex fixed...)
+ $param->{cipassword} = PVE::Tools::encrypt_pw($cipassword)
+ if $cipassword !~ /^\$(?:[156]|2[ay])(\$.+){2}/;
+ }
+
my @paramarr = (); # used for log message
- foreach my $key (keys %$param) {
- push @paramarr, "-$key", $param->{$key};
+ foreach my $key (sort keys %$param) {
+ my $value = $key eq 'cipassword' ? '<hidden>' : $param->{$key};
+ push @paramarr, "-$key", $value;
}
my $skiplock = extract_param($param, 'skiplock');
my $force = extract_param($param, 'force');
+ if (defined(my $ssh_keys = $param->{sshkeys})) {
+ $ssh_keys = URI::Escape::uri_unescape($ssh_keys);
+ PVE::Tools::validate_ssh_public_keys($ssh_keys);
+ }
+
die "no options specified\n" if !$delete_str && !$revert_str && !scalar(keys %$param);
my $storecfg = PVE::Storage::config();
push @delete, $opt;
}
+ my $repl_conf = PVE::ReplicationConfig->new();
+ my $is_replicated = $repl_conf->check_for_existing_jobs($vmid, 1);
+ my $check_replication = sub {
+ my ($drive) = @_;
+ return if !$is_replicated;
+ my $volid = $drive->{file};
+ return if !$volid || !($drive->{replicate}//1);
+ return if PVE::QemuServer::drive_is_cdrom($drive);
+ my ($storeid, $format);
+ if ($volid =~ $NEW_DISK_RE) {
+ $storeid = $2;
+ $format = $drive->{format} || PVE::Storage::storage_default_format($storecfg, $storeid);
+ } else {
+ ($storeid, undef) = PVE::Storage::parse_volume_id($volid, 1);
+ $format = (PVE::Storage::parse_volname($storecfg, $volid))[6];
+ }
+ return if PVE::Storage::storage_can_replicate($storecfg, $storeid, $format);
+ my $scfg = PVE::Storage::storage_config($storecfg, $storeid);
+ return if $scfg->{shared};
+ die "cannot add non-replicatable volume to a replicated VM\n";
+ };
+
foreach my $opt (keys %$param) {
if (PVE::QemuServer::is_valid_drivename($opt)) {
# cleanup drive path
my $drive = PVE::QemuServer::parse_drive($opt, $param->{$opt});
raise_param_exc({ $opt => "unable to parse drive options" }) if !$drive;
PVE::QemuServer::cleanup_drive_path($opt, $storecfg, $drive);
+ $check_replication->($drive);
$param->{$opt} = PVE::QemuServer::print_drive($vmid, $drive);
} elsif ($opt =~ m/^net(\d+)$/) {
# add macaddr
foreach my $opt (@delete) {
$modified->{$opt} = 1;
$conf = PVE::QemuConfig->load_config($vmid); # update/reload
+ if (!defined($conf->{$opt}) && !defined($conf->{pending}->{$opt})) {
+ warn "cannot delete '$opt' - not set in current configuration!\n";
+ $modified->{$opt} = 0;
+ next;
+ }
+
if ($opt =~ m/^unused/) {
my $drive = PVE::QemuServer::parse_drive($opt, $conf->{$opt});
PVE::QemuConfig->check_protection($conf, "can't remove unused disk '$drive->{file}'");
if (PVE::QemuServer::is_valid_drivename($opt)) {
my $drive = PVE::QemuServer::parse_drive($opt, $param->{$opt});
+ # FIXME: cloudinit: CDROM or Disk?
if (PVE::QemuServer::drive_is_cdrom($drive)) { # CDROM
$rpcenv->check_vm_perm($authuser, $vmid, undef, ['VM.Config.CDROM']);
} else {
die "unable to remove VM $vmid - used in HA resources\n"
if PVE::HA::Config::vm_is_ha_managed($vmid);
+ # do not allow destroy if there are replication jobs
+ my $repl_conf = PVE::ReplicationConfig->new();
+ $repl_conf->check_for_existing_jobs($vmid);
+
# early tests (repeat after locking)
die "VM $vmid is running - destroy failed\n"
if PVE::QemuServer::check_running($vmid);
if ($node ne 'localhost' && $node ne PVE::INotify::nodename()) {
($remip, $family) = PVE::Cluster::remote_node_ip($node);
# NOTE: kvm VNC traffic is already TLS encrypted or is known unsecure
- $remcmd = ['/usr/bin/ssh', '-T', '-o', 'BatchMode=yes', $remip];
+ $remcmd = ['/usr/bin/ssh', '-e', 'none', '-T', '-o', 'BatchMode=yes', $remip];
} else {
$family = PVE::Tools::get_host_address_family($node);
}
if ($conf->{vga} && ($conf->{vga} =~ m/^serial\d+$/)) {
- die "Websocket mode is not supported in vga serial mode!" if $websocket;
- my $termcmd = [ '/usr/sbin/qm', 'terminal', $vmid, '-iface', $conf->{vga} ];
- #my $termcmd = "/usr/bin/qm terminal -iface $conf->{vga}";
+ my $termcmd = [ '/usr/sbin/qm', 'terminal', $vmid, '-iface', $conf->{vga}, '-escape', '0' ];
+
$cmd = ['/usr/bin/vncterm', '-rfbport', $port,
'-timeout', $timeout, '-authpath', $authpath,
- '-perm', 'Sys.Console', '-c', @$remcmd, @$termcmd];
+ '-perm', 'Sys.Console'];
+
+ if ($param->{websocket}) {
+ $ENV{PVE_VNC_TICKET} = $ticket; # pass ticket to vncterm
+ push @$cmd, '-notls', '-listen', 'localhost';
+ }
+
+ push @$cmd, '-c', @$remcmd, @$termcmd;
+
+ PVE::Tools::run_command($cmd);
+
} else {
$ENV{LC_PVE_TICKET} = $ticket if $websocket; # set ticket with "qm vncproxy"
- my $qmcmd = [@$remcmd, "/usr/sbin/qm", 'vncproxy', $vmid];
+ $cmd = [@$remcmd, "/usr/sbin/qm", 'vncproxy', $vmid];
+
+ my $sock = IO::Socket::IP->new(
+ ReuseAddr => 1,
+ Listen => 1,
+ LocalPort => $port,
+ Proto => 'tcp',
+ GetAddrInfoFlags => 0,
+ ) or die "failed to create socket: $!\n";
+ # Inside the worker we shouldn't have any previous alarms
+ # running anyway...:
+ alarm(0);
+ local $SIG{ALRM} = sub { die "connection timed out\n" };
+ alarm $timeout;
+ accept(my $cli, $sock) or die "connection failed: $!\n";
+ alarm(0);
+ close($sock);
+ if (PVE::Tools::run_command($cmd,
+ output => '>&'.fileno($cli),
+ input => '<&'.fileno($cli),
+ noerr => 1) != 0)
+ {
+ die "Failed to run vncproxy.\n";
+ }
+ }
+
+ return;
+ };
+
+ my $upid = $rpcenv->fork_worker('vncproxy', $vmid, $authuser, $realcmd, 1);
+
+ PVE::Tools::wait_for_vnc_port($port);
+
+ return {
+ user => $authuser,
+ ticket => $ticket,
+ port => $port,
+ upid => $upid,
+ cert => $sslcert,
+ };
+ }});
+
+__PACKAGE__->register_method({
+ name => 'termproxy',
+ path => '{vmid}/termproxy',
+ method => 'POST',
+ protected => 1,
+ permissions => {
+ check => ['perm', '/vms/{vmid}', [ 'VM.Console' ]],
+ },
+ description => "Creates a TCP proxy connections.",
+ parameters => {
+ additionalProperties => 0,
+ properties => {
+ node => get_standard_option('pve-node'),
+ vmid => get_standard_option('pve-vmid'),
+ serial=> {
+ optional => 1,
+ type => 'string',
+ enum => [qw(serial0 serial1 serial2 serial3)],
+ description => "opens a serial terminal (defaults to display)",
+ },
+ },
+ },
+ returns => {
+ additionalProperties => 0,
+ properties => {
+ user => { type => 'string' },
+ ticket => { type => 'string' },
+ port => { type => 'integer' },
+ upid => { type => 'string' },
+ },
+ },
+ code => sub {
+ my ($param) = @_;
+
+ my $rpcenv = PVE::RPCEnvironment::get();
+
+ my $authuser = $rpcenv->get_user();
+
+ my $vmid = $param->{vmid};
+ my $node = $param->{node};
+ my $serial = $param->{serial};
- my $qmstr = join(' ', @$qmcmd);
+ my $conf = PVE::QemuConfig->load_config($vmid, $node); # check if VM exists
- # also redirect stderr (else we get RFB protocol errors)
- $cmd = ['/bin/nc6', '-l', '-p', $port, '-w', $timeout, '-e', "$qmstr 2>/dev/null"];
+ if (!defined($serial)) {
+ if ($conf->{vga} && $conf->{vga} =~ m/^serial\d+$/) {
+ $serial = $conf->{vga};
}
+ }
- PVE::Tools::run_command($cmd);
+ my $authpath = "/vms/$vmid";
- return;
+ my $ticket = PVE::AccessControl::assemble_vnc_ticket($authuser, $authpath);
+
+ my ($remip, $family);
+
+ if ($node ne 'localhost' && $node ne PVE::INotify::nodename()) {
+ ($remip, $family) = PVE::Cluster::remote_node_ip($node);
+ } else {
+ $family = PVE::Tools::get_host_address_family($node);
+ }
+
+ my $port = PVE::Tools::next_vnc_port($family);
+
+ my $remcmd = $remip ?
+ ['/usr/bin/ssh', '-e', 'none', '-t', $remip, '--'] : [];
+
+ my $termcmd = [ '/usr/sbin/qm', 'terminal', $vmid, '-escape', '0'];
+ push @$termcmd, '-iface', $serial if $serial;
+
+ my $realcmd = sub {
+ my $upid = shift;
+
+ syslog('info', "starting qemu termproxy $upid\n");
+
+ my $cmd = ['/usr/bin/termproxy', $port, '--path', $authpath,
+ '--perm', 'VM.Console', '--'];
+ push @$cmd, @$remcmd, @$termcmd;
+
+ PVE::Tools::run_command($cmd);
};
- my $upid = $rpcenv->fork_worker('vncproxy', $vmid, $authuser, $realcmd);
+ my $upid = $rpcenv->fork_worker('vncproxy', $vmid, $authuser, $realcmd, 1);
PVE::Tools::wait_for_vnc_port($port);
ticket => $ticket,
port => $port,
upid => $upid,
- cert => $sslcert,
};
}});
$status->{spice} = 1 if PVE::QemuServer::vga_conf_has_spice($conf->{vga});
+ $status->{agent} = 1 if $conf->{agent};
+
return $status;
}});
optional => 1,
},
migration_network => {
- type => 'string',
- format => 'CIDR',
+ type => 'string', format => 'CIDR',
description => "CIDR of the (sub) network that is used for migration.",
optional => 1,
},
machine => get_standard_option('pve-qm-machine'),
+ targetstorage => {
+ description => "Target storage for the migration. (Can be '1' to use the same storage id as on the source node.)",
+ type => 'string',
+ optional => 1
+ }
},
},
returns => {
raise_param_exc({ migration_network => "Only root may use this option." })
if $migration_network && $authuser ne 'root@pam';
+ my $targetstorage = extract_param($param, 'targetstorage');
+ raise_param_exc({ targetstorage => "Only root may use this option." })
+ if $targetstorage && $authuser ne 'root@pam';
+
+ raise_param_exc({ targetstorage => "targetstorage can only by used with migratedfrom." })
+ if $targetstorage && !$migratedfrom;
+
# read spice ticket from STDIN
my $spice_ticket;
if ($stateuri && ($stateuri eq 'tcp') && $migratedfrom && ($rpcenv->{type} eq 'cli')) {
- if (defined(my $line = <>)) {
+ if (defined(my $line = <STDIN>)) {
chomp $line;
$spice_ticket = $line;
}
my $cmd = ['ha-manager', 'set', $service, '--state', 'started'];
- print "Executing HA start for VM $vmid\n";
+ print "Requesting HA start for VM $vmid\n";
PVE::Tools::run_command($cmd);
syslog('info', "start VM $vmid: $upid\n");
PVE::QemuServer::vm_start($storecfg, $vmid, $stateuri, $skiplock, $migratedfrom, undef,
- $machine, $spice_ticket, $migration_network, $migration_type);
+ $machine, $spice_ticket, $migration_network, $migration_type, $targetstorage);
return;
};
my $cmd = ['ha-manager', 'set', $service, '--state', 'stopped'];
- print "Executing HA stop for VM $vmid\n";
+ print "Requesting HA stop for VM $vmid\n";
PVE::Tools::run_command($cmd);
}
}
- my $realcmd = sub {
- my $upid = shift;
+ if (PVE::HA::Config::vm_is_ha_managed($vmid) &&
+ ($rpcenv->{type} ne 'ha')) {
- syslog('info', "shutdown VM $vmid: $upid\n");
+ my $hacmd = sub {
+ my $upid = shift;
- PVE::QemuServer::vm_stop($storecfg, $vmid, $skiplock, 0, $param->{timeout},
- $shutdown, $param->{forceStop}, $keepActive);
+ my $service = "vm:$vmid";
- return;
- };
+ my $cmd = ['ha-manager', 'set', $service, '--state', 'stopped'];
+
+ print "Requesting HA stop for VM $vmid\n";
+
+ PVE::Tools::run_command($cmd);
+
+ return;
+ };
- return $rpcenv->fork_worker('qmshutdown', $vmid, $authuser, $realcmd);
+ return $rpcenv->fork_worker('hastop', $vmid, $authuser, $hacmd);
+
+ } else {
+
+ my $realcmd = sub {
+ my $upid = shift;
+
+ syslog('info', "shutdown VM $vmid: $upid\n");
+
+ PVE::QemuServer::vm_stop($storecfg, $vmid, $skiplock, 0, $param->{timeout},
+ $shutdown, $param->{forceStop}, $keepActive);
+
+ return;
+ };
+
+ return $rpcenv->fork_worker('qmshutdown', $vmid, $authuser, $realcmd);
+ }
}});
__PACKAGE__->register_method({
} elsif (PVE::QemuServer::is_valid_drivename($opt)) {
my $drive = PVE::QemuServer::parse_drive($opt, $value);
die "unable to parse drive options for '$opt'\n" if !$drive;
- if (PVE::QemuServer::drive_is_cdrom($drive)) {
+ if (PVE::QemuServer::drive_is_cdrom($drive, 1)) {
$newconf->{$opt} = $value; # simply copy configuration
} else {
- if ($param->{full}) {
- die "Full clone feature is not available"
+ if ($param->{full} || PVE::QemuServer::drive_is_cloudinit($drive)) {
+ die "Full clone feature is not supported for drive '$opt'\n"
if !PVE::Storage::volume_has_feature($storecfg, 'copy', $drive->{file}, $snapname, $running);
$fullclone->{$opt} = 1;
} else {
# not full means clone instead of copy
- die "Linked clone feature is not available"
+ die "Linked clone feature is not supported for drive '$opt'\n"
if !PVE::Storage::volume_has_feature($storecfg, 'clone', $drive->{file}, $snapname, $running);
}
$drives->{$opt} = $drive;
my $upid = shift;
my $newvollist = [];
+ my $jobs = {};
eval {
- local $SIG{INT} = $SIG{TERM} = $SIG{QUIT} = $SIG{HUP} = sub { die "interrupted by signal\n"; };
+ local $SIG{INT} =
+ local $SIG{TERM} =
+ local $SIG{QUIT} =
+ local $SIG{HUP} = sub { die "interrupted by signal\n"; };
PVE::Storage::activate_volumes($storecfg, $vollist, $snapname);
+ my $total_jobs = scalar(keys %{$drives});
+ my $i = 1;
+
foreach my $opt (keys %$drives) {
my $drive = $drives->{$opt};
+ my $skipcomplete = ($total_jobs != $i); # finish after last drive
my $newdrive = PVE::QemuServer::clone_disk($storecfg, $vmid, $running, $opt, $drive, $snapname,
- $newid, $storage, $format, $fullclone->{$opt}, $newvollist);
+ $newid, $storage, $format, $fullclone->{$opt}, $newvollist,
+ $jobs, $skipcomplete, $oldconf->{agent});
$newconf->{$opt} = PVE::QemuServer::print_drive($vmid, $newdrive);
PVE::QemuConfig->write_config($newid, $newconf);
+ $i++;
}
delete $newconf->{lock};
if (my $err = $@) {
unlink $conffile;
+ eval { PVE::QemuServer::qemu_blockjobs_cancel($vmid, $jobs) };
+
sleep 1; # some storage like rbd need to wait before release volume - really?
foreach my $volid (@$newvollist) {
proxyto => 'node',
description => "Move volume to different storage.",
permissions => {
- description => "You need 'VM.Config.Disk' permissions on /vms/{vmid}, " .
- "and 'Datastore.AllocateSpace' permissions on the storage.",
- check =>
- [ 'and',
- ['perm', '/vms/{vmid}', [ 'VM.Config.Disk' ]],
- ['perm', '/storage/{storage}', [ 'Datastore.AllocateSpace' ]],
- ],
+ description => "You need 'VM.Config.Disk' permissions on /vms/{vmid}, and 'Datastore.AllocateSpace' permissions on the storage.",
+ check => [ 'and',
+ ['perm', '/vms/{vmid}', [ 'VM.Config.Disk' ]],
+ ['perm', '/storage/{storage}', [ 'Datastore.AllocateSpace' ]],
+ ],
},
parameters => {
additionalProperties => 0,
- properties => {
+ properties => {
node => get_standard_option('pve-node'),
vmid => get_standard_option('pve-vmid', { completion => \&PVE::QemuServer::complete_vmid }),
disk => {
my $old_volid = $drive->{file} || die "disk '$disk' has no associated volume\n";
- die "you can't move a cdrom\n" if PVE::QemuServer::drive_is_cdrom($drive);
+ die "you can't move a cdrom\n" if PVE::QemuServer::drive_is_cdrom($drive, 1);
my $oldfmt;
my ($oldstoreid, $oldvolname) = PVE::Storage::parse_volume_id($old_volid);
my $newvollist = [];
eval {
- local $SIG{INT} = $SIG{TERM} = $SIG{QUIT} = $SIG{HUP} = sub { die "interrupted by signal\n"; };
+ local $SIG{INT} =
+ local $SIG{TERM} =
+ local $SIG{QUIT} =
+ local $SIG{HUP} = sub { die "interrupted by signal\n"; };
warn "moving disk with snapshots, snapshots will not be moved!\n"
if $snapshotted;
PVE::QemuConfig->add_unused_volume($conf, $old_volid) if !$param->{delete};
+ # convert moved disk to base if part of template
+ PVE::QemuServer::template_create($vmid, $conf, $disk)
+ if PVE::QemuConfig->is_template($conf);
+
PVE::QemuConfig->write_config($vmid, $conf);
eval {
migration_type => {
type => 'string',
enum => ['secure', 'insecure'],
- description => "Migration traffic is encrypted using an SSH " .
- "tunnel by default. On secure, completely private networks " .
- "this can be disabled to increase performance.",
+ description => "Migration traffic is encrypted using an SSH tunnel by default. On secure, completely private networks this can be disabled to increase performance.",
optional => 1,
},
migration_network => {
- type => 'string',
- format => 'CIDR',
+ type => 'string', format => 'CIDR',
description => "CIDR of the (sub) network that is used for migration.",
optional => 1,
},
+ "with-local-disks" => {
+ type => 'boolean',
+ description => "Enable live storage migration for local disk",
+ optional => 1,
+ },
+ targetstorage => get_standard_option('pve-storage-id', {
+ description => "Default target storage.",
+ optional => 1,
+ completion => \&PVE::QemuServer::complete_storage,
+ }),
},
},
returns => {
my $vmid = extract_param($param, 'vmid');
+ raise_param_exc({ targetstorage => "Live storage migration can only be done online." })
+ if !$param->{online} && $param->{targetstorage};
+
raise_param_exc({ force => "Only root may use this option." })
if $param->{force} && $authuser ne 'root@pam';
}
my $storecfg = PVE::Storage::config();
- PVE::QemuServer::check_storage_availability($storecfg, $conf, $target);
+
+ if( $param->{targetstorage}) {
+ PVE::Storage::storage_check_node($storecfg, $param->{targetstorage}, $target);
+ } else {
+ PVE::QemuServer::check_storage_availability($storecfg, $conf, $target);
+ }
if (PVE::HA::Config::vm_is_ha_managed($vmid) && $rpcenv->{type} ne 'ha') {
my $cmd = ['ha-manager', 'migrate', $service, $target];
- print "Executing HA migrate for VM $vmid to node $target\n";
+ print "Requesting HA migration for VM $vmid to node $target\n";
PVE::Tools::run_command($cmd);
} else {
my $realcmd = sub {
- my $upid = shift;
-
PVE::QemuMigrate->migrate($target, $targetip, $vmid, $param);
};
- return $rpcenv->fork_worker('qmigrate', $vmid, $authuser, $realcmd);
+ my $worker = sub {
+ return PVE::GuestHelpers::guest_migration_lock($vmid, 10, $realcmd);
+ };
+
+ return $rpcenv->fork_worker('qmigrate', $vmid, $authuser, $worker);
}
}});
proxyto => 'node',
description => "Execute Qemu monitor commands.",
permissions => {
- check => ['perm', '/vms/{vmid}', [ 'VM.Monitor' ]],
+ description => "Sys.Modify is required for (sub)commands which are not read-only ('info *' and 'help')",
+ check => ['perm', '/vms/{vmid}', [ 'VM.Monitor' ]],
},
parameters => {
additionalProperties => 0,
code => sub {
my ($param) = @_;
+ my $rpcenv = PVE::RPCEnvironment::get();
+ my $authuser = $rpcenv->get_user();
+
+ my $is_ro = sub {
+ my $command = shift;
+ return $command =~ m/^\s*info(\s+|$)/
+ || $command =~ m/^\s*help\s*$/;
+ };
+
+ $rpcenv->check_full($authuser, "/", ['Sys.Modify'])
+ if !&$is_ro($param->{command});
+
my $vmid = $param->{vmid};
my $conf = PVE::QemuConfig->load_config ($vmid); # check if VM exists
size => {
type => 'string',
pattern => '\+?\d+(\.\d+)?[KMGT]?',
- description => "The new size. With the '+' sign the value is added to the actual size of the volume and without it, the value is taken as an absolute one. Shrinking disk size is not supported.",
+ description => "The new size. With the `+` sign the value is added to the actual size of the volume and without it, the value is taken as an absolute one. Shrinking disk size is not supported.",
},
digest => {
type => 'string',
$newsize += $size if $ext;
$newsize = int($newsize);
- die "unable to skrink disk size\n" if $newsize < $size;
+ die "shrinking disks is not supported\n" if $newsize < $size;
return if $size == $newsize;
proxyto => 'node',
description => "Get snapshot configuration",
permissions => {
- check => ['perm', '/vms/{vmid}', [ 'VM.Snapshot' ]],
+ check => ['perm', '/vms/{vmid}', [ 'VM.Snapshot', 'VM.Snapshot.Rollback' ], any => 1],
},
parameters => {
additionalProperties => 0,
proxyto => 'node',
description => "Rollback VM state to specified snapshot.",
permissions => {
- check => ['perm', '/vms/{vmid}', [ 'VM.Snapshot' ]],
+ check => ['perm', '/vms/{vmid}', [ 'VM.Snapshot', 'VM.Snapshot.Rollback' ], any => 1],
},
parameters => {
additionalProperties => 0,
PVE::QemuConfig->snapshot_rollback($vmid, $snapname);
};
- return $rpcenv->fork_worker('qmrollback', $vmid, $authuser, $realcmd);
+ my $worker = sub {
+ # hold the migration lock; this makes sure that nobody creates replication snapshots meanwhile
+ return PVE::GuestHelpers::guest_migration_lock($vmid, 10, $realcmd);
+ };
+
+ return $rpcenv->fork_worker('qmrollback', $vmid, $authuser, $worker);
}});
__PACKAGE__->register_method({