use PVE::GuestHelpers;
use PVE::QemuConfig;
use PVE::QemuServer;
-use PVE::QemuServer::Drive;
use PVE::QemuServer::CPUConfig;
+use PVE::QemuServer::Drive;
+use PVE::QemuServer::ImportDisk;
use PVE::QemuServer::Monitor qw(mon_cmd);
+use PVE::QemuServer::Machine;
use PVE::QemuMigrate;
use PVE::RPCEnvironment;
use PVE::AccessControl;
use PVE::VZDump::Plugin;
use PVE::DataCenterConfig;
use PVE::SSHInfo;
+use PVE::Replication;
BEGIN {
if (!$ENV{PVE_GENERATING_DOCS}) {
}
};
+# Used in import-enabled API endpoints. Parses drives using the extended '_with_alloc' schema.
+# Iterates over all drive keys of $param in sorted order and invokes
+# $func->($opt, $parsed_drive) for each one. The third argument to parse_drive
+# enables the extended schema (presumably 'with_alloc' — see the comment above).
+# Entries that fail to parse are silently skipped (no exception raised here).
+my $foreach_volume_with_alloc = sub {
+ my ($param, $func) = @_;
+
+ for my $opt (sort keys $param->%*) {
+ # ignore non-drive options (net0, memory, ...)
+ next if !PVE::QemuServer::is_valid_drivename($opt);
+
+ my $drive = PVE::QemuServer::Drive::parse_drive($opt, $param->{$opt}, 1);
+ next if !$drive;
+
+ $func->($opt, $drive);
+ }
+};
+
my $NEW_DISK_RE = qr!^(([^/:\s]+):)?(\d+(\.\d+)?)$!;
+
+# Validate and normalize all drive options in $param, in place.
+# For each drive key: parse with the extended schema (third arg 1), enforce the
+# special "<storage ID>:0,import-from=<source>" syntax when 'import-from' is
+# used, require 'efitype'/'pre-enrolled-keys' (efidisk0) resp. 'version'
+# (tpmstate0) to be set explicitly for imports, clean up the drive path, run the
+# optional $extra_checks->($drive) callback, and re-serialize the entry via
+# print_drive. Raises a parameter exception on any violation.
+my $check_drive_param = sub {
+ my ($param, $storecfg, $extra_checks) = @_;
+
+ for my $opt (sort keys $param->%*) {
+ next if !PVE::QemuServer::is_valid_drivename($opt);
+
+ my $drive = PVE::QemuServer::parse_drive($opt, $param->{$opt}, 1);
+ raise_param_exc({ $opt => "unable to parse drive options" }) if !$drive;
+
+ if ($drive->{'import-from'}) {
+ # $3 is the size capture of $NEW_DISK_RE; imports must use size 0
+ if ($drive->{file} !~ $NEW_DISK_RE || $3 != 0) {
+ raise_param_exc({
+ $opt => "'import-from' requires special syntax - ".
+ "use <storage ID>:0,import-from=<source>",
+ });
+ }
+
+ if ($opt eq 'efidisk0') {
+ for my $required (qw(efitype pre-enrolled-keys)) {
+ if (!defined($drive->{$required})) {
+ raise_param_exc({
+ $opt => "need to specify '$required' when using 'import-from'",
+ });
+ }
+ }
+ } elsif ($opt eq 'tpmstate0') {
+ raise_param_exc({ $opt => "need to specify 'version' when using 'import-from'" })
+ if !defined($drive->{version});
+ }
+ }
+
+ PVE::QemuServer::cleanup_drive_path($opt, $storecfg, $drive);
+
+ $extra_checks->($drive) if $extra_checks;
+
+ # second arg to print_drive mirrors parse_drive's extended-schema flag
+ $param->{$opt} = PVE::QemuServer::print_drive($drive, 1);
+ }
+};
+
my $check_storage_access = sub {
my ($rpcenv, $authuser, $storecfg, $vmid, $settings, $default_storage) = @_;
- PVE::QemuConfig->foreach_volume($settings, sub {
+ $foreach_volume_with_alloc->($settings, sub {
my ($ds, $drive) = @_;
my $isCDROM = PVE::QemuServer::drive_is_cdrom($drive);
if !$scfg->{content}->{images};
} else {
PVE::Storage::check_volume_access($rpcenv, $authuser, $storecfg, $vmid, $volid);
+ if ($storeid) {
+ my ($vtype) = PVE::Storage::parse_volname($storecfg, $volid);
+ raise_param_exc({ $ds => "content type needs to be 'images' or 'iso'" })
+ if $vtype ne 'images' && $vtype ne 'iso';
+ }
+ }
+
+ if (my $src_image = $drive->{'import-from'}) {
+ my $src_vmid;
+ if (PVE::Storage::parse_volume_id($src_image, 1)) { # PVE-managed volume
+ (my $vtype, undef, $src_vmid) = PVE::Storage::parse_volname($storecfg, $src_image);
+ raise_param_exc({ $ds => "$src_image has wrong type '$vtype' - not an image" })
+ if $vtype ne 'images';
+ }
+
+ if ($src_vmid) { # might be actively used by VM and will be copied via clone_disk()
+ $rpcenv->check($authuser, "/vms/${src_vmid}", ['VM.Clone']);
+ } else {
+ PVE::Storage::check_volume_access($rpcenv, $authuser, $storecfg, $vmid, $src_image);
+ }
}
});
return $sharedvm;
};
+# Check that $authuser may allocate VM images on $storage for $node:
+# the storage must be enabled on that node, the user needs
+# Datastore.AllocateSpace on it, and the storage must have the 'images'
+# content type enabled. Dies (or raises via $rpcenv->check) otherwise.
+my $check_storage_access_migrate = sub {
+ my ($rpcenv, $authuser, $storecfg, $storage, $node) = @_;
+
+ PVE::Storage::storage_check_enabled($storecfg, $storage, $node);
+
+ $rpcenv->check($authuser, "/storage/$storage", ['Datastore.AllocateSpace']);
+
+ my $scfg = PVE::Storage::storage_config($storecfg, $storage);
+ die "storage '$storage' does not support vm images\n"
+ if !$scfg->{content}->{images};
+};
+
+# Import (full-clone) an existing PVE-managed volume $src_volid into the
+# destination described by $dest_info, appending newly allocated volumes to
+# $vollist. Returns the new disk's (file, size) pair as produced by clone_disk.
+# Dies if the source size cannot be determined, the source is a cloudinit
+# disk, the storage lacks the 'copy' feature, or the owning VM changes
+# state (exists/running) between the initial check and the locked clone.
+my $import_from_volid = sub {
+ my ($storecfg, $src_volid, $dest_info, $vollist) = @_;
+
+ # bail out early if the source volume's size cannot be determined
+ die "could not get size of $src_volid\n"
+ if !PVE::Storage::volume_size_info($storecfg, $src_volid, 10);
+
+ die "cannot import from cloudinit disk\n"
+ if PVE::QemuServer::Drive::drive_is_cloudinit({ file => $src_volid });
+
+ # third element of parse_volname is the owning VMID (may be undef/0)
+ my $src_vmid = (PVE::Storage::parse_volname($storecfg, $src_volid))[2];
+
+ # snapshot of the owner VM's state: (exists-in-cluster, running-locally)
+ my $src_vm_state = sub {
+ my $exists = $src_vmid && PVE::Cluster::get_vmlist()->{ids}->{$src_vmid} ? 1 : 0;
+
+ my $runs = 0;
+ if ($exists) {
+ eval { PVE::QemuConfig::assert_config_exists_on_node($src_vmid); };
+ die "owner VM $src_vmid not on local node\n" if $@;
+ $runs = PVE::QemuServer::Helpers::vm_running_locally($src_vmid) || 0;
+ }
+
+ return ($exists, $runs);
+ };
+
+ my ($src_vm_exists, $running) = $src_vm_state->();
+
+ die "cannot import from '$src_volid' - full clone feature is not supported\n"
+ if !PVE::Storage::volume_has_feature($storecfg, 'copy', $src_volid, undef, $running);
+
+ my $clonefn = sub {
+ # re-check under the lock: the owner VM must not have appeared,
+ # vanished, started or stopped since the pre-lock snapshot
+ my ($src_vm_exists_now, $running_now) = $src_vm_state->();
+
+ die "owner VM $src_vmid changed state unexpectedly\n"
+ if $src_vm_exists_now != $src_vm_exists || $running_now != $running;
+
+ my $src_conf = $src_vm_exists_now ? PVE::QemuConfig->load_config($src_vmid) : {};
+
+ # find the drive slot in the owner VM's config that references
+ # $src_volid (if any); fall back to a bare { file => ... } drive
+ my $src_drive = { file => $src_volid };
+ my $src_drivename;
+ PVE::QemuConfig->foreach_volume($src_conf, sub {
+ my ($ds, $drive) = @_;
+
+ return if $src_drivename;
+
+ if ($drive->{file} eq $src_volid) {
+ $src_drive = $drive;
+ $src_drivename = $ds;
+ }
+ });
+
+ my $source_info = {
+ vmid => $src_vmid,
+ running => $running_now,
+ drivename => $src_drivename,
+ drive => $src_drive,
+ snapname => undef,
+ };
+
+ my ($src_storeid) = PVE::Storage::parse_volume_id($src_volid);
+
+ # 4th arg 1 => full clone; bandwidth limited by the 'clone' limit of
+ # source and destination storages
+ return PVE::QemuServer::clone_disk(
+ $storecfg,
+ $source_info,
+ $dest_info,
+ 1,
+ $vollist,
+ undef,
+ undef,
+ $src_conf->{agent},
+ PVE::Storage::get_bandwidth_limit('clone', [$src_storeid, $dest_info->{storage}]),
+ );
+ };
+
+ # lock the owner VM's config while cloning: full lock if it is running,
+ # shared lock if it merely exists; no lock needed for orphaned volumes
+ my $cloned;
+ if ($running) {
+ $cloned = PVE::QemuConfig->lock_config_full($src_vmid, 30, $clonefn);
+ } elsif ($src_vmid) {
+ $cloned = PVE::QemuConfig->lock_config_shared($src_vmid, 30, $clonefn);
+ } else {
+ $cloned = $clonefn->();
+ }
+
+ return $cloned->@{qw(file size)};
+};
+
# Note: $pool is only needed when creating a VM, because pool permissions
# are automatically inherited if VM already exists inside a pool.
my $create_disks = sub {
} elsif (defined($volname) && $volname eq 'cloudinit') {
$storeid = $storeid // $default_storage;
die "no storage ID specified (and no default storage)\n" if !$storeid;
+
+ if (
+ my $ci_key = PVE::QemuConfig->has_cloudinit($conf, $ds)
+ || PVE::QemuConfig->has_cloudinit($conf->{pending} || {}, $ds)
+ || PVE::QemuConfig->has_cloudinit($res, $ds)
+ ) {
+ die "$ds - cloud-init drive is already attached at '$ci_key'\n";
+ }
+
my $scfg = PVE::Storage::storage_config($storecfg, $storeid);
my $name = "vm-$vmid-cloudinit";
push @$vollist, $volid;
delete $disk->{format}; # no longer needed
$res->{$ds} = PVE::QemuServer::print_drive($disk);
+ print "$ds: successfully created disk '$res->{$ds}'\n";
} elsif ($volid =~ $NEW_DISK_RE) {
my ($storeid, $size) = ($2 || $default_storage, $3);
die "no storage ID specified (and no default storage)\n" if !$storeid;
- my $defformat = PVE::Storage::storage_default_format($storecfg, $storeid);
- my $fmt = $disk->{format} || $defformat;
- $size = PVE::Tools::convert_size($size, 'gb' => 'kb'); # vdisk_alloc uses kb
+ if (my $source = delete $disk->{'import-from'}) {
+ my $dst_volid;
- my $volid;
- if ($ds eq 'efidisk0') {
- ($volid, $size) = PVE::QemuServer::create_efidisk($storecfg, $storeid, $vmid, $fmt, $arch);
- } else {
- $volid = PVE::Storage::vdisk_alloc($storecfg, $storeid, $vmid, $fmt, undef, $size);
- }
- push @$vollist, $volid;
- $disk->{file} = $volid;
- $disk->{size} = PVE::Tools::convert_size($size, 'kb' => 'b');
- delete $disk->{format}; # no longer needed
- $res->{$ds} = PVE::QemuServer::print_drive($disk);
- } else {
+ if (PVE::Storage::parse_volume_id($source, 1)) { # PVE-managed volume
+ my $dest_info = {
+ vmid => $vmid,
+ drivename => $ds,
+ storage => $storeid,
+ format => $disk->{format},
+ };
- PVE::Storage::check_volume_access($rpcenv, $authuser, $storecfg, $vmid, $volid);
+ $dest_info->{efisize} = PVE::QemuServer::get_efivars_size($conf, $disk)
+ if $ds eq 'efidisk0';
- my $volid_is_new = 1;
+ ($dst_volid, $size) = eval {
+ $import_from_volid->($storecfg, $source, $dest_info, $vollist);
+ };
+ die "cannot import from '$source' - $@" if $@;
+ } else {
+ $source = PVE::Storage::abs_filesystem_path($storecfg, $source, 1);
+ $size = PVE::Storage::file_size_info($source);
+ die "could not get file size of $source\n" if !$size;
+
+ (undef, $dst_volid) = PVE::QemuServer::ImportDisk::do_import(
+ $source,
+ $vmid,
+ $storeid,
+ {
+ drive_name => $ds,
+ format => $disk->{format},
+ 'skip-config-update' => 1,
+ },
+ );
+ push @$vollist, $dst_volid;
+ }
- if ($conf->{$ds}) {
- my $olddrive = PVE::QemuServer::parse_drive($ds, $conf->{$ds});
- $volid_is_new = undef if $olddrive->{file} && $olddrive->{file} eq $volid;
+ $disk->{file} = $dst_volid;
+ $disk->{size} = $size;
+ delete $disk->{format}; # no longer needed
+ $res->{$ds} = PVE::QemuServer::print_drive($disk);
+ } else {
+ my $defformat = PVE::Storage::storage_default_format($storecfg, $storeid);
+ my $fmt = $disk->{format} || $defformat;
+
+ $size = PVE::Tools::convert_size($size, 'gb' => 'kb'); # vdisk_alloc uses kb
+
+ my $volid;
+ if ($ds eq 'efidisk0') {
+ my $smm = PVE::QemuServer::Machine::machine_type_is_q35($conf);
+ ($volid, $size) = PVE::QemuServer::create_efidisk(
+ $storecfg, $storeid, $vmid, $fmt, $arch, $disk, $smm);
+ } elsif ($ds eq 'tpmstate0') {
+ # swtpm can only use raw volumes, and uses a fixed size
+ $size = PVE::Tools::convert_size(PVE::QemuServer::Drive::TPMSTATE_DISK_SIZE, 'b' => 'kb');
+ $volid = PVE::Storage::vdisk_alloc($storecfg, $storeid, $vmid, "raw", undef, $size);
+ } else {
+ $volid = PVE::Storage::vdisk_alloc($storecfg, $storeid, $vmid, $fmt, undef, $size);
+ }
+ push @$vollist, $volid;
+ $disk->{file} = $volid;
+ $disk->{size} = PVE::Tools::convert_size($size, 'kb' => 'b');
+ delete $disk->{format}; # no longer needed
+ $res->{$ds} = PVE::QemuServer::print_drive($disk);
}
- if ($volid_is_new) {
-
- PVE::Storage::activate_volumes($storecfg, [ $volid ]) if $storeid;
-
- my $size = PVE::Storage::volume_size_info($storecfg, $volid);
+ print "$ds: successfully created disk '$res->{$ds}'\n";
+ } else {
+ PVE::Storage::check_volume_access($rpcenv, $authuser, $storecfg, $vmid, $volid);
+ if ($storeid) {
+ my ($vtype) = PVE::Storage::parse_volname($storecfg, $volid);
+ die "cannot use volume $volid - content type needs to be 'images' or 'iso'"
+ if $vtype ne 'images' && $vtype ne 'iso';
+
+ if (PVE::QemuServer::Drive::drive_is_cloudinit($disk)) {
+ if (
+ my $ci_key = PVE::QemuConfig->has_cloudinit($conf, $ds)
+ || PVE::QemuConfig->has_cloudinit($conf->{pending} || {}, $ds)
+ || PVE::QemuConfig->has_cloudinit($res, $ds)
+ ) {
+ die "$ds - cloud-init drive is already attached at '$ci_key'\n";
+ }
+ }
+ }
- die "volume $volid does not exist\n" if !$size;
+ PVE::Storage::activate_volumes($storecfg, [ $volid ]) if $storeid;
- $disk->{size} = $size;
- }
+ my $size = PVE::Storage::volume_size_info($storecfg, $volid);
+ die "volume $volid does not exist\n" if !$size;
+ $disk->{size} = $size;
$res->{$ds} = PVE::QemuServer::print_drive($disk);
}
};
- eval { PVE::QemuConfig->foreach_volume($settings, $code); };
+ eval { $foreach_volume_with_alloc->($settings, $code); };
# free allocated images on error
if (my $err = $@) {
die $err;
}
- # modify vm config if everything went well
- foreach my $ds (keys %$res) {
- $conf->{$ds} = $res->{$ds};
- }
-
- return $vollist;
+ return ($vollist, $res);
};
my $check_cpu_model_access = sub {
sshkeys => 1,
};
+# Creation-time permission check for serialN options. root@pam may configure
+# anything; other users need VM.Config.HWType and may only use the 'socket'
+# value — passing through real host serial devices is root-only.
+# Returns 1 on success, dies or raises a permission exception otherwise.
+my $check_vm_create_serial_perm = sub {
+ my ($rpcenv, $authuser, $vmid, $pool, $param) = @_;
+
+ return 1 if $authuser eq 'root@pam';
+
+ foreach my $opt (keys %{$param}) {
+ next if $opt !~ m/^serial\d+$/;
+
+ if ($param->{$opt} eq 'socket') {
+ $rpcenv->check_vm_perm($authuser, $vmid, $pool, ['VM.Config.HWType']);
+ } else {
+ die "only root can set '$opt' config for real devices\n";
+ }
+ }
+
+ return 1;
+};
+
+# Creation-time permission check for usbN options. root@pam may configure
+# anything; other users need VM.Config.HWType and may only use 'spice'
+# (redirection) values — passing through real host USB devices is root-only.
+# Returns 1 on success, dies or raises a permission exception otherwise.
+my $check_vm_create_usb_perm = sub {
+ my ($rpcenv, $authuser, $vmid, $pool, $param) = @_;
+
+ return 1 if $authuser eq 'root@pam';
+
+ foreach my $opt (keys %{$param}) {
+ next if $opt !~ m/^usb\d+$/;
+
+ if ($param->{$opt} =~ m/spice/) {
+ $rpcenv->check_vm_perm($authuser, $vmid, $pool, ['VM.Config.HWType']);
+ } else {
+ die "only root can set '$opt' config for real devices\n";
+ }
+ }
+
+ return 1;
+};
+
my $check_vm_modify_config_perm = sub {
my ($rpcenv, $authuser, $vmid, $pool, $key_list) = @_;
proxyto => 'node',
protected => 1, # qemu pid files are only readable by root
parameters => {
- additionalProperties => 0,
+ additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
full => {
protected => 1,
proxyto => 'node',
parameters => {
- additionalProperties => 0,
+ additionalProperties => 0,
properties => PVE::QemuServer::json_config_properties(
{
node => get_standard_option('pve-node'),
description => "Assign a unique random ethernet address.",
requires => 'archive',
},
+ 'live-restore' => {
+ optional => 1,
+ type => 'boolean',
+ description => "Start the VM immediately from the backup and restore in background. PBS only.",
+ requires => 'archive',
+ },
pool => {
optional => 1,
type => 'string', format => 'pve-poolid',
default => 0,
description => "Start VM after it was created successfully.",
},
- }),
+ },
+ 1, # with_disk_alloc
+ ),
},
returns => {
type => 'string',
my $start_after_create = extract_param($param, 'start');
my $storage = extract_param($param, 'storage');
my $unique = extract_param($param, 'unique');
+ my $live_restore = extract_param($param, 'live-restore');
if (defined(my $ssh_keys = $param->{sshkeys})) {
$ssh_keys = URI::Escape::uri_unescape($ssh_keys);
raise_perm_exc();
}
- if (!$archive) {
+ if ($archive) {
+ for my $opt (sort keys $param->%*) {
+ if (PVE::QemuServer::Drive::is_valid_drivename($opt)) {
+ raise_param_exc({ $opt => "option conflicts with option 'archive'" });
+ }
+ }
+
+ if ($archive eq '-') {
+ die "pipe requires cli environment\n" if $rpcenv->{type} ne 'cli';
+ $archive = { type => 'pipe' };
+ } else {
+ PVE::Storage::check_volume_access(
+ $rpcenv,
+ $authuser,
+ $storecfg,
+ $vmid,
+ $archive,
+ 'backup',
+ );
+
+ $archive = $parse_restore_archive->($storecfg, $archive);
+ }
+ }
+
+ if (scalar(keys $param->%*) > 0) {
&$resolve_cdrom_alias($param);
&$check_storage_access($rpcenv, $authuser, $storecfg, $vmid, $param, $storage);
&$check_vm_modify_config_perm($rpcenv, $authuser, $vmid, $pool, [ keys %$param]);
- &$check_cpu_model_access($rpcenv, $authuser, $param);
+ &$check_vm_create_serial_perm($rpcenv, $authuser, $vmid, $pool, $param);
+ &$check_vm_create_usb_perm($rpcenv, $authuser, $vmid, $pool, $param);
- foreach my $opt (keys %$param) {
- if (PVE::QemuServer::is_valid_drivename($opt)) {
- my $drive = PVE::QemuServer::parse_drive($opt, $param->{$opt});
- raise_param_exc({ $opt => "unable to parse drive options" }) if !$drive;
+ &$check_cpu_model_access($rpcenv, $authuser, $param);
- PVE::QemuServer::cleanup_drive_path($opt, $storecfg, $drive);
- $param->{$opt} = PVE::QemuServer::print_drive($drive);
- }
- }
+ $check_drive_param->($param, $storecfg);
PVE::QemuServer::add_random_macs($param);
- } else {
- my $keystr = join(' ', keys %$param);
- raise_param_exc({ archive => "option conflicts with other options ($keystr)"}) if $keystr;
-
- if ($archive eq '-') {
- die "pipe requires cli environment\n"
- if $rpcenv->{type} ne 'cli';
- $archive = { type => 'pipe' };
- } else {
- PVE::Storage::check_volume_access($rpcenv, $authuser, $storecfg, $vmid, $archive);
-
- $archive = $parse_restore_archive->($storecfg, $archive);
- }
}
my $emsg = $is_restore ? "unable to restore VM $vmid -" : "unable to create VM $vmid -";
eval { PVE::QemuConfig->create_and_lock_config($vmid, $force) };
die "$emsg $@" if $@;
+ my $restored_data = 0;
my $restorefn = sub {
my $conf = PVE::QemuConfig->load_config($vmid);
pool => $pool,
unique => $unique,
bwlimit => $bwlimit,
+ live => $live_restore,
+ override_conf => $param,
};
if ($archive->{type} eq 'file' || $archive->{type} eq 'pipe') {
+ die "live-restore is only compatible with backup images from a Proxmox Backup Server\n"
+ if $live_restore;
PVE::QemuServer::restore_file_archive($archive->{path} // '-', $vmid, $authuser, $restore_options);
} elsif ($archive->{type} eq 'pbs') {
PVE::QemuServer::restore_proxmox_backup_archive($archive->{volid}, $vmid, $authuser, $restore_options);
} else {
die "unknown backup archive type\n";
}
+ $restored_data = 1;
+
my $restored_conf = PVE::QemuConfig->load_config($vmid);
# Convert restored VM to template if backup was VM template
if (PVE::QemuConfig->is_template($restored_conf)) {
eval { PVE::QemuServer::template_create($vmid, $restored_conf) };
warn $@ if $@;
}
-
- PVE::AccessControl::add_vm_to_pool($vmid, $pool) if $pool;
};
# ensure no old replication state are exists
PVE::QemuConfig->lock_config_full($vmid, 1, $realcmd);
- if ($start_after_create) {
+ if ($start_after_create && !$live_restore) {
print "Execute autostart\n";
eval { PVE::API2::Qemu->vm_start({ vmid => $vmid, node => $node }) };
warn $@ if $@;
my $conf = $param;
my $arch = PVE::QemuServer::get_vm_arch($conf);
+ $conf->{meta} = PVE::QemuServer::new_meta_info_string();
+
my $vollist = [];
eval {
- $vollist = &$create_disks($rpcenv, $authuser, $conf, $arch, $storecfg, $vmid, $pool, $param, $storage);
+ ($vollist, my $created_opts) = $create_disks->(
+ $rpcenv,
+ $authuser,
+ $conf,
+ $arch,
+ $storecfg,
+ $vmid,
+ $pool,
+ $param,
+ $storage,
+ );
+ $conf->{$_} = $created_opts->{$_} for keys $created_opts->%*;
if (!$conf->{boot}) {
my $devs = PVE::QemuServer::get_default_bootdevices($conf);
$conf->{vmgenid} = PVE::QemuServer::generate_uuid();
}
+ my $machine = $conf->{machine};
+ if (!$machine || $machine =~ m/^(?:pc|q35|virt)$/) {
+ # always pin Windows' machine version on create, they get confused too easily
+ if (PVE::QemuServer::windows_version($conf->{ostype})) {
+ $conf->{machine} = PVE::QemuServer::windows_get_pinned_machine_version($machine);
+ }
+ }
+
PVE::QemuConfig->write_config($vmid, $conf);
};
if (my $err = $@) {
eval { PVE::QemuConfig->remove_lock($vmid, 'create') };
warn $@ if $@;
+ if ($restored_data) {
+ warn "error after data was restored, VM disks should be OK but config may "
+ ."require adaptions. VM $vmid state is NOT cleaned up.\n";
+ } else {
+ warn "error before or during data restore, some or all disks were not "
+ ."completely restored. VM $vmid state is NOT cleaned up.\n";
+ }
die $err;
}
};
user => 'all',
},
parameters => {
- additionalProperties => 0,
+ additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
vmid => get_standard_option('pve-vmid'),
},
description => "Read VM RRD statistics (returns PNG)",
parameters => {
- additionalProperties => 0,
+ additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
vmid => get_standard_option('pve-vmid'),
},
description => "Read VM RRD statistics",
parameters => {
- additionalProperties => 0,
+ additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
vmid => get_standard_option('pve-vmid'),
check => ['perm', '/vms/{vmid}', [ 'VM.Audit' ]],
},
parameters => {
- additionalProperties => 0,
+ additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
vmid => get_standard_option('pve-vmid', { completion => \&PVE::QemuServer::complete_vmid }),
die "cannot add non-replicatable volume to a replicated VM\n";
};
+ $check_drive_param->($param, $storecfg, $check_replication);
+
foreach my $opt (keys %$param) {
- if (PVE::QemuServer::is_valid_drivename($opt)) {
- # cleanup drive path
- my $drive = PVE::QemuServer::parse_drive($opt, $param->{$opt});
- raise_param_exc({ $opt => "unable to parse drive options" }) if !$drive;
- PVE::QemuServer::cleanup_drive_path($opt, $storecfg, $drive);
- $check_replication->($drive);
- $param->{$opt} = PVE::QemuServer::print_drive($drive);
- } elsif ($opt =~ m/^net(\d+)$/) {
+ if ($opt =~ m/^net(\d+)$/) {
# add macaddr
my $net = PVE::QemuServer::parse_net($param->{$opt});
$param->{$opt} = PVE::QemuServer::print_net($net);
my $modified = {}; # record what $option we modify
- my $bootcfg = PVE::JSONSchema::parse_property_string('pve-qm-boot', $conf->{boot})
- if $conf->{boot};
- my @bootorder = PVE::Tools::split_list($bootcfg->{order})
- if $bootcfg && $bootcfg->{order};
+ my @bootorder;
+ if (my $boot = $conf->{boot}) {
+ my $bootcfg = PVE::JSONSchema::parse_property_string('pve-qm-boot', $boot);
+ @bootorder = PVE::Tools::split_list($bootcfg->{order}) if $bootcfg && $bootcfg->{order};
+ }
my $bootorder_deleted = grep {$_ eq 'bootorder'} @delete;
+ # Check that $authuser may configure drive $opt with value $val:
+ # VM.Config.CDROM for CD-ROM drives, VM.Config.Disk for everything else.
+ # Raises a permission exception on failure.
+ my $check_drive_perms = sub {
+ my ($opt, $val) = @_;
+ my $drive = PVE::QemuServer::parse_drive($opt, $val, 1);
+ # FIXME: cloudinit: CDROM or Disk?
+ if (PVE::QemuServer::drive_is_cdrom($drive)) { # CDROM
+ $rpcenv->check_vm_perm($authuser, $vmid, undef, ['VM.Config.CDROM']);
+ } else {
+ $rpcenv->check_vm_perm($authuser, $vmid, undef, ['VM.Config.Disk']);
+ }
+ };
+
foreach my $opt (@delete) {
$modified->{$opt} = 1;
$conf = PVE::QemuConfig->load_config($vmid); # update/reload
}
} elsif (PVE::QemuServer::is_valid_drivename($opt)) {
PVE::QemuConfig->check_protection($conf, "can't remove drive '$opt'");
- $rpcenv->check_vm_perm($authuser, $vmid, undef, ['VM.Config.Disk']);
+ $check_drive_perms->($opt, $val);
PVE::QemuServer::vmconfig_register_unused_drive($storecfg, $vmid, $conf, PVE::QemuServer::parse_drive($opt, $val))
if $is_pending_val;
PVE::QemuConfig->add_to_pending_delete($conf, $opt, $force);
my $arch = PVE::QemuServer::get_vm_arch($conf);
if (PVE::QemuServer::is_valid_drivename($opt)) {
- my $drive = PVE::QemuServer::parse_drive($opt, $param->{$opt});
- # FIXME: cloudinit: CDROM or Disk?
- if (PVE::QemuServer::drive_is_cdrom($drive)) { # CDROM
- $rpcenv->check_vm_perm($authuser, $vmid, undef, ['VM.Config.CDROM']);
- } else {
- $rpcenv->check_vm_perm($authuser, $vmid, undef, ['VM.Config.Disk']);
+ # old drive
+ if ($conf->{$opt}) {
+ $check_drive_perms->($opt, $conf->{$opt});
}
+
+ # new drive
+ $check_drive_perms->($opt, $param->{$opt});
PVE::QemuServer::vmconfig_register_unused_drive($storecfg, $vmid, $conf, PVE::QemuServer::parse_drive($opt, $conf->{pending}->{$opt}))
if defined($conf->{pending}->{$opt});
- &$create_disks($rpcenv, $authuser, $conf->{pending}, $arch, $storecfg, $vmid, undef, {$opt => $param->{$opt}});
+ my (undef, $created_opts) = $create_disks->(
+ $rpcenv,
+ $authuser,
+ $conf,
+ $arch,
+ $storecfg,
+ $vmid,
+ undef,
+ {$opt => $param->{$opt}},
+ );
+ $conf->{pending}->{$_} = $created_opts->{$_} for keys $created_opts->%*;
+
+ # default legacy boot order implies all cdroms anyway
+ if (@bootorder) {
+ # append new CD drives to bootorder to mark them bootable
+ my $drive = PVE::QemuServer::parse_drive($opt, $param->{$opt}, 1);
+ if (PVE::QemuServer::drive_is_cdrom($drive, 1) && !grep(/^$opt$/, @bootorder)) {
+ push @bootorder, $opt;
+ $conf->{pending}->{boot} = PVE::QemuServer::print_bootorder(\@bootorder);
+ $modified->{boot} = 1;
+ }
+ }
} elsif ($opt =~ m/^serial\d+/) {
if ((!defined($conf->{$opt}) || $conf->{$opt} eq 'socket') && $param->{$opt} eq 'socket') {
$rpcenv->check_vm_perm($authuser, $vmid, undef, ['VM.Config.HWType']);
if ($new_bootcfg->{order}) {
my @devs = PVE::Tools::split_list($new_bootcfg->{order});
for my $dev (@devs) {
- my $exists = $conf->{$dev} || $conf->{pending}->{$dev};
+ my $exists = $conf->{$dev} || $conf->{pending}->{$dev} || $param->{$dev};
my $deleted = grep {$_ eq $dev} @delete;
die "invalid bootorder: device '$dev' does not exist'\n"
if !$exists || $deleted;
if ($running) {
PVE::QemuServer::vmconfig_hotplug_pending($vmid, $conf, $storecfg, $modified, $errors);
} else {
- PVE::QemuServer::vmconfig_apply_pending($vmid, $conf, $storecfg, $running, $errors);
+ PVE::QemuServer::vmconfig_apply_pending($vmid, $conf, $storecfg, $errors);
}
raise_param_exc($errors) if scalar(keys %$errors);
if (!$running) {
my $status = PVE::Tools::upid_read_status($upid);
- return if $status eq 'OK';
- die $status;
+ return if !PVE::Tools::upid_status_is_error($status);
+ die "failed to update VM $vmid: $status\n";
}
}
check => ['perm', '/vms/{vmid}', $vm_config_perm_list, any => 1],
},
parameters => {
- additionalProperties => 0,
+ additionalProperties => 0,
properties => PVE::QemuServer::json_config_properties(
{
node => get_standard_option('pve-node'),
maximum => 30,
optional => 1,
},
- }),
+ },
+ 1, # with_disk_alloc
+ ),
},
returns => {
type => 'string',
check => ['perm', '/vms/{vmid}', $vm_config_perm_list, any => 1],
},
parameters => {
- additionalProperties => 0,
+ additionalProperties => 0,
properties => PVE::QemuServer::json_config_properties(
{
node => get_standard_option('pve-node'),
maxLength => 40,
optional => 1,
},
- }),
+ },
+ 1, # with_disk_alloc
+ ),
},
returns => { type => 'null' },
code => sub {
method => 'DELETE',
protected => 1,
proxyto => 'node',
- description => "Destroy the vm (also delete all used/owned volumes).",
+ description => "Destroy the VM and all used/owned volumes. Removes any VM specific permissions"
+ ." and firewall rules",
permissions => {
check => [ 'perm', '/vms/{vmid}', ['VM.Allocate']],
},
parameters => {
- additionalProperties => 0,
+ additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
vmid => get_standard_option('pve-vmid', { completion => \&PVE::QemuServer::complete_vmid_stopped }),
skiplock => get_standard_option('skiplock'),
purge => {
type => 'boolean',
- description => "Remove vmid from backup cron jobs.",
+ description => "Remove VMID from configurations, like backup & replication jobs and HA.",
optional => 1,
},
+ 'destroy-unreferenced-disks' => {
+ type => 'boolean',
+ description => "If set, destroy additionally all disks not referenced in the config"
+ ." but with a matching VMID from all enabled storages.",
+ optional => 1,
+ default => 0,
+ },
},
},
returns => {
# repeat, config might have changed
my $ha_managed = $early_checks->();
- PVE::QemuServer::destroy_vm($storecfg, $vmid, $skiplock, { lock => 'destroyed' });
+ my $purge_unreferenced = $param->{'destroy-unreferenced-disks'};
+
+ PVE::QemuServer::destroy_vm(
+ $storecfg,
+ $vmid,
+ $skiplock, { lock => 'destroyed' },
+ $purge_unreferenced,
+ );
PVE::AccessControl::remove_vm_access($vmid);
PVE::Firewall::remove_vmfw_conf($vmid);
check => [ 'perm', '/vms/{vmid}', ['VM.Config.Disk']],
},
parameters => {
- additionalProperties => 0,
+ additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
vmid => get_standard_option('pve-vmid', { completion => \&PVE::QemuServer::complete_vmid }),
},
description => "Creates a TCP VNC proxy connections.",
parameters => {
- additionalProperties => 0,
+ additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
vmid => get_standard_option('pve-vmid'),
},
},
returns => {
- additionalProperties => 0,
+ additionalProperties => 0,
properties => {
user => { type => 'string' },
ticket => { type => 'string' },
},
description => "Opens a weksocket for VNC traffic.",
parameters => {
- additionalProperties => 0,
+ additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
vmid => get_standard_option('pve-vmid'),
},
description => "Returns a SPICE configuration to connect to the VM.",
parameters => {
- additionalProperties => 0,
+ additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
vmid => get_standard_option('pve-vmid'),
user => 'all',
},
parameters => {
- additionalProperties => 0,
+ additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
vmid => get_standard_option('pve-vmid'),
check => ['perm', '/vms/{vmid}', [ 'VM.Audit' ]],
},
parameters => {
- additionalProperties => 0,
+ additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
vmid => get_standard_option('pve-vmid'),
$status->{ha} = PVE::HA::Config::get_service_status("vm:$param->{vmid}");
- $status->{spice} = 1 if PVE::QemuServer::vga_conf_has_spice($conf->{vga});
- $status->{agent} = 1 if (PVE::QemuServer::parse_guest_agent($conf)->{enabled});
+ if ($conf->{vga}) {
+ my $vga = PVE::QemuServer::parse_vga($conf->{vga});
+ my $spice = defined($vga->{type}) && $vga->{type} =~ /^virtio/;
+ $spice ||= PVE::QemuServer::vga_conf_has_spice($conf->{vga});
+ $status->{spice} = 1 if $spice;
+ }
+ $status->{agent} = 1 if PVE::QemuServer::get_qga_key($conf, 'enabled');
return $status;
}});
check => ['perm', '/vms/{vmid}', [ 'VM.PowerMgmt' ]],
},
parameters => {
- additionalProperties => 0,
+ additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
vmid => get_standard_option('pve-vmid',
my $node = extract_param($param, 'node');
my $vmid = extract_param($param, 'vmid');
my $timeout = extract_param($param, 'timeout');
-
my $machine = extract_param($param, 'machine');
- my $force_cpu = extract_param($param, 'force-cpu');
my $get_root_param = sub {
my $value = extract_param($param, $_[0]);
my $migration_type = $get_root_param->('migration_type');
my $migration_network = $get_root_param->('migration_network');
my $targetstorage = $get_root_param->('targetstorage');
+ my $force_cpu = $get_root_param->('force-cpu');
my $storagemap;
my $spice_ticket;
my $nbd_protocol_version = 0;
my $replicated_volumes = {};
+ my $offline_volumes = {};
if ($stateuri && ($stateuri eq 'tcp' || $stateuri eq 'unix') && $migratedfrom && ($rpcenv->{type} eq 'cli')) {
while (defined(my $line = <STDIN>)) {
chomp $line;
$nbd_protocol_version = $1;
} elsif ($line =~ m/^replicated_volume: (.*)$/) {
$replicated_volumes->{$1} = 1;
- } else {
+ } elsif ($line =~ m/^tpmstate0: (.*)$/) { # Deprecated, use offline_volume instead
+ $offline_volumes->{tpmstate0} = $1;
+ } elsif ($line =~ m/^offline_volume: ([^:]+): (.*)$/) {
+ $offline_volumes->{$1} = $2;
+ } elsif (!$spice_ticket) {
# fallback for old source node
$spice_ticket = $line;
+ } else {
+ warn "unknown 'start' parameter on STDIN: '$line'\n";
}
}
}
storagemap => $storagemap,
nbd_proto_version => $nbd_protocol_version,
replicated_volumes => $replicated_volumes,
+ offline_volumes => $offline_volumes,
};
my $params = {
check => ['perm', '/vms/{vmid}', [ 'VM.PowerMgmt' ]],
},
parameters => {
- additionalProperties => 0,
+ additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
vmid => get_standard_option('pve-vmid',
check => ['perm', '/vms/{vmid}', [ 'VM.PowerMgmt' ]],
},
parameters => {
- additionalProperties => 0,
+ additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
vmid => get_standard_option('pve-vmid',
check => ['perm', '/vms/{vmid}', [ 'VM.PowerMgmt' ]],
},
parameters => {
- additionalProperties => 0,
+ additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
vmid => get_standard_option('pve-vmid',
#
# checking the qmp status here to get feedback to the gui/cli/api
# and the status query should not take too long
- my $qmpstatus = eval {
- PVE::QemuConfig::assert_config_exists_on_node($vmid);
- mon_cmd($vmid, "query-status");
- };
- my $err = $@ if $@;
-
- if (!$err && $qmpstatus->{status} eq "paused") {
+ if (PVE::QemuServer::vm_is_paused($vmid)) {
if ($param->{forceStop}) {
warn "VM is paused - stop instead of shutdown\n";
$shutdown = 0;
my $node = extract_param($param, 'node');
my $vmid = extract_param($param, 'vmid');
- my $qmpstatus = eval {
- PVE::QemuConfig::assert_config_exists_on_node($vmid);
- mon_cmd($vmid, "query-status");
- };
- my $err = $@ if $@;
-
- if (!$err && $qmpstatus->{status} eq "paused") {
- die "VM is paused - cannot shutdown\n";
- }
+ die "VM is paused - cannot shutdown\n" if PVE::QemuServer::vm_is_paused($vmid);
die "VM $vmid not running\n" if !PVE::QemuServer::check_running($vmid);
check => ['perm', '/vms/{vmid}', [ 'VM.PowerMgmt' ]],
},
parameters => {
- additionalProperties => 0,
+ additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
vmid => get_standard_option('pve-vmid',
# early check for storage permission, for better user feedback
if ($todisk) {
$rpcenv->check_vm_perm($authuser, $vmid, undef, ['VM.Config.Disk']);
+ my $conf = PVE::QemuConfig->load_config($vmid);
+
+ # cannot save the state of a non-virtualized PCIe device, so resume cannot really work
+ for my $key (keys %$conf) {
+ next if $key !~ /^hostpci\d+/;
+ die "cannot suspend VM to disk due to passed-through PCI device(s), which lack the"
+ ." possibility to save/restore their internal state\n";
+ }
if (!$statestorage) {
# get statestorage from config if none is given
- my $conf = PVE::QemuConfig->load_config($vmid);
my $storecfg = PVE::Storage::config();
$statestorage = PVE::QemuServer::find_vmstate_storage($conf, $storecfg);
}
check => ['perm', '/vms/{vmid}', [ 'VM.PowerMgmt' ]],
},
parameters => {
- additionalProperties => 0,
+ additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
vmid => get_standard_option('pve-vmid',
check => ['perm', '/vms/{vmid}', [ 'VM.Console' ]],
},
parameters => {
- additionalProperties => 0,
+ additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
vmid => get_standard_option('pve-vmid',
check => ['perm', '/vms/{vmid}', [ 'VM.Audit' ]],
},
parameters => {
- additionalProperties => 0,
+ additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
vmid => get_standard_option('pve-vmid'),
]
},
parameters => {
- additionalProperties => 0,
+ additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
vmid => get_standard_option('pve-vmid', { completion => \&PVE::QemuServer::complete_vmid }),
my $vmid = extract_param($param, 'vmid');
my $newid = extract_param($param, 'newid');
my $pool = extract_param($param, 'pool');
- $rpcenv->check_pool_exist($pool) if defined($pool);
my $snapname = extract_param($param, 'snapname');
my $storage = extract_param($param, 'storage');
undef $target;
}
- PVE::Cluster::check_node_exists($target) if $target;
-
- my $storecfg = PVE::Storage::config();
+ my $running = PVE::QemuServer::check_running($vmid) || 0;
- if ($storage) {
- # check if storage is enabled on local node
- PVE::Storage::storage_check_enabled($storecfg, $storage);
- if ($target) {
- # check if storage is available on target node
- PVE::Storage::storage_check_node($storecfg, $storage, $target);
- # clone only works if target storage is shared
- my $scfg = PVE::Storage::storage_config($storecfg, $storage);
- die "can't clone to non-shared storage '$storage'\n" if !$scfg->{shared};
- }
- }
+ my $load_and_check = sub {
+ $rpcenv->check_pool_exist($pool) if defined($pool);
+ PVE::Cluster::check_node_exists($target) if $target;
- PVE::Cluster::check_cfs_quorum();
+ my $storecfg = PVE::Storage::config();
- my $running = PVE::QemuServer::check_running($vmid) || 0;
+ if ($storage) {
+ # check if storage is enabled on local node
+ PVE::Storage::storage_check_enabled($storecfg, $storage);
+ if ($target) {
+ # check if storage is available on target node
+ PVE::Storage::storage_check_enabled($storecfg, $storage, $target);
+ # clone only works if target storage is shared
+ my $scfg = PVE::Storage::storage_config($storecfg, $storage);
+ die "can't clone to non-shared storage '$storage'\n"
+ if !$scfg->{shared};
+ }
+ }
- my $clonefn = sub {
- # do all tests after lock but before forking worker - if possible
+ PVE::Cluster::check_cfs_quorum();
my $conf = PVE::QemuConfig->load_config($vmid);
PVE::QemuConfig->check_lock($conf);
die "snapshot '$snapname' does not exist\n"
if $snapname && !defined( $conf->{snapshots}->{$snapname});
- my $full = extract_param($param, 'full') // !PVE::QemuConfig->is_template($conf);
+ my $full = $param->{full} // !PVE::QemuConfig->is_template($conf);
die "parameter 'storage' not allowed for linked clones\n"
if defined($storage) && !$full;
# no need to copy unused images, because VMID(owner) changes anyways
next if $opt =~ m/^unused\d+$/;
+ die "cannot clone TPM state while VM is running\n"
+ if $full && $running && !$snapname && $opt eq 'tpmstate0';
+
# always change MAC! address
if ($opt =~ m/^net(\d+)$/) {
my $net = PVE::QemuServer::parse_net($value);
}
}
- # auto generate a new uuid
+ return ($conffile, $newconf, $oldconf, $vollist, $drives, $fullclone);
+ };
+
+ my $clonefn = sub {
+ my ($conffile, $newconf, $oldconf, $vollist, $drives, $fullclone) = $load_and_check->();
+ my $storecfg = PVE::Storage::config();
+
+ # auto generate a new uuid
my $smbios1 = PVE::QemuServer::parse_smbios1($newconf->{smbios1} || '');
$smbios1->{uuid} = PVE::QemuServer::generate_uuid();
$newconf->{smbios1} = PVE::QemuServer::print_smbios1($smbios1);
# FIXME use PVE::QemuConfig->create_and_lock_config and adapt code
PVE::Tools::file_set_contents($conffile, "# qmclone temporary file\nlock: clone\n");
- my $realcmd = sub {
- my $upid = shift;
+ PVE::Firewall::clone_vmfw_conf($vmid, $newid);
- my $newvollist = [];
- my $jobs = {};
+ my $newvollist = [];
+ my $jobs = {};
- eval {
- local $SIG{INT} =
- local $SIG{TERM} =
- local $SIG{QUIT} =
- local $SIG{HUP} = sub { die "interrupted by signal\n"; };
+ eval {
+ local $SIG{INT} =
+ local $SIG{TERM} =
+ local $SIG{QUIT} =
+ local $SIG{HUP} = sub { die "interrupted by signal\n"; };
- PVE::Storage::activate_volumes($storecfg, $vollist, $snapname);
+ PVE::Storage::activate_volumes($storecfg, $vollist, $snapname);
- my $bwlimit = extract_param($param, 'bwlimit');
+ my $bwlimit = extract_param($param, 'bwlimit');
- my $total_jobs = scalar(keys %{$drives});
- my $i = 1;
+ my $total_jobs = scalar(keys %{$drives});
+ my $i = 1;
- foreach my $opt (keys %$drives) {
- my $drive = $drives->{$opt};
- my $skipcomplete = ($total_jobs != $i); # finish after last drive
- my $completion = $skipcomplete ? 'skip' : 'complete';
+ foreach my $opt (sort keys %$drives) {
+ my $drive = $drives->{$opt};
+ my $skipcomplete = ($total_jobs != $i); # finish after last drive
+ my $completion = $skipcomplete ? 'skip' : 'complete';
- my $src_sid = PVE::Storage::parse_volume_id($drive->{file});
- my $storage_list = [ $src_sid ];
- push @$storage_list, $storage if defined($storage);
- my $clonelimit = PVE::Storage::get_bandwidth_limit('clone', $storage_list, $bwlimit);
+ my $src_sid = PVE::Storage::parse_volume_id($drive->{file});
+ my $storage_list = [ $src_sid ];
+ push @$storage_list, $storage if defined($storage);
+ my $clonelimit = PVE::Storage::get_bandwidth_limit('clone', $storage_list, $bwlimit);
- my $newdrive = PVE::QemuServer::clone_disk($storecfg, $vmid, $running, $opt, $drive, $snapname,
- $newid, $storage, $format, $fullclone->{$opt}, $newvollist,
- $jobs, $completion, $oldconf->{agent}, $clonelimit, $oldconf);
+ my $source_info = {
+ vmid => $vmid,
+ running => $running,
+ drivename => $opt,
+ drive => $drive,
+ snapname => $snapname,
+ };
- $newconf->{$opt} = PVE::QemuServer::print_drive($newdrive);
+ my $dest_info = {
+ vmid => $newid,
+ drivename => $opt,
+ storage => $storage,
+ format => $format,
+ };
- PVE::QemuConfig->write_config($newid, $newconf);
- $i++;
- }
+ $dest_info->{efisize} = PVE::QemuServer::get_efivars_size($oldconf)
+ if $opt eq 'efidisk0';
- delete $newconf->{lock};
+ my $newdrive = PVE::QemuServer::clone_disk(
+ $storecfg,
+ $source_info,
+ $dest_info,
+ $fullclone->{$opt},
+ $newvollist,
+ $jobs,
+ $completion,
+ $oldconf->{agent},
+ $clonelimit,
+ );
- # do not write pending changes
- if (my @changes = keys %{$newconf->{pending}}) {
- my $pending = join(',', @changes);
- warn "found pending changes for '$pending', discarding for clone\n";
- delete $newconf->{pending};
- }
+ $newconf->{$opt} = PVE::QemuServer::print_drive($newdrive);
PVE::QemuConfig->write_config($newid, $newconf);
+ $i++;
+ }
- if ($target) {
- # always deactivate volumes - avoid lvm LVs to be active on several nodes
- PVE::Storage::deactivate_volumes($storecfg, $vollist, $snapname) if !$running;
- PVE::Storage::deactivate_volumes($storecfg, $newvollist);
+ delete $newconf->{lock};
- my $newconffile = PVE::QemuConfig->config_file($newid, $target);
- die "Failed to move config to node '$target' - rename failed: $!\n"
- if !rename($conffile, $newconffile);
- }
+ # do not write pending changes
+ if (my @changes = keys %{$newconf->{pending}}) {
+ my $pending = join(',', @changes);
+ warn "found pending changes for '$pending', discarding for clone\n";
+ delete $newconf->{pending};
+ }
- PVE::AccessControl::add_vm_to_pool($newid, $pool) if $pool;
- };
- if (my $err = $@) {
- eval { PVE::QemuServer::qemu_blockjobs_cancel($vmid, $jobs) };
- sleep 1; # some storage like rbd need to wait before release volume - really?
+ PVE::QemuConfig->write_config($newid, $newconf);
- foreach my $volid (@$newvollist) {
- eval { PVE::Storage::vdisk_free($storecfg, $volid); };
- warn $@ if $@;
- }
+ if ($target) {
+ # always deactivate volumes - avoid lvm LVs to be active on several nodes
+ PVE::Storage::deactivate_volumes($storecfg, $vollist, $snapname) if !$running;
+ PVE::Storage::deactivate_volumes($storecfg, $newvollist);
- PVE::Firewall::remove_vmfw_conf($newid);
+ my $newconffile = PVE::QemuConfig->config_file($newid, $target);
+ die "Failed to move config to node '$target' - rename failed: $!\n"
+ if !rename($conffile, $newconffile);
+ }
- unlink $conffile; # avoid races -> last thing before die
+ PVE::AccessControl::add_vm_to_pool($newid, $pool) if $pool;
+ };
+ if (my $err = $@) {
+ eval { PVE::QemuServer::qemu_blockjobs_cancel($vmid, $jobs) };
+ sleep 1; # some storage like rbd need to wait before release volume - really?
- die "clone failed: $err";
+ foreach my $volid (@$newvollist) {
+ eval { PVE::Storage::vdisk_free($storecfg, $volid); };
+ warn $@ if $@;
}
- return;
- };
+ PVE::Firewall::remove_vmfw_conf($newid);
- PVE::Firewall::clone_vmfw_conf($vmid, $newid);
+ unlink $conffile; # avoid races -> last thing before die
+
+ die "clone failed: $err";
+ }
- return $rpcenv->fork_worker('qmclone', $vmid, $authuser, $realcmd);
+ return;
};
# Aquire exclusive lock lock for $newid
return PVE::QemuConfig->lock_config_full($newid, 1, $clonefn);
};
- # exclusive lock if VM is running - else shared lock is enough;
- if ($running) {
- return PVE::QemuConfig->lock_config_full($vmid, 1, $lock_target_vm);
- } else {
- return PVE::QemuConfig->lock_config_shared($vmid, 1, $lock_target_vm);
- }
+ my $lock_source_vm = sub {
+ # exclusive lock if VM is running - else shared lock is enough;
+ if ($running) {
+ return PVE::QemuConfig->lock_config_full($vmid, 1, $lock_target_vm);
+ } else {
+ return PVE::QemuConfig->lock_config_shared($vmid, 1, $lock_target_vm);
+ }
+ };
+
+ $load_and_check->(); # early checks before forking/locking
+
+ return $rpcenv->fork_worker('qmclone', $vmid, $authuser, $lock_source_vm);
}});
__PACKAGE__->register_method({
method => 'POST',
protected => 1,
proxyto => 'node',
- description => "Move volume to different storage.",
+ description => "Move volume to different storage or to a different VM.",
permissions => {
- description => "You need 'VM.Config.Disk' permissions on /vms/{vmid}, and 'Datastore.AllocateSpace' permissions on the storage.",
- check => [ 'and',
- ['perm', '/vms/{vmid}', [ 'VM.Config.Disk' ]],
- ['perm', '/storage/{storage}', [ 'Datastore.AllocateSpace' ]],
- ],
+ description => "You need 'VM.Config.Disk' permissions on /vms/{vmid}, " .
+ "and 'Datastore.AllocateSpace' permissions on the storage. To move ".
+ "a disk to another VM, you need the permissions on the target VM as well.",
+ check => ['perm', '/vms/{vmid}', [ 'VM.Config.Disk' ]],
},
parameters => {
- additionalProperties => 0,
+ additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
vmid => get_standard_option('pve-vmid', { completion => \&PVE::QemuServer::complete_vmid }),
+ 'target-vmid' => get_standard_option('pve-vmid', {
+ completion => \&PVE::QemuServer::complete_vmid,
+ optional => 1,
+ }),
disk => {
type => 'string',
description => "The disk you want to move.",
- enum => [PVE::QemuServer::Drive::valid_drive_names()],
+ enum => [PVE::QemuServer::Drive::valid_drive_names_with_unused()],
},
storage => get_standard_option('pve-storage-id', {
description => "Target storage.",
completion => \&PVE::QemuServer::complete_storage,
+ optional => 1,
}),
- 'format' => {
- type => 'string',
- description => "Target Format.",
- enum => [ 'raw', 'qcow2', 'vmdk' ],
- optional => 1,
- },
+ 'format' => {
+ type => 'string',
+ description => "Target Format.",
+ enum => [ 'raw', 'qcow2', 'vmdk' ],
+ optional => 1,
+ },
delete => {
type => 'boolean',
- description => "Delete the original disk after successful copy. By default the original disk is kept as unused disk.",
+ description => "Delete the original disk after successful copy. By default the"
+ ." original disk is kept as unused disk.",
optional => 1,
default => 0,
},
digest => {
type => 'string',
- description => 'Prevent changes if current configuration file has different SHA1 digest. This can be used to prevent concurrent modifications.',
+ description => 'Prevent changes if current configuration file has different SHA1'
+ .' digest. This can be used to prevent concurrent modifications.',
maxLength => 40,
optional => 1,
},
minimum => '0',
default => 'move limit from datacenter or storage config',
},
+ 'target-disk' => {
+ type => 'string',
+ description => "The config key the disk will be moved to on the target VM"
+ ." (for example, ide0 or scsi1). Default is the source disk key.",
+ enum => [PVE::QemuServer::Drive::valid_drive_names_with_unused()],
+ optional => 1,
+ },
+ 'target-digest' => {
+ type => 'string',
+ description => 'Prevent changes if the current config file of the target VM has a'
+ .' different SHA1 digest. This can be used to detect concurrent modifications.',
+ maxLength => 40,
+ optional => 1,
+ },
},
},
returns => {
my $node = extract_param($param, 'node');
my $vmid = extract_param($param, 'vmid');
+ my $target_vmid = extract_param($param, 'target-vmid');
my $digest = extract_param($param, 'digest');
+ my $target_digest = extract_param($param, 'target-digest');
my $disk = extract_param($param, 'disk');
+ my $target_disk = extract_param($param, 'target-disk') // $disk;
my $storeid = extract_param($param, 'storage');
my $format = extract_param($param, 'format');
my $storecfg = PVE::Storage::config();
- my $updatefn = sub {
+ my $load_and_check_move = sub {
my $conf = PVE::QemuConfig->load_config($vmid);
PVE::QemuConfig->check_lock($conf);
- die "VM config checksum missmatch (file change by other user?)\n"
- if $digest && $digest ne $conf->{digest};
+ PVE::Tools::assert_if_modified($digest, $conf->{digest});
die "disk '$disk' does not exist\n" if !$conf->{$disk};
$oldfmt = $1;
}
- die "you can't move to the same storage with same format\n" if $oldstoreid eq $storeid &&
- (!$format || !$oldfmt || $oldfmt eq $format);
+ die "you can't move to the same storage with same format\n"
+ if $oldstoreid eq $storeid && (!$format || !$oldfmt || $oldfmt eq $format);
# this only checks snapshots because $disk is passed!
- my $snapshotted = PVE::QemuServer::Drive::is_volume_in_use($storecfg, $conf, $disk, $old_volid);
+ my $snapshotted = PVE::QemuServer::Drive::is_volume_in_use(
+ $storecfg,
+ $conf,
+ $disk,
+ $old_volid
+ );
die "you can't move a disk with snapshots and delete the source\n"
if $snapshotted && $param->{delete};
- PVE::Cluster::log_msg('info', $authuser, "move disk VM $vmid: move --disk $disk --storage $storeid");
+ return ($conf, $drive, $oldstoreid, $snapshotted);
+ };
+
+ my $move_updatefn = sub {
+ my ($conf, $drive, $oldstoreid, $snapshotted) = $load_and_check_move->();
+ my $old_volid = $drive->{file};
+
+ PVE::Cluster::log_msg(
+ 'info',
+ $authuser,
+ "move disk VM $vmid: move --disk $disk --storage $storeid"
+ );
my $running = PVE::QemuServer::check_running($vmid);
PVE::Storage::activate_volumes($storecfg, [ $drive->{file} ]);
- my $realcmd = sub {
- my $newvollist = [];
+ my $newvollist = [];
+
+ eval {
+ local $SIG{INT} =
+ local $SIG{TERM} =
+ local $SIG{QUIT} =
+ local $SIG{HUP} = sub { die "interrupted by signal\n"; };
+
+ warn "moving disk with snapshots, snapshots will not be moved!\n"
+ if $snapshotted;
+
+ my $bwlimit = extract_param($param, 'bwlimit');
+ my $movelimit = PVE::Storage::get_bandwidth_limit(
+ 'move',
+ [$oldstoreid, $storeid],
+ $bwlimit
+ );
+
+ my $source_info = {
+ vmid => $vmid,
+ running => $running,
+ drivename => $disk,
+ drive => $drive,
+ snapname => undef,
+ };
+
+ my $dest_info = {
+ vmid => $vmid,
+ drivename => $disk,
+ storage => $storeid,
+ format => $format,
+ };
+
+ $dest_info->{efisize} = PVE::QemuServer::get_efivars_size($conf)
+ if $disk eq 'efidisk0';
+
+ my $newdrive = PVE::QemuServer::clone_disk(
+ $storecfg,
+ $source_info,
+ $dest_info,
+ 1,
+ $newvollist,
+ undef,
+ undef,
+ undef,
+ $movelimit,
+ );
+ $conf->{$disk} = PVE::QemuServer::print_drive($newdrive);
+
+ PVE::QemuConfig->add_unused_volume($conf, $old_volid) if !$param->{delete};
+
+ # convert moved disk to base if part of template
+ PVE::QemuServer::template_create($vmid, $conf, $disk)
+ if PVE::QemuConfig->is_template($conf);
+
+ PVE::QemuConfig->write_config($vmid, $conf);
+
+ my $do_trim = PVE::QemuServer::get_qga_key($conf, 'fstrim_cloned_disks');
+ if ($running && $do_trim && PVE::QemuServer::qga_check_running($vmid)) {
+ eval { mon_cmd($vmid, "guest-fstrim") };
+ }
+
+ eval {
+ # try to deactivate volumes - avoid lvm LVs to be active on several nodes
+ PVE::Storage::deactivate_volumes($storecfg, [ $newdrive->{file} ])
+ if !$running;
+ };
+ warn $@ if $@;
+ };
+ if (my $err = $@) {
+ foreach my $volid (@$newvollist) {
+ eval { PVE::Storage::vdisk_free($storecfg, $volid) };
+ warn $@ if $@;
+ }
+ die "storage migration failed: $err";
+ }
+ if ($param->{delete}) {
eval {
- local $SIG{INT} =
- local $SIG{TERM} =
- local $SIG{QUIT} =
- local $SIG{HUP} = sub { die "interrupted by signal\n"; };
+ PVE::Storage::deactivate_volumes($storecfg, [$old_volid]);
+ PVE::Storage::vdisk_free($storecfg, $old_volid);
+ };
+ warn $@ if $@;
+ }
+ };
- warn "moving disk with snapshots, snapshots will not be moved!\n"
- if $snapshotted;
+ my $load_and_check_reassign_configs = sub {
+ my $vmlist = PVE::Cluster::get_vmlist()->{ids};
- my $bwlimit = extract_param($param, 'bwlimit');
- my $movelimit = PVE::Storage::get_bandwidth_limit('move', [$oldstoreid, $storeid], $bwlimit);
+ die "could not find VM ${vmid}\n" if !exists($vmlist->{$vmid});
+ die "could not find target VM ${target_vmid}\n" if !exists($vmlist->{$target_vmid});
- my $newdrive = PVE::QemuServer::clone_disk($storecfg, $vmid, $running, $disk, $drive, undef,
- $vmid, $storeid, $format, 1, $newvollist, undef, undef, undef, $movelimit, $conf);
+ my $source_node = $vmlist->{$vmid}->{node};
+ my $target_node = $vmlist->{$target_vmid}->{node};
- $conf->{$disk} = PVE::QemuServer::print_drive($newdrive);
+ die "Both VMs need to be on the same node ($source_node != $target_node)\n"
+ if $source_node ne $target_node;
- PVE::QemuConfig->add_unused_volume($conf, $old_volid) if !$param->{delete};
+ my $source_conf = PVE::QemuConfig->load_config($vmid);
+ PVE::QemuConfig->check_lock($source_conf);
+ my $target_conf = PVE::QemuConfig->load_config($target_vmid);
+ PVE::QemuConfig->check_lock($target_conf);
- # convert moved disk to base if part of template
- PVE::QemuServer::template_create($vmid, $conf, $disk)
- if PVE::QemuConfig->is_template($conf);
+ die "Can't move disks from or to template VMs\n"
+ if ($source_conf->{template} || $target_conf->{template});
- PVE::QemuConfig->write_config($vmid, $conf);
+ if ($digest) {
+ eval { PVE::Tools::assert_if_modified($digest, $source_conf->{digest}) };
+ die "VM ${vmid}: $@" if $@;
+ }
- my $do_trim = PVE::QemuServer::parse_guest_agent($conf)->{fstrim_cloned_disks};
- if ($running && $do_trim && PVE::QemuServer::qga_check_running($vmid)) {
- eval { mon_cmd($vmid, "guest-fstrim") };
- }
+ if ($target_digest) {
+ eval { PVE::Tools::assert_if_modified($target_digest, $target_conf->{digest}) };
+ die "VM ${target_vmid}: $@" if $@;
+ }
- eval {
- # try to deactivate volumes - avoid lvm LVs to be active on several nodes
- PVE::Storage::deactivate_volumes($storecfg, [ $newdrive->{file} ])
- if !$running;
- };
- warn $@ if $@;
- };
- if (my $err = $@) {
- foreach my $volid (@$newvollist) {
- eval { PVE::Storage::vdisk_free($storecfg, $volid) };
- warn $@ if $@;
+ die "Disk '${disk}' for VM '$vmid' does not exist\n" if !defined($source_conf->{$disk});
+
+ die "Target disk key '${target_disk}' is already in use for VM '$target_vmid'\n"
+ if $target_conf->{$target_disk};
+
+ my $drive = PVE::QemuServer::parse_drive(
+ $disk,
+ $source_conf->{$disk},
+ );
+ die "failed to parse source disk - $@\n" if !$drive;
+
+ my $source_volid = $drive->{file};
+
+ die "disk '${disk}' has no associated volume\n" if !$source_volid;
+ die "CD drive contents can't be moved to another VM\n"
+ if PVE::QemuServer::drive_is_cdrom($drive, 1);
+
+ my $storeid = PVE::Storage::parse_volume_id($source_volid, 1);
+ die "Volume '$source_volid' not managed by PVE\n" if !defined($storeid);
+
+ die "Can't move disk used by a snapshot to another VM\n"
+ if PVE::QemuServer::Drive::is_volume_in_use($storecfg, $source_conf, $disk, $source_volid);
+ die "Storage does not support moving of this disk to another VM\n"
+ if (!PVE::Storage::volume_has_feature($storecfg, 'rename', $source_volid));
+ die "Cannot move disk to another VM while the source VM is running - detach first\n"
+ if PVE::QemuServer::check_running($vmid) && $disk !~ m/^unused\d+$/;
+
+ # now re-parse using target disk slot format
+ if ($target_disk =~ /^unused\d+$/) {
+ $drive = PVE::QemuServer::parse_drive(
+ $target_disk,
+ $source_volid,
+ );
+ } else {
+ $drive = PVE::QemuServer::parse_drive(
+ $target_disk,
+ $source_conf->{$disk},
+ );
+ }
+ die "failed to parse source disk for target disk format - $@\n" if !$drive;
+
+ my $repl_conf = PVE::ReplicationConfig->new();
+ if ($repl_conf->check_for_existing_jobs($target_vmid, 1)) {
+ my $format = (PVE::Storage::parse_volname($storecfg, $source_volid))[6];
+ die "Cannot move disk to a replicated VM. Storage does not support replication!\n"
+ if !PVE::Storage::storage_can_replicate($storecfg, $storeid, $format);
+ }
+
+ return ($source_conf, $target_conf, $drive);
+ };
+
+ my $logfunc = sub {
+ my ($msg) = @_;
+ print STDERR "$msg\n";
+ };
+
+ my $disk_reassignfn = sub {
+ return PVE::QemuConfig->lock_config($vmid, sub {
+ return PVE::QemuConfig->lock_config($target_vmid, sub {
+ my ($source_conf, $target_conf, $drive) = &$load_and_check_reassign_configs();
+
+ my $source_volid = $drive->{file};
+
+ print "moving disk '$disk' from VM '$vmid' to '$target_vmid'\n";
+ my ($storeid, $source_volname) = PVE::Storage::parse_volume_id($source_volid);
+
+ my $fmt = (PVE::Storage::parse_volname($storecfg, $source_volid))[6];
+
+ my $new_volid = PVE::Storage::rename_volume(
+ $storecfg,
+ $source_volid,
+ $target_vmid,
+ );
+
+ $drive->{file} = $new_volid;
+
+ my $boot_order = PVE::QemuServer::device_bootorder($source_conf);
+ if (defined(delete $boot_order->{$disk})) {
+ print "removing disk '$disk' from boot order config\n";
+ my $boot_devs = [ sort { $boot_order->{$a} <=> $boot_order->{$b} } keys %$boot_order ];
+ $source_conf->{boot} = PVE::QemuServer::print_bootorder($boot_devs);
}
- die "storage migration failed: $err";
- }
- if ($param->{delete}) {
- eval {
- PVE::Storage::deactivate_volumes($storecfg, [$old_volid]);
- PVE::Storage::vdisk_free($storecfg, $old_volid);
- };
- warn $@ if $@;
- }
- };
+ delete $source_conf->{$disk};
+ print "removing disk '${disk}' from VM '${vmid}' config\n";
+ PVE::QemuConfig->write_config($vmid, $source_conf);
+
+ my $drive_string = PVE::QemuServer::print_drive($drive);
+
+ if ($target_disk =~ /^unused\d+$/) {
+ $target_conf->{$target_disk} = $drive_string;
+ PVE::QemuConfig->write_config($target_vmid, $target_conf);
+ } else {
+ &$update_vm_api(
+ {
+ node => $node,
+ vmid => $target_vmid,
+ digest => $target_digest,
+ $target_disk => $drive_string,
+ },
+ 1,
+ );
+ }
- return $rpcenv->fork_worker('qmmove', $vmid, $authuser, $realcmd);
+ # remove possible replication snapshots
+ if (PVE::Storage::volume_has_feature(
+ $storecfg,
+ 'replicate',
+ $source_volid),
+ ) {
+ eval {
+ PVE::Replication::prepare(
+ $storecfg,
+ [$new_volid],
+ undef,
+ 1,
+ undef,
+ $logfunc,
+ )
+ };
+ if (my $err = $@) {
+ print "Failed to remove replication snapshots on moved disk " .
+ "'$target_disk'. Manual cleanup could be necessary.\n";
+ }
+ }
+ });
+ });
};
- return PVE::QemuConfig->lock_config($vmid, $updatefn);
+ if ($target_vmid && $storeid) {
+ my $msg = "either set 'storage' or 'target-vmid', but not both";
+ raise_param_exc({ 'target-vmid' => $msg, 'storage' => $msg });
+ } elsif ($target_vmid) {
+ $rpcenv->check_vm_perm($authuser, $target_vmid, undef, ['VM.Config.Disk'])
+ if $authuser ne 'root@pam';
+
+ raise_param_exc({ 'target-vmid' => "must be different than source VMID to reassign disk" })
+ if $vmid eq $target_vmid;
+
+ my (undef, undef, $drive) = &$load_and_check_reassign_configs();
+ my $storage = PVE::Storage::parse_volume_id($drive->{file});
+ $rpcenv->check($authuser, "/storage/$storage", ['Datastore.AllocateSpace']);
+
+ return $rpcenv->fork_worker(
+ 'qmmove',
+ "${vmid}-${disk}>${target_vmid}-${target_disk}",
+ $authuser,
+ $disk_reassignfn
+ );
+ } elsif ($storeid) {
+ $rpcenv->check($authuser, "/storage/$storeid", ['Datastore.AllocateSpace']);
+
+ die "cannot move disk '$disk', only configured disks can be moved to another storage\n"
+ if $disk =~ m/^unused\d+$/;
+
+ $load_and_check_move->(); # early checks before forking/locking
+
+ my $realcmd = sub {
+ PVE::QemuConfig->lock_config($vmid, $move_updatefn);
+ };
+
+ return $rpcenv->fork_worker('qmmove', $vmid, $authuser, $realcmd);
+ } else {
+ my $msg = "both 'storage' and 'target-vmid' missing, either needs to be set";
+ raise_param_exc({ 'target-vmid' => $msg, 'storage' => $msg });
+ }
}});
my $check_vm_disks_local = sub {
check => ['perm', '/vms/{vmid}', [ 'VM.Migrate' ]],
},
parameters => {
- additionalProperties => 0,
+ additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
vmid => get_standard_option('pve-vmid', { completion => \&PVE::QemuServer::complete_vmid }),
my $repl_conf = PVE::ReplicationConfig->new();
my $is_replicated = $repl_conf->check_for_existing_jobs($vmid, 1);
my $is_replicated_to_target = defined($repl_conf->find_local_replication_job($vmid, $target));
- if ($is_replicated && !$is_replicated_to_target) {
- if ($param->{force}) {
- warn "WARNING: Node '$target' is not a replication target. Existing replication " .
- "jobs will fail after migration!\n";
- } else {
- die "Cannot live-migrate replicated VM to node '$target' - not a replication target." .
- " Use 'force' to override.\n";
- }
+ if (!$param->{force} && $is_replicated && !$is_replicated_to_target) {
+ die "Cannot live-migrate replicated VM to node '$target' - not a replication " .
+ "target. Use 'force' to override.\n";
}
} else {
warn "VM isn't running. Doing offline migration instead.\n" if $param->{online};
}
my $storecfg = PVE::Storage::config();
-
if (my $targetstorage = $param->{targetstorage}) {
- my $check_storage = sub {
- my ($target_sid) = @_;
- PVE::Storage::storage_check_node($storecfg, $target_sid, $target);
- $rpcenv->check($authuser, "/storage/$target_sid", ['Datastore.AllocateSpace']);
- my $scfg = PVE::Storage::storage_config($storecfg, $target_sid);
- raise_param_exc({ targetstorage => "storage '$target_sid' does not support vm images"})
- if !$scfg->{content}->{images};
- };
-
my $storagemap = eval { PVE::JSONSchema::parse_idmap($targetstorage, 'pve-storage-id') };
raise_param_exc({ targetstorage => "failed to parse storage map: $@" })
if $@;
$rpcenv->check_vm_perm($authuser, $vmid, undef, ['VM.Config.Disk'])
if !defined($storagemap->{identity});
- foreach my $source (values %{$storagemap->{entries}}) {
- $check_storage->($source);
+ foreach my $target_sid (values %{$storagemap->{entries}}) {
+ $check_storage_access_migrate->($rpcenv, $authuser, $storecfg, $target_sid, $target);
}
- $check_storage->($storagemap->{default})
+ $check_storage_access_migrate->($rpcenv, $authuser, $storecfg, $storagemap->{default}, $target)
if $storagemap->{default};
PVE::QemuServer::check_storage_availability($storecfg, $conf, $target)
check => ['perm', '/vms/{vmid}', [ 'VM.Monitor' ]],
},
parameters => {
- additionalProperties => 0,
+ additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
vmid => get_standard_option('pve-vmid'),
check => ['perm', '/vms/{vmid}', [ 'VM.Config.Disk' ]],
},
parameters => {
- additionalProperties => 0,
- properties => {
+ additionalProperties => 0,
+ properties => {
node => get_standard_option('pve-node'),
vmid => get_standard_option('pve-vmid', { completion => \&PVE::QemuServer::complete_vmid }),
skiplock => get_standard_option('skiplock'),
proxyto => 'node',
protected => 1, # qemu pid files are only readable by root
parameters => {
- additionalProperties => 0,
+ additionalProperties => 0,
properties => {
vmid => get_standard_option('pve-vmid', { completion => \&PVE::QemuServer::complete_vmid }),
node => get_standard_option('pve-node'),
user => 'all',
},
parameters => {
- additionalProperties => 0,
+ additionalProperties => 0,
properties => {
vmid => get_standard_option('pve-vmid'),
node => get_standard_option('pve-node'),
my $snapname = extract_param($param, 'snapname');
- my $realcmd = sub {
+ my $lock_obtained;
+ my $do_delete = sub {
+ $lock_obtained = 1;
PVE::Cluster::log_msg('info', $authuser, "delete snapshot VM $vmid: $snapname");
PVE::QemuConfig->snapshot_delete($vmid, $snapname, $param->{force});
};
+ my $realcmd = sub {
+ if ($param->{force}) {
+ $do_delete->();
+ } else {
+ eval { PVE::GuestHelpers::guest_migration_lock($vmid, 10, $do_delete); };
+ if (my $err = $@) {
+ die $err if $lock_obtained;
+ die "Failed to obtain guest migration lock - replication running?\n";
+ }
+ }
+ };
+
return $rpcenv->fork_worker('qmdelsnapshot', $vmid, $authuser, $realcmd);
}});
},
},
- returns => { type => 'null'},
+ returns => {
+ type => 'string',
+ description => "the task ID.",
+ },
code => sub {
my ($param) = @_;
my $disk = extract_param($param, 'disk');
- my $updatefn = sub {
-
+ my $load_and_check = sub {
my $conf = PVE::QemuConfig->load_config($vmid);
PVE::QemuConfig->check_lock($conf);
die "you can't convert a VM to template if VM is running\n"
if PVE::QemuServer::check_running($vmid);
- my $realcmd = sub {
- PVE::QemuServer::template_create($vmid, $conf, $disk);
- };
+ return $conf;
+ };
- $conf->{template} = 1;
- PVE::QemuConfig->write_config($vmid, $conf);
+ $load_and_check->();
+
+ my $realcmd = sub {
+ PVE::QemuConfig->lock_config($vmid, sub {
+ my $conf = $load_and_check->();
- return $rpcenv->fork_worker('qmtemplate', $vmid, $authuser, $realcmd);
+ $conf->{template} = 1;
+ PVE::QemuConfig->write_config($vmid, $conf);
+
+ PVE::QemuServer::template_create($vmid, $conf, $disk);
+ });
};
- PVE::QemuConfig->lock_config($vmid, $updatefn);
- return;
+ return $rpcenv->fork_worker('qmtemplate', $vmid, $authuser, $realcmd);
}});
__PACKAGE__->register_method({