diff --git a/PVE/API2/Qemu.pm b/PVE/API2/Qemu.pm
index 25c2836..5e6fd42 100644
--- a/PVE/API2/Qemu.pm
+++ b/PVE/API2/Qemu.pm
@@ -20,6 +20,8 @@ use PVE::ReplicationConfig;
 use PVE::GuestHelpers;
 use PVE::QemuConfig;
 use PVE::QemuServer;
+use PVE::QemuServer::Drive;
+use PVE::QemuServer::CPUConfig;
 use PVE::QemuServer::Monitor qw(mon_cmd);
 use PVE::QemuMigrate;
 use PVE::RPCEnvironment;
@@ -62,7 +64,7 @@ my $NEW_DISK_RE = qr!^(([^/:\s]+):)?(\d+(\.\d+)?)$!;
 my $check_storage_access = sub {
     my ($rpcenv, $authuser, $storecfg, $vmid, $settings, $default_storage) = @_;
-    PVE::QemuServer::foreach_drive($settings, sub {
+    PVE::QemuConfig->foreach_volume($settings, sub {
        my ($ds, $drive) = @_;
        my $isCDROM = PVE::QemuServer::drive_is_cdrom($drive);
@@ -95,7 +97,7 @@ my $check_storage_access_clone = sub {
     my $sharedvm = 1;
-    PVE::QemuServer::foreach_drive($conf, sub {
+    PVE::QemuConfig->foreach_volume($conf, sub {
        my ($ds, $drive) = @_;
        my $isCDROM = PVE::QemuServer::drive_is_cdrom($drive);
@@ -206,7 +208,7 @@ my $create_disks = sub {
            my $size = PVE::Storage::volume_size_info($storecfg, $volid);
-           die "volume $volid does not exists\n" if !$size;
+           die "volume $volid does not exist\n" if !$size;
            $disk->{size} = $size;
        }
@@ -215,7 +217,7 @@ my $create_disks = sub {
        }
    };
-    eval { PVE::QemuServer::foreach_drive($settings, $code); };
+    eval { PVE::QemuConfig->foreach_volume($settings, $code); };
    # free allocated images on error
    if (my $err = $@) {
@@ -235,6 +237,26 @@ my $create_disks = sub {
    return $vollist;
 };
+my $check_cpu_model_access = sub {
+    my ($rpcenv, $authuser, $new, $existing) = @_;
+
+    return if !defined($new->{cpu});
+
+    my $cpu = PVE::JSONSchema::check_format('pve-vm-cpu-conf', $new->{cpu});
+    return if !$cpu || !$cpu->{cputype}; # always allow default
+    my $cputype = $cpu->{cputype};
+
+    if ($existing && $existing->{cpu}) {
+        # changing only other settings doesn't require permissions for CPU model
+        my $existingCpu = PVE::JSONSchema::check_format('pve-vm-cpu-conf', $existing->{cpu});
+        return if $existingCpu->{cputype} eq $cputype;
+    }
+
+    if (PVE::QemuServer::CPUConfig::is_custom_model($cputype)) {
+        $rpcenv->check($authuser, "/nodes", ['Sys.Audit']);
+    }
+};
+
 my $cpuoptions = {
    'cores' => 1,
    'cpu' => 1,
@@ -315,7 +337,6 @@ my $check_vm_modify_config_perm = sub {
            # some checks (e.g., disk, serial port, usb) need to be done somewhere
            # else, as there the permission can be value dependend
            next if PVE::QemuServer::is_valid_drivename($opt);
-           next if $opt eq 'vmstate';
            next if $opt eq 'cdrom';
            next if $opt =~ m/^(?:unused|serial|usb)\d+$/;
@@ -338,6 +359,10 @@ my $check_vm_modify_config_perm = sub {
                $rpcenv->check_vm_perm($authuser, $vmid, $pool, ['VM.Config.Disk']);
            } elsif ($cloudinitoptions->{$opt} || ($opt =~ m/^(?:net|ipconfig)\d+$/)) {
                $rpcenv->check_vm_perm($authuser, $vmid, $pool, ['VM.Config.Network']);
+           } elsif ($opt eq 'vmstate') {
+               # the user needs Disk and PowerMgmt privileges to change the vmstate
+               # also needs privileges on the storage, that will be checked later
+               $rpcenv->check_vm_perm($authuser, $vmid, $pool, ['VM.Config.Disk', 'VM.PowerMgmt' ]);
            } else {
                # catches hostpci\d+, args, lock, etc.
                # new options will be checked here
@@ -397,6 +422,26 @@ __PACKAGE__->register_method({
     return $res;
 }});
+my $parse_restore_archive = sub {
+    my ($storecfg, $archive) = @_;
+
+    my ($archive_storeid, $archive_volname) = PVE::Storage::parse_volume_id($archive, 1);
+
+    if (defined($archive_storeid)) {
+        my $scfg = PVE::Storage::storage_config($storecfg, $archive_storeid);
+        if ($scfg->{type} eq 'pbs') {
+            return {
+                type => 'pbs',
+                volid => $archive,
+            };
+        }
+    }
+    my $path = PVE::Storage::abs_filesystem_path($storecfg, $archive);
+    return {
+        type => 'file',
+        path => $path,
+    };
+};
 __PACKAGE__->register_method({
@@ -419,7 +464,7 @@ __PACKAGE__->register_method({
            node => get_standard_option('pve-node'),
            vmid => get_standard_option('pve-vmid', { completion => \&PVE::Cluster::complete_next_vmid }),
            archive => {
-               description => "The backup file.",
+               description => "The backup archive. Either the file system path to a .tar or .vma file (use '-' to pipe data from stdin) or a proxmox storage backup volume identifier.",
                type => 'string',
                optional => 1,
                maxLength => 255,
@@ -519,6 +564,8 @@ __PACKAGE__->register_method({
        &$check_vm_modify_config_perm($rpcenv, $authuser, $vmid, $pool, [ keys %$param]);
+       &$check_cpu_model_access($rpcenv, $authuser, $param);
+
        foreach my $opt (keys %$param) {
            if (PVE::QemuServer::is_valid_drivename($opt)) {
                my $drive = PVE::QemuServer::parse_drive($opt, $param->{$opt});
@@ -537,9 +584,11 @@ __PACKAGE__->register_method({
            if ($archive eq '-') {
                die "pipe requires cli environment\n" if $rpcenv->{type} ne 'cli';
+               $archive = { type => 'pipe' };
            } else {
                PVE::Storage::check_volume_access($rpcenv, $authuser, $storecfg, $vmid, $archive);
-               $archive = PVE::Storage::abs_filesystem_path($storecfg, $archive);
+
+               $archive = $parse_restore_archive->($storecfg, $archive);
            }
        }
@@ -556,12 +605,19 @@ __PACKAGE__->register_method({
            die "$emsg vm is running\n" if PVE::QemuServer::check_running($vmid);
            my $realcmd = sub {
-               PVE::QemuServer::restore_archive($archive, $vmid, $authuser, {
+               my $restore_options = {
                    storage => $storage,
                    pool => $pool,
                    unique => $unique,
                    bwlimit => $bwlimit,
-               });
+               };
+               if ($archive->{type} eq 'file' || $archive->{type} eq 'pipe') {
+                   PVE::QemuServer::restore_file_archive($archive->{path} // '-', $vmid, $authuser, $restore_options);
+               } elsif ($archive->{type} eq 'pbs') {
+                   PVE::QemuServer::restore_proxmox_backup_archive($archive->{volid}, $vmid, $authuser, $restore_options);
+               } else {
+                   die "unknown backup archive type\n";
+               }
                my $restored_conf = PVE::QemuConfig->load_config($vmid);
                # Convert restored VM to template if backup was VM template
                if (PVE::QemuConfig->is_template($restored_conf)) {
@@ -571,17 +627,18 @@ __PACKAGE__->register_method({
                }
                PVE::AccessControl::add_vm_to_pool($vmid, $pool) if $pool;
-
-               if ($start_after_create) {
-                   eval { PVE::API2::Qemu->vm_start({ vmid => $vmid, node => $node }) };
-                   warn $@ if $@;
-               }
            };
            # ensure no old replication state are exists
            PVE::ReplicationState::delete_guest_states($vmid);
-           return PVE::QemuConfig->lock_config_full($vmid, 1, $realcmd);
+           PVE::QemuConfig->lock_config_full($vmid, 1, $realcmd);
+
+           if ($start_after_create) {
+               print "Execute autostart\n";
+               eval { PVE::API2::Qemu->vm_start({ vmid => $vmid, node => $node }) };
+               warn $@ if $@;
+           }
        };
        my $createfn = sub {
@@ -589,19 +646,15 @@ __PACKAGE__->register_method({
            PVE::ReplicationState::delete_guest_states($vmid);
            my $realcmd = sub {
-
-               my $vollist = [];
-
                my $conf = $param;
-
                my $arch = PVE::QemuServer::get_vm_arch($conf);
+               my $vollist = [];
                eval {
-
                    $vollist = &$create_disks($rpcenv, $authuser, $conf, $arch, $storecfg, $vmid, $pool, $param, $storage);
                    if (!$conf->{bootdisk}) {
-                       my $firstdisk = PVE::QemuServer::resolve_first_disk($conf);
+                       my $firstdisk = PVE::QemuServer::Drive::resolve_first_disk($conf);
                        $conf->{bootdisk} = $firstdisk if $firstdisk;
                    }
@@ -823,7 +876,8 @@ __PACKAGE__->register_method({
    path => '{vmid}/config',
    method => 'GET',
    proxyto => 'node',
-    description => "Get current virtual machine configuration. This does not include pending configuration changes (see 'pending' API).",
+    description => "Get the virtual machine configuration with pending configuration " .
+       "changes applied. Set the 'current' parameter to get the current configuration instead.",
    permissions => {
        check => ['perm', '/vms/{vmid}', [ 'VM.Audit' ]],
    },
@@ -849,7 +903,7 @@ __PACKAGE__->register_method({
        },
    },
    returns => {
-       description => "The current VM configuration.",
+       description => "The VM configuration.",
        type => "object",
        properties => PVE::QemuServer::json_config_properties({
            digest => {
@@ -881,7 +935,7 @@ __PACKAGE__->register_method({
    path => '{vmid}/pending',
    method => 'GET',
    proxyto => 'node',
-    description => "Get virtual machine configuration, including pending changes.",
+    description => "Get the virtual machine configuration with both current and pending values.",
    permissions => {
        check => ['perm', '/vms/{vmid}', [ 'VM.Audit' ]],
    },
@@ -1041,7 +1095,7 @@ my $update_vm_api = sub {
                return if PVE::QemuServer::drive_is_cdrom($drive);
                my ($storeid, $volname) = PVE::Storage::parse_volume_id($volid, 1);
-               return if $volname eq 'cloudinit';
+               return if defined($volname) && $volname eq 'cloudinit';
                my $format;
                if ($volid =~ $NEW_DISK_RE) {
@@ -1091,6 +1145,8 @@ my $update_vm_api = sub {
        die "checksum missmatch (file change by other user?)\n"
            if $digest && $digest ne $conf->{digest};
+       &$check_cpu_model_access($rpcenv, $authuser, $param, $conf);
+
        # FIXME: 'suspended' lock should probabyl be a state or "weak" lock?!
        if (scalar(@delete) && grep { $_ eq 'vmstate'} @delete) {
            if (defined($conf->{lock}) && $conf->{lock} eq 'suspended') {
@@ -1098,6 +1154,7 @@ my $update_vm_api = sub {
                push @delete, 'lock'; # this is the real deal to write it out
            }
            push @delete, 'runningmachine' if $conf->{runningmachine};
+           push @delete, 'runningcpu' if $conf->{runningcpu};
        }
        PVE::QemuConfig->check_lock($conf) if !$skiplock;
@@ -1152,8 +1209,6 @@ my $update_vm_api = sub {
                    }
                } elsif ($opt eq 'vmstate') {
                    PVE::QemuConfig->check_protection($conf, "can't remove vmstate '$val'");
-                   # the user needs Disk and PowerMgmt privileges to remove the vmstate
-                   $rpcenv->check_vm_perm($authuser, $vmid, undef, ['VM.Config.Disk', 'VM.PowerMgmt' ]);
                    if (PVE::QemuServer::try_deallocate_drive($storecfg, $vmid, $conf, $opt, { file => $val }, $rpcenv, $authuser, 1)) {
                        delete $conf->{$opt};
                        PVE::QemuConfig->write_config($vmid, $conf);
@@ -1240,13 +1295,13 @@ my $update_vm_api = sub {
        $conf = PVE::QemuConfig->load_config($vmid); # update/reload
+       my $errors = {};
        if ($running) {
-           my $errors = {};
            PVE::QemuServer::vmconfig_hotplug_pending($vmid, $conf, $storecfg, $modified, $errors);
-           raise_param_exc($errors) if scalar(keys %$errors);
        } else {
-           PVE::QemuServer::vmconfig_apply_pending($vmid, $conf, $storecfg, $running);
+           PVE::QemuServer::vmconfig_apply_pending($vmid, $conf, $storecfg, $running, $errors);
        }
+       raise_param_exc($errors) if scalar(keys %$errors);
        return;
    };
@@ -1438,38 +1493,52 @@ __PACKAGE__->register_method({
        raise_param_exc({ skiplock => "Only root may use this option." })
            if $skiplock && $authuser ne 'root@pam';
-       # test if VM exists
-       my $conf = PVE::QemuConfig->load_config($vmid);
-       my $storecfg = PVE::Storage::config();
-       PVE::QemuConfig->check_protection($conf, "can't remove VM $vmid");
-       die "unable to remove VM $vmid - used in HA resources\n"
-           if PVE::HA::Config::vm_is_ha_managed($vmid);
-
-       if (!$param->{purge}) {
-           # don't allow destroy if with replication jobs but no purge param
-           my $repl_conf = PVE::ReplicationConfig->new();
-           $repl_conf->check_for_existing_jobs($vmid);
-       }
+       my $early_checks = sub {
+           # test if VM exists
+           my $conf = PVE::QemuConfig->load_config($vmid);
+           PVE::QemuConfig->check_protection($conf, "can't remove VM $vmid");
+
+           my $ha_managed = PVE::HA::Config::service_is_configured("vm:$vmid");
+
+           if (!$param->{purge}) {
+               die "unable to remove VM $vmid - used in HA resources and purge parameter not set.\n"
+                   if $ha_managed;
+               # don't allow destroy if with replication jobs but no purge param
+               my $repl_conf = PVE::ReplicationConfig->new();
+               $repl_conf->check_for_existing_jobs($vmid);
+           }
+
+           die "VM $vmid is running - destroy failed\n"
+               if PVE::QemuServer::check_running($vmid);
+
+           return $ha_managed;
+       };
-       # early tests (repeat after locking)
-       die "VM $vmid is running - destroy failed\n"
-           if PVE::QemuServer::check_running($vmid);
+       $early_checks->();
        my $realcmd = sub {
            my $upid = shift;
+           my $storecfg = PVE::Storage::config();
+
            syslog('info', "destroy VM $vmid: $upid\n");
            PVE::QemuConfig->lock_config($vmid, sub {
-               die "VM $vmid is running - destroy failed\n"
-                   if (PVE::QemuServer::check_running($vmid));
+               # repeat, config might have changed
+               my $ha_managed = $early_checks->();
                PVE::QemuServer::destroy_vm($storecfg, $vmid, $skiplock, { lock => 'destroyed' });
                PVE::AccessControl::remove_vm_access($vmid);
                PVE::Firewall::remove_vmfw_conf($vmid);
                if ($param->{purge}) {
+                   print "purging VM $vmid from related configurations..\n";
                    PVE::ReplicationConfig::remove_vmid_jobs($vmid);
                    PVE::VZDump::Plugin::remove_vmid_from_backup_jobs($vmid);
+
+                   if ($ha_managed) {
+                       PVE::HA::Config::delete_service_from_config("vm:$vmid");
+                       print "NOTE: removed VM $vmid from HA resource configuration.\n";
+                   }
                }
                # only now remove the zombie config, else we can have reuse race
@@ -1987,11 +2056,19 @@ __PACKAGE__->register_method({
                optional => 1,
            },
            machine => get_standard_option('pve-qemu-machine'),
-           targetstorage => {
-               description => "Target storage for the migration. (Can be '1' to use the same storage id as on the source node.)",
+           'force-cpu' => {
+               description => "Override QEMU's -cpu argument with the given string.",
                type => 'string',
-               optional => 1
-           }
+               optional => 1,
+           },
+           targetstorage => get_standard_option('pve-targetstorage'),
+           timeout => {
+               description => "Wait maximal timeout seconds.",
+               type => 'integer',
+               minimum => 0,
+               default => 'max(30, vm memory in GiB)',
+               optional => 1,
+           },
        },
    },
    returns => {
@@ -2005,8 +2082,10 @@ __PACKAGE__->register_method({
        my $node = extract_param($param, 'node');
        my $vmid = extract_param($param, 'vmid');
+       my $timeout = extract_param($param, 'timeout');
        my $machine = extract_param($param, 'machine');
+       my $force_cpu = extract_param($param, 'force-cpu');
        my $get_root_param = sub {
            my $value = extract_param($param, $_[0]);
@@ -2022,15 +2101,33 @@ __PACKAGE__->register_method({
        my $migration_network = $get_root_param->('migration_network');
        my $targetstorage = $get_root_param->('targetstorage');
-       raise_param_exc({ targetstorage => "targetstorage can only by used with migratedfrom." })
-           if $targetstorage && !$migratedfrom;
+       my $storagemap;
+
+       if ($targetstorage) {
+           raise_param_exc({ targetstorage => "targetstorage can only by used with migratedfrom." })
+               if !$migratedfrom;
+           $storagemap = eval { PVE::JSONSchema::parse_idmap($targetstorage, 'pve-storage-id') };
+           raise_param_exc({ targetstorage => "failed to parse storage map: $@" })
+               if $@;
+       }
        # read spice ticket from STDIN
        my $spice_ticket;
+       my $nbd_protocol_version = 0;
+       my $replicated_volumes = {};
        if ($stateuri && ($stateuri eq 'tcp' || $stateuri eq 'unix') && $migratedfrom && ($rpcenv->{type} eq 'cli')) {
-           if (defined(my $line = <STDIN>)) {
+           while (defined(my $line = <STDIN>)) {
                chomp $line;
-               $spice_ticket = $line;
+               if ($line =~ m/^spice_ticket: (.+)$/) {
+                   $spice_ticket = $1;
+               } elsif ($line =~ m/^nbd_protocol_version: (\d+)$/) {
+                   $nbd_protocol_version = $1;
+               } elsif ($line =~ m/^replicated_volume: (.*)$/) {
+                   $replicated_volumes->{$1} = 1;
+               } else {
+                   # fallback for old source node
+                   $spice_ticket = $line;
+               }
            }
        }
@@ -2058,8 +2155,25 @@ __PACKAGE__->register_method({
            syslog('info', "start VM $vmid: $upid\n");
-           PVE::QemuServer::vm_start($storecfg, $vmid, $stateuri, $skiplock, $migratedfrom, undef,
-                                     $machine, $spice_ticket, $migration_network, $migration_type, $targetstorage);
+           my $migrate_opts = {
+               migratedfrom => $migratedfrom,
+               spice_ticket => $spice_ticket,
+               network => $migration_network,
+               type => $migration_type,
+               storagemap => $storagemap,
+               nbd_proto_version => $nbd_protocol_version,
+               replicated_volumes => $replicated_volumes,
+           };
+
+           my $params = {
+               statefile => $stateuri,
+               skiplock => $skiplock,
+               forcemachine => $machine,
+               timeout => $timeout,
+               forcecpu => $force_cpu,
+           };
+
+           PVE::QemuServer::vm_start($storecfg, $vmid, $params, $migrate_opts);
            return;
        };
@@ -2506,6 +2620,8 @@ __PACKAGE__->register_method({
            if $skiplock && $authuser ne 'root@pam';
        my $nocheck = extract_param($param, 'nocheck');
+       raise_param_exc({ nocheck => "Only root may use this option." })
+           if $nocheck && $authuser ne 'root@pam';
        my $to_disk_suspended;
        eval {
@@ -2527,7 +2643,7 @@ __PACKAGE__->register_method({
                PVE::QemuServer::vm_resume($vmid, $skiplock, $nocheck);
            } else {
                my $storecfg = PVE::Storage::config();
-               PVE::QemuServer::vm_start($storecfg, $vmid, undef, $skiplock);
+               PVE::QemuServer::vm_start($storecfg, $vmid, { skiplock => $skiplock });
            }
            return;
@@ -2728,32 +2844,24 @@ __PACKAGE__->register_method({
        my ($param) = @_;
        my $rpcenv = PVE::RPCEnvironment::get();
-
-       my $authuser = $rpcenv->get_user();
+       my $authuser = $rpcenv->get_user();
        my $node = extract_param($param, 'node');
-
        my $vmid = extract_param($param, 'vmid');
-
        my $newid = extract_param($param, 'newid');
-
        my $pool = extract_param($param, 'pool');
-
-       if (defined($pool)) {
-           $rpcenv->check_pool_exist($pool);
-       }
+       $rpcenv->check_pool_exist($pool) if defined($pool);
        my $snapname = extract_param($param, 'snapname');
-
        my $storage = extract_param($param, 'storage');
-
        my $format = extract_param($param, 'format');
-
        my $target = extract_param($param, 'target');
        my $localnode = PVE::INotify::nodename();
-       undef $target if $target && ($target eq $localnode || $target eq 'localhost');
+       if ($target && ($target eq $localnode || $target eq 'localhost')) {
+           undef $target;
+       }
        PVE::Cluster::check_node_exists($target) if $target;
@@ -2771,33 +2879,23 @@ __PACKAGE__->register_method({
            }
        }
-       PVE::Cluster::check_cfs_quorum();
+       PVE::Cluster::check_cfs_quorum();
        my $running = PVE::QemuServer::check_running($vmid) || 0;
-       # exclusive lock if VM is running - else shared lock is enough;
-       my $shared_lock = $running ? 0 : 1;
-
        my $clonefn = sub {
-
-           # do all tests after lock
-           # we also try to do all tests before we fork the worker
+           # do all tests after lock but before forking worker - if possible
            my $conf = PVE::QemuConfig->load_config($vmid);
-
            PVE::QemuConfig->check_lock($conf);
            my $verify_running = PVE::QemuServer::check_running($vmid) || 0;
-
            die "unexpected state change\n" if $verify_running != $running;
            die "snapshot '$snapname' does not exist\n"
                if $snapname && !defined( $conf->{snapshots}->{$snapname});
-           my $full = extract_param($param, 'full');
-           if (!defined($full)) {
-               $full = !PVE::QemuConfig->is_template($conf);
-           }
+           my $full = extract_param($param, 'full') // !PVE::QemuConfig->is_template($conf);
            die "parameter 'storage' not allowed for linked clones\n"
                if defined($storage) && !$full;
@@ -2809,10 +2907,10 @@ __PACKAGE__->register_method({
            my $sharedvm = &$check_storage_access_clone($rpcenv, $authuser, $storecfg, $oldconf, $storage);
-           die "can't clone VM to node '$target' (VM uses local storage)\n" if $target && !$sharedvm;
+           die "can't clone VM to node '$target' (VM uses local storage)\n"
+               if $target && !$sharedvm;
            my $conffile = PVE::QemuConfig->config_file($newid);
-
            die "unable to create VM $newid: config file already exists\n"
                if -f $conffile;
@@ -2865,8 +2963,7 @@ __PACKAGE__->register_method({
            my $smbios1 = PVE::QemuServer::parse_smbios1($newconf->{smbios1} || '');
            $smbios1->{uuid} = PVE::QemuServer::generate_uuid();
            $newconf->{smbios1} = PVE::QemuServer::print_smbios1($smbios1);
-
-           # auto generate a new vmgenid if the option was set
+           # auto generate a new vmgenid only if the option was set for template
            if ($newconf->{vmgenid}) {
                $newconf->{vmgenid} = PVE::QemuServer::generate_uuid();
            }
@@ -2876,11 +2973,7 @@ __PACKAGE__->register_method({
            if ($param->{name}) {
                $newconf->{name} = $param->{name};
            } else {
-               if ($oldconf->{name}) {
-                   $newconf->{name} = "Copy-of-$oldconf->{name}";
-               } else {
-                   $newconf->{name} = "Copy-of-VM-$vmid";
-               }
+               $newconf->{name} = "Copy-of-VM-" . ($oldconf->{name} // $vmid);
            }
            if ($param->{description}) {
@@ -2888,6 +2981,7 @@ __PACKAGE__->register_method({
            }
            # create empty/temp config - this fails if VM already exists on other node
+           # FIXME use PVE::QemuConfig->create_and_lock_config and adapt code
            PVE::Tools::file_set_contents($conffile, "# qmclone temporary file\nlock: clone\n");
            my $realcmd = sub {
@@ -2912,6 +3006,7 @@ __PACKAGE__->register_method({
                foreach my $opt (keys %$drives) {
                    my $drive = $drives->{$opt};
                    my $skipcomplete = ($total_jobs != $i); # finish after last drive
+                   my $completion = $skipcomplete ? 'skip' : 'complete';
                    my $src_sid = PVE::Storage::parse_volume_id($drive->{file});
                    my $storage_list = [ $src_sid ];
@@ -2920,7 +3015,7 @@ __PACKAGE__->register_method({
                    my $newdrive = PVE::QemuServer::clone_disk($storecfg, $vmid, $running, $opt, $drive, $snapname,
                                                               $newid, $storage, $format, $fullclone->{$opt}, $newvollist,
-                                                              $jobs, $skipcomplete, $oldconf->{agent}, $clonelimit);
+                                                              $jobs, $completion, $oldconf->{agent}, $clonelimit, $oldconf);
                    $newconf->{$opt} = PVE::QemuServer::print_drive($newdrive);
@@ -2952,16 +3047,18 @@ __PACKAGE__->register_method({
                PVE::AccessControl::add_vm_to_pool($newid, $pool) if $pool;
            };
            if (my $err = $@) {
-               unlink $conffile;
-
                eval { PVE::QemuServer::qemu_blockjobs_cancel($vmid, $jobs) };
-
                sleep 1; # some storage like rbd need to wait before release volume - really?
                foreach my $volid (@$newvollist) {
                    eval { PVE::Storage::vdisk_free($storecfg, $volid); };
                    warn $@ if $@;
                }
+
+               PVE::Firewall::remove_vmfw_conf($newid);
+
+               unlink $conffile; # avoid races -> last thing before die
+
                die "clone failed: $err";
            }
@@ -2973,11 +3070,17 @@ __PACKAGE__->register_method({
            return $rpcenv->fork_worker('qmclone', $vmid, $authuser, $realcmd);
        };
-       return PVE::QemuConfig->lock_config_mode($vmid, 1, $shared_lock, sub {
-           # Aquire exclusive lock lock for $newid
+       # Aquire exclusive lock lock for $newid
+       my $lock_target_vm = sub {
            return PVE::QemuConfig->lock_config_full($newid, 1, $clonefn);
-       });
+       };
+       # exclusive lock if VM is running - else shared lock is enough;
+       if ($running) {
+           return PVE::QemuConfig->lock_config_full($vmid, 1, $lock_target_vm);
+       } else {
+           return PVE::QemuConfig->lock_config_shared($vmid, 1, $lock_target_vm);
+       }
    }});
@@ -3002,7 +3105,7 @@ __PACKAGE__->register_method({
            disk => {
                type => 'string',
                description => "The disk you want to move.",
-               enum => [ PVE::QemuServer::valid_drive_names() ],
+               enum => [PVE::QemuServer::Drive::valid_drive_names()],
            },
            storage => get_standard_option('pve-storage-id', {
                description => "Target storage.",
@@ -3043,51 +3146,43 @@ __PACKAGE__->register_method({
        my ($param) = @_;
        my $rpcenv = PVE::RPCEnvironment::get();
-
        my $authuser = $rpcenv->get_user();
        my $node = extract_param($param, 'node');
-
        my $vmid = extract_param($param, 'vmid');
-
        my $digest = extract_param($param, 'digest');
-
        my $disk = extract_param($param, 'disk');
-
        my $storeid = extract_param($param, 'storage');
-
        my $format = extract_param($param, 'format');
        my $storecfg = PVE::Storage::config();
        my $updatefn =  sub {
-
            my $conf = PVE::QemuConfig->load_config($vmid);
-
            PVE::QemuConfig->check_lock($conf);
-           die "checksum missmatch (file change by other user?)\n"
+           die "VM config checksum missmatch (file change by other user?)\n"
                if $digest && $digest ne $conf->{digest};
            die "disk '$disk' does not exist\n" if !$conf->{$disk};
            my $drive = PVE::QemuServer::parse_drive($disk, $conf->{$disk});
-           my $old_volid = $drive->{file} || die "disk '$disk' has no associated volume\n";
-
+           die "disk '$disk' has no associated volume\n" if !$drive->{file};
            die "you can't move a cdrom\n" if PVE::QemuServer::drive_is_cdrom($drive, 1);
+           my $old_volid = $drive->{file};
            my $oldfmt;
            my ($oldstoreid, $oldvolname) = PVE::Storage::parse_volume_id($old_volid);
            if ($oldvolname =~ m/\.(raw|qcow2|vmdk)$/){
                $oldfmt = $1;
            }
-           die "you can't move on the same storage with same format\n" if $oldstoreid eq $storeid &&
+           die "you can't move to the same storage with same format\n" if $oldstoreid eq $storeid &&
                (!$format || !$oldfmt || $oldfmt eq $format);
            # this only checks snapshots because $disk is passed!
-           my $snapshotted = PVE::QemuServer::is_volume_in_use($storecfg, $conf, $disk, $old_volid);
+           my $snapshotted = PVE::QemuServer::Drive::is_volume_in_use($storecfg, $conf, $disk, $old_volid);
            die "you can't move a disk with snapshots and delete the source\n"
                if $snapshotted && $param->{delete};
@@ -3098,7 +3193,6 @@ __PACKAGE__->register_method({
            PVE::Storage::activate_volumes($storecfg, [ $drive->{file} ]);
            my $realcmd = sub {
-
                my $newvollist = [];
                eval {
@@ -3114,7 +3208,7 @@ __PACKAGE__->register_method({
                    my $movelimit = PVE::Storage::get_bandwidth_limit('move', [$oldstoreid, $storeid], $bwlimit);
                    my $newdrive = PVE::QemuServer::clone_disk($storecfg, $vmid, $running, $disk, $drive, undef,
-                                                              $vmid, $storeid, $format, 1, $newvollist, undef, undef, undef, $movelimit);
+                                                              $vmid, $storeid, $format, 1, $newvollist, undef, undef, undef, $movelimit, $conf);
                    $conf->{$disk} = PVE::QemuServer::print_drive($newdrive);
@@ -3126,8 +3220,9 @@ __PACKAGE__->register_method({
                    PVE::QemuConfig->write_config($vmid, $conf);
-                   if ($running && PVE::QemuServer::parse_guest_agent($conf)->{fstrim_cloned_disks} && PVE::QemuServer::qga_check_running($vmid)) {
-                       eval { mon_cmd($vmid, "guest-fstrim"); };
+                   my $do_trim = PVE::QemuServer::parse_guest_agent($conf)->{fstrim_cloned_disks};
+                   if ($running && $do_trim && PVE::QemuServer::qga_check_running($vmid)) {
+                       eval { mon_cmd($vmid, "guest-fstrim") };
                    }
                    eval {
@@ -3138,11 +3233,10 @@ __PACKAGE__->register_method({
                    warn $@ if $@;
                };
                if (my $err = $@) {
-
-                   foreach my $volid (@$newvollist) {
-                       eval { PVE::Storage::vdisk_free($storecfg, $volid); };
-                       warn $@ if $@;
-                   }
+                   foreach my $volid (@$newvollist) {
+                       eval { PVE::Storage::vdisk_free($storecfg, $volid) };
+                       warn $@ if $@;
+                   }
                    die "storage migration failed: $err";
                }
@@ -3338,9 +3432,7 @@ __PACKAGE__->register_method({
                description => "Enable live storage migration for local disk",
                optional => 1,
            },
-           targetstorage => get_standard_option('pve-storage-id', {
-               description => "Default target storage.",
-               optional => 1,
+           targetstorage => get_standard_option('pve-targetstorage', {
                completion => \&PVE::QemuServer::complete_migration_storage,
            }),
            bwlimit => {
@@ -3399,13 +3491,36 @@ __PACKAGE__->register_method({
            $param->{online} = 0;
        }
-       raise_param_exc({ targetstorage => "Live storage migration can only be done online." })
-           if !$param->{online} && $param->{targetstorage};
-
        my $storecfg = PVE::Storage::config();
-       if( $param->{targetstorage}) {
-           PVE::Storage::storage_check_node($storecfg, $param->{targetstorage}, $target);
+       if (my $targetstorage = $param->{targetstorage}) {
+           my $check_storage = sub {
+               my ($target_sid) = @_;
+               PVE::Storage::storage_check_node($storecfg, $target_sid, $target);
+               $rpcenv->check($authuser, "/storage/$target_sid", ['Datastore.AllocateSpace']);
+               my $scfg = PVE::Storage::storage_config($storecfg, $target_sid);
+               raise_param_exc({ targetstorage => "storage '$target_sid' does not support vm images"})
+                   if !$scfg->{content}->{images};
+           };
+
+           my $storagemap = eval { PVE::JSONSchema::parse_idmap($targetstorage, 'pve-storage-id') };
+           raise_param_exc({ targetstorage => "failed to parse storage map: $@" })
+               if $@;
+
+           $rpcenv->check_vm_perm($authuser, $vmid, undef, ['VM.Config.Disk'])
+               if !defined($storagemap->{identity});
+
+           foreach my $source (values %{$storagemap->{entries}}) {
+               $check_storage->($source);
+           }
+
+           $check_storage->($storagemap->{default})
+               if $storagemap->{default};
+
+           PVE::QemuServer::check_storage_availability($storecfg, $conf, $target)
+               if $storagemap->{identity};
+
+           $param->{storagemap} = $storagemap;
        } else {
            PVE::QemuServer::check_storage_availability($storecfg, $conf, $target);
        }
@@ -3509,7 +3624,7 @@ __PACKAGE__->register_method({
            disk => {
                type => 'string',
                description => "The disk you want to resize.",
-               enum => [PVE::QemuServer::valid_drive_names()],
+               enum => [PVE::QemuServer::Drive::valid_drive_names()],
            },
            size => {
                type => 'string',
@@ -3861,7 +3976,7 @@ __PACKAGE__->register_method({
    proxyto => 'node',
    description => "Get snapshot configuration",
    permissions => {
-       check => ['perm', '/vms/{vmid}', [ 'VM.Snapshot', 'VM.Snapshot.Rollback' ], any => 1],
+       check => ['perm', '/vms/{vmid}', [ 'VM.Snapshot', 'VM.Snapshot.Rollback', 'VM.Audit' ], any => 1],
    },
    parameters => {
        additionalProperties => 0,
@@ -4008,7 +4123,7 @@ __PACKAGE__->register_method({
                optional => 1,
                type => 'string',
                description => "If you want to convert only 1 disk to base image.",
-               enum => [PVE::QemuServer::valid_drive_names()],
+               enum => [PVE::QemuServer::Drive::valid_drive_names()],
            },
        },