X-Git-Url: https://git.proxmox.com/?a=blobdiff_plain;f=PVE%2FAPI2%2FQemu.pm;h=5e6fd42513dbaf509e9ae128c85a6eab84276018;hb=e3d3194446b8a99518251da27028ce4f45803f0f;hp=1aed9875c7722c4942a9970f525989660872ade7;hpb=a4262553deb9a0105280daebd978daf16b0e98c0;p=qemu-server.git diff --git a/PVE/API2/Qemu.pm b/PVE/API2/Qemu.pm index 1aed987..5e6fd42 100644 --- a/PVE/API2/Qemu.pm +++ b/PVE/API2/Qemu.pm @@ -4,12 +4,12 @@ use strict; use warnings; use Cwd 'abs_path'; use Net::SSLeay; -use UUID; use POSIX; use IO::Socket::IP; use URI::Escape; use PVE::Cluster qw (cfs_read_file cfs_write_file);; +use PVE::RRD; use PVE::SafeSyslog; use PVE::Tools qw(extract_param); use PVE::Exception qw(raise raise_param_exc raise_perm_exc); @@ -20,6 +20,9 @@ use PVE::ReplicationConfig; use PVE::GuestHelpers; use PVE::QemuConfig; use PVE::QemuServer; +use PVE::QemuServer::Drive; +use PVE::QemuServer::CPUConfig; +use PVE::QemuServer::Monitor qw(mon_cmd); use PVE::QemuMigrate; use PVE::RPCEnvironment; use PVE::AccessControl; @@ -28,6 +31,9 @@ use PVE::Network; use PVE::Firewall; use PVE::API2::Firewall::VM; use PVE::API2::Qemu::Agent; +use PVE::VZDump::Plugin; +use PVE::DataCenterConfig; +use PVE::SSHInfo; BEGIN { if (!$ENV{PVE_GENERATING_DOCS}) { @@ -58,16 +64,15 @@ my $NEW_DISK_RE = qr!^(([^/:\s]+):)?(\d+(\.\d+)?)$!; my $check_storage_access = sub { my ($rpcenv, $authuser, $storecfg, $vmid, $settings, $default_storage) = @_; - PVE::QemuServer::foreach_drive($settings, sub { + PVE::QemuConfig->foreach_volume($settings, sub { my ($ds, $drive) = @_; my $isCDROM = PVE::QemuServer::drive_is_cdrom($drive); my $volid = $drive->{file}; + my ($storeid, $volname) = PVE::Storage::parse_volume_id($volid, 1); - if (!$volid || ($volid eq 'none' || $volid eq 'cloudinit')) { - # nothing to check - } elsif ($volid =~ m/^(([^:\s]+):)?(cloudinit)$/) { + if (!$volid || ($volid eq 'none' || $volid eq 'cloudinit' || (defined($volname) && $volname eq 'cloudinit'))) { # nothing to check } elsif ($isCDROM && ($volid eq 'cdrom')) { $rpcenv->check($authuser, "/", ['Sys.Console']); @@ -92,7 +97,7 @@ my $check_storage_access_clone = sub { my $sharedvm = 1; - PVE::QemuServer::foreach_drive($conf, sub { + PVE::QemuConfig->foreach_volume($conf, sub { my ($ds, $drive) = @_; my $isCDROM = PVE::QemuServer::drive_is_cdrom($drive); @@ -140,12 +145,13 @@ my $create_disks = sub { my ($ds, $disk) = @_; my $volid = $disk->{file}; + my ($storeid, $volname) = PVE::Storage::parse_volume_id($volid, 1); if (!$volid || $volid eq 'none' || $volid eq 'cdrom') { delete $disk->{size}; - $res->{$ds} = PVE::QemuServer::print_drive($vmid, $disk); - } elsif ($volid =~ m!^(?:([^/:\s]+):)?cloudinit$!) 
{ - my $storeid = $1 || $default_storage; + $res->{$ds} = PVE::QemuServer::print_drive($disk); + } elsif (defined($volname) && $volname eq 'cloudinit') { + $storeid = $storeid // $default_storage; die "no storage ID specified (and no default storage)\n" if !$storeid; my $scfg = PVE::Storage::storage_config($storecfg, $storeid); my $name = "vm-$vmid-cloudinit"; @@ -165,7 +171,7 @@ my $create_disks = sub { $disk->{media} = 'cdrom'; push @$vollist, $volid; delete $disk->{format}; # no longer needed - $res->{$ds} = PVE::QemuServer::print_drive($vmid, $disk); + $res->{$ds} = PVE::QemuServer::print_drive($disk); } elsif ($volid =~ $NEW_DISK_RE) { my ($storeid, $size) = ($2 || $default_storage, $3); die "no storage ID specified (and no default storage)\n" if !$storeid; @@ -184,7 +190,7 @@ my $create_disks = sub { $disk->{file} = $volid; $disk->{size} = PVE::Tools::convert_size($size, 'kb' => 'b'); delete $disk->{format}; # no longer needed - $res->{$ds} = PVE::QemuServer::print_drive($vmid, $disk); + $res->{$ds} = PVE::QemuServer::print_drive($disk); } else { PVE::Storage::check_volume_access($rpcenv, $authuser, $storecfg, $vmid, $volid); @@ -198,22 +204,20 @@ my $create_disks = sub { if ($volid_is_new) { - my ($storeid, $volname) = PVE::Storage::parse_volume_id($volid, 1); - PVE::Storage::activate_volumes($storecfg, [ $volid ]) if $storeid; my $size = PVE::Storage::volume_size_info($storecfg, $volid); - die "volume $volid does not exists\n" if !$size; + die "volume $volid does not exist\n" if !$size; $disk->{size} = $size; } - $res->{$ds} = PVE::QemuServer::print_drive($vmid, $disk); + $res->{$ds} = PVE::QemuServer::print_drive($disk); } }; - eval { PVE::QemuServer::foreach_drive($settings, $code); }; + eval { PVE::QemuConfig->foreach_volume($settings, $code); }; # free allocated images on error if (my $err = $@) { @@ -233,6 +237,26 @@ my $create_disks = sub { return $vollist; }; +my $check_cpu_model_access = sub { + my ($rpcenv, $authuser, $new, $existing) = @_; + + return if !defined($new->{cpu}); + + my $cpu = PVE::JSONSchema::check_format('pve-vm-cpu-conf', $new->{cpu}); + return if !$cpu || !$cpu->{cputype}; # always allow default + my $cputype = $cpu->{cputype}; + + if ($existing && $existing->{cpu}) { + # changing only other settings doesn't require permissions for CPU model + my $existingCpu = PVE::JSONSchema::check_format('pve-vm-cpu-conf', $existing->{cpu}); + return if $existingCpu->{cputype} eq $cputype; + } + + if (PVE::QemuServer::CPUConfig::is_custom_model($cputype)) { + $rpcenv->check($authuser, "/nodes", ['Sys.Audit']); + } +}; + my $cpuoptions = { 'cores' => 1, 'cpu' => 1, @@ -260,6 +284,7 @@ my $hwtypeoptions = { 'tablet' => 1, 'vga' => 1, 'watchdog' => 1, + 'audio0' => 1, }; my $generaloptions = { @@ -280,6 +305,7 @@ my $generaloptions = { 'startup' => 1, 'tdf' => 1, 'template' => 1, + 'tags' => 1, }; my $vmpoweroptions = { @@ -333,6 +359,10 @@ my $check_vm_modify_config_perm = sub { $rpcenv->check_vm_perm($authuser, $vmid, $pool, ['VM.Config.Disk']); } elsif ($cloudinitoptions->{$opt} || ($opt =~ m/^(?:net|ipconfig)\d+$/)) { $rpcenv->check_vm_perm($authuser, $vmid, $pool, ['VM.Config.Network']); + } elsif ($opt eq 'vmstate') { + # the user needs Disk and PowerMgmt privileges to change the vmstate + # also needs privileges on the storage, that will be checked later + $rpcenv->check_vm_perm($authuser, $vmid, $pool, ['VM.Config.Disk', 'VM.PowerMgmt' ]); } else { # catches hostpci\d+, args, lock, etc. 
# new options will be checked here @@ -392,6 +422,26 @@ __PACKAGE__->register_method({ return $res; }}); +my $parse_restore_archive = sub { + my ($storecfg, $archive) = @_; + + my ($archive_storeid, $archive_volname) = PVE::Storage::parse_volume_id($archive, 1); + + if (defined($archive_storeid)) { + my $scfg = PVE::Storage::storage_config($storecfg, $archive_storeid); + if ($scfg->{type} eq 'pbs') { + return { + type => 'pbs', + volid => $archive, + }; + } + } + my $path = PVE::Storage::abs_filesystem_path($storecfg, $archive); + return { + type => 'file', + path => $path, + }; +}; __PACKAGE__->register_method({ @@ -414,7 +464,7 @@ __PACKAGE__->register_method({ node => get_standard_option('pve-node'), vmid => get_standard_option('pve-vmid', { completion => \&PVE::Cluster::complete_next_vmid }), archive => { - description => "The backup file.", + description => "The backup archive. Either the file system path to a .tar or .vma file (use '-' to pipe data from stdin) or a proxmox storage backup volume identifier.", type => 'string', optional => 1, maxLength => 255, @@ -464,31 +514,20 @@ __PACKAGE__->register_method({ my ($param) = @_; my $rpcenv = PVE::RPCEnvironment::get(); - my $authuser = $rpcenv->get_user(); my $node = extract_param($param, 'node'); - my $vmid = extract_param($param, 'vmid'); my $archive = extract_param($param, 'archive'); my $is_restore = !!$archive; - my $storage = extract_param($param, 'storage'); - + my $bwlimit = extract_param($param, 'bwlimit'); my $force = extract_param($param, 'force'); - - my $unique = extract_param($param, 'unique'); - my $pool = extract_param($param, 'pool'); - - my $bwlimit = extract_param($param, 'bwlimit'); - my $start_after_create = extract_param($param, 'start'); - - my $filename = PVE::QemuConfig->config_file($vmid); - - my $storecfg = PVE::Storage::config(); + my $storage = extract_param($param, 'storage'); + my $unique = extract_param($param, 'unique'); if (defined(my $ssh_keys = $param->{sshkeys})) { $ssh_keys = URI::Escape::uri_unescape($ssh_keys); @@ -497,6 +536,9 @@ __PACKAGE__->register_method({ PVE::Cluster::check_cfs_quorum(); + my $filename = PVE::QemuConfig->config_file($vmid); + my $storecfg = PVE::Storage::config(); + if (defined($pool)) { $rpcenv->check_pool_exist($pool); } @@ -522,13 +564,15 @@ __PACKAGE__->register_method({ &$check_vm_modify_config_perm($rpcenv, $authuser, $vmid, $pool, [ keys %$param]); + &$check_cpu_model_access($rpcenv, $authuser, $param); + foreach my $opt (keys %$param) { if (PVE::QemuServer::is_valid_drivename($opt)) { my $drive = PVE::QemuServer::parse_drive($opt, $param->{$opt}); raise_param_exc({ $opt => "unable to parse drive options" }) if !$drive; PVE::QemuServer::cleanup_drive_path($opt, $storecfg, $drive); - $param->{$opt} = PVE::QemuServer::print_drive($vmid, $drive); + $param->{$opt} = PVE::QemuServer::print_drive($drive); } } @@ -540,9 +584,11 @@ __PACKAGE__->register_method({ if ($archive eq '-') { die "pipe requires cli environment\n" if $rpcenv->{type} ne 'cli'; + $archive = { type => 'pipe' }; } else { PVE::Storage::check_volume_access($rpcenv, $authuser, $storecfg, $vmid, $archive); - $archive = PVE::Storage::abs_filesystem_path($storecfg, $archive); + + $archive = $parse_restore_archive->($storecfg, $archive); } } @@ -559,12 +605,19 @@ __PACKAGE__->register_method({ die "$emsg vm is running\n" if PVE::QemuServer::check_running($vmid); my $realcmd = sub { - PVE::QemuServer::restore_archive($archive, $vmid, $authuser, { + my $restore_options = { storage => $storage, pool => $pool, 
unique => $unique, bwlimit => $bwlimit, - }); + }; + if ($archive->{type} eq 'file' || $archive->{type} eq 'pipe') { + PVE::QemuServer::restore_file_archive($archive->{path} // '-', $vmid, $authuser, $restore_options); + } elsif ($archive->{type} eq 'pbs') { + PVE::QemuServer::restore_proxmox_backup_archive($archive->{volid}, $vmid, $authuser, $restore_options); + } else { + die "unknown backup archive type\n"; + } my $restored_conf = PVE::QemuConfig->load_config($vmid); # Convert restored VM to template if backup was VM template if (PVE::QemuConfig->is_template($restored_conf)) { @@ -574,17 +627,18 @@ __PACKAGE__->register_method({ } PVE::AccessControl::add_vm_to_pool($vmid, $pool) if $pool; - - if ($start_after_create) { - eval { PVE::API2::Qemu->vm_start({ vmid => $vmid, node => $node }) }; - warn $@ if $@; - } }; # ensure no old replication state are exists PVE::ReplicationState::delete_guest_states($vmid); - return PVE::QemuConfig->lock_config_full($vmid, 1, $realcmd); + PVE::QemuConfig->lock_config_full($vmid, 1, $realcmd); + + if ($start_after_create) { + print "Execute autostart\n"; + eval { PVE::API2::Qemu->vm_start({ vmid => $vmid, node => $node }) }; + warn $@ if $@; + } }; my $createfn = sub { @@ -592,19 +646,15 @@ __PACKAGE__->register_method({ PVE::ReplicationState::delete_guest_states($vmid); my $realcmd = sub { - - my $vollist = []; - my $conf = $param; + my $arch = PVE::QemuServer::get_vm_arch($conf); - my ($arch, undef) = PVE::QemuServer::get_basic_machine_info($conf); - + my $vollist = []; eval { - $vollist = &$create_disks($rpcenv, $authuser, $conf, $arch, $storecfg, $vmid, $pool, $param, $storage); if (!$conf->{bootdisk}) { - my $firstdisk = PVE::QemuServer::resolve_first_disk($conf); + my $firstdisk = PVE::QemuServer::Drive::resolve_first_disk($conf); $conf->{bootdisk} = $firstdisk if $firstdisk; } @@ -773,7 +823,7 @@ __PACKAGE__->register_method({ code => sub { my ($param) = @_; - return PVE::Cluster::create_rrd_graph( + return PVE::RRD::create_rrd_graph( "pve2-vm/$param->{vmid}", $param->{timeframe}, $param->{ds}, $param->{cf}); @@ -816,7 +866,7 @@ __PACKAGE__->register_method({ code => sub { my ($param) = @_; - return PVE::Cluster::create_rrd_data( + return PVE::RRD::create_rrd_data( "pve2-vm/$param->{vmid}", $param->{timeframe}, $param->{cf}); }}); @@ -826,7 +876,8 @@ __PACKAGE__->register_method({ path => '{vmid}/config', method => 'GET', proxyto => 'node', - description => "Get current virtual machine configuration. This does not include pending configuration changes (see 'pending' API).", + description => "Get the virtual machine configuration with pending configuration " . + "changes applied. 
Set the 'current' parameter to get the current configuration instead.", permissions => { check => ['perm', '/vms/{vmid}', [ 'VM.Audit' ]], }, @@ -852,7 +903,7 @@ __PACKAGE__->register_method({ }, }, returns => { - description => "The current VM configuration.", + description => "The VM configuration.", type => "object", properties => PVE::QemuServer::json_config_properties({ digest => { @@ -864,40 +915,19 @@ __PACKAGE__->register_method({ code => sub { my ($param) = @_; - my $conf = PVE::QemuConfig->load_config($param->{vmid}); - - if (my $snapname = $param->{snapshot}) { - my $snapshot = $conf->{snapshots}->{$snapname}; - die "snapshot '$snapname' does not exist\n" if !defined($snapshot); + raise_param_exc({ snapshot => "cannot use 'snapshot' parameter with 'current'", + current => "cannot use 'snapshot' parameter with 'current'"}) + if ($param->{snapshot} && $param->{current}); - $snapshot->{digest} = $conf->{digest}; # keep file digest for API - - $conf = $snapshot; - } - - delete $conf->{snapshots}; - - if (!$param->{current}) { - foreach my $opt (keys %{$conf->{pending}}) { - next if $opt eq 'delete'; - my $value = $conf->{pending}->{$opt}; - next if ref($value); # just to be sure - $conf->{$opt} = $value; - } - my $pending_delete_hash = PVE::QemuServer::split_flagged_list($conf->{pending}->{delete}); - foreach my $opt (keys %$pending_delete_hash) { - delete $conf->{$opt} if $conf->{$opt}; - } - } - - delete $conf->{pending}; - - # hide cloudinit password - if ($conf->{cipassword}) { - $conf->{cipassword} = '**********'; + my $conf; + if ($param->{snapshot}) { + $conf = PVE::QemuConfig->load_snapshot_config($param->{vmid}, $param->{snapshot}); + } else { + $conf = PVE::QemuConfig->load_current_config($param->{vmid}, $param->{current}); } - + $conf->{cipassword} = '**********' if $conf->{cipassword}; return $conf; + }}); __PACKAGE__->register_method({ @@ -905,7 +935,7 @@ __PACKAGE__->register_method({ path => '{vmid}/pending', method => 'GET', proxyto => 'node', - description => "Get virtual machine configuration, including pending changes.", + description => "Get the virtual machine configuration with both current and pending values.", permissions => { check => ['perm', '/vms/{vmid}', [ 'VM.Audit' ]], }, @@ -951,49 +981,13 @@ __PACKAGE__->register_method({ my $conf = PVE::QemuConfig->load_config($param->{vmid}); - my $pending_delete_hash = PVE::QemuServer::split_flagged_list($conf->{pending}->{delete}); + my $pending_delete_hash = PVE::QemuConfig->parse_pending_delete($conf->{pending}->{delete}); - my $res = []; - - foreach my $opt (keys %$conf) { - next if ref($conf->{$opt}); - my $item = { key => $opt }; - $item->{value} = $conf->{$opt} if defined($conf->{$opt}); - $item->{pending} = $conf->{pending}->{$opt} if defined($conf->{pending}->{$opt}); - $item->{delete} = ($pending_delete_hash->{$opt} ? 
2 : 1) if exists $pending_delete_hash->{$opt}; - - # hide cloudinit password - if ($opt eq 'cipassword') { - $item->{value} = '**********' if defined($item->{value}); - # the trailing space so that the pending string is different - $item->{pending} = '********** ' if defined($item->{pending}); - } - push @$res, $item; - } - - foreach my $opt (keys %{$conf->{pending}}) { - next if $opt eq 'delete'; - next if ref($conf->{pending}->{$opt}); # just to be sure - next if defined($conf->{$opt}); - my $item = { key => $opt }; - $item->{pending} = $conf->{pending}->{$opt}; + $conf->{cipassword} = '**********' if defined($conf->{cipassword}); + $conf->{pending}->{cipassword} = '********** ' if defined($conf->{pending}->{cipassword}); - # hide cloudinit password - if ($opt eq 'cipassword') { - $item->{pending} = '**********' if defined($item->{pending}); - } - push @$res, $item; - } - - while (my ($opt, $force) = each %$pending_delete_hash) { - next if $conf->{pending}->{$opt}; # just to be sure - next if $conf->{$opt}; - my $item = { key => $opt, delete => ($force ? 2 : 1)}; - push @$res, $item; - } - - return $res; - }}); + return PVE::GuestHelpers::config_with_pending_array($conf, $pending_delete_hash); + }}); # POST/PUT {vmid}/config implementation # @@ -1099,12 +1093,15 @@ my $update_vm_api = sub { my $volid = $drive->{file}; return if !$volid || !($drive->{replicate}//1); return if PVE::QemuServer::drive_is_cdrom($drive); - my ($storeid, $format); + + my ($storeid, $volname) = PVE::Storage::parse_volume_id($volid, 1); + return if defined($volname) && $volname eq 'cloudinit'; + + my $format; if ($volid =~ $NEW_DISK_RE) { $storeid = $2; $format = $drive->{format} || PVE::Storage::storage_default_format($storecfg, $storeid); } else { - ($storeid, undef) = PVE::Storage::parse_volume_id($volid, 1); $format = (PVE::Storage::parse_volname($storecfg, $volid))[6]; } return if PVE::Storage::storage_can_replicate($storecfg, $storeid, $format); @@ -1120,7 +1117,7 @@ my $update_vm_api = sub { raise_param_exc({ $opt => "unable to parse drive options" }) if !$drive; PVE::QemuServer::cleanup_drive_path($opt, $storecfg, $drive); $check_replication->($drive); - $param->{$opt} = PVE::QemuServer::print_drive($vmid, $drive); + $param->{$opt} = PVE::QemuServer::print_drive($drive); } elsif ($opt =~ m/^net(\d+)$/) { # add macaddr my $net = PVE::QemuServer::parse_net($param->{$opt}); @@ -1148,6 +1145,18 @@ my $update_vm_api = sub { die "checksum missmatch (file change by other user?)\n" if $digest && $digest ne $conf->{digest}; + &$check_cpu_model_access($rpcenv, $authuser, $param, $conf); + + # FIXME: 'suspended' lock should probabyl be a state or "weak" lock?! 
+ if (scalar(@delete) && grep { $_ eq 'vmstate'} @delete) { + if (defined($conf->{lock}) && $conf->{lock} eq 'suspended') { + delete $conf->{lock}; # for check lock check, not written out + push @delete, 'lock'; # this is the real deal to write it out + } + push @delete, 'runningmachine' if $conf->{runningmachine}; + push @delete, 'runningcpu' if $conf->{runningcpu}; + } + PVE::QemuConfig->check_lock($conf) if !$skiplock; foreach my $opt (keys %$revert) { @@ -1179,45 +1188,56 @@ my $update_vm_api = sub { foreach my $opt (@delete) { $modified->{$opt} = 1; $conf = PVE::QemuConfig->load_config($vmid); # update/reload - if (!defined($conf->{$opt}) && !defined($conf->{pending}->{$opt})) { + + # value of what we want to delete, independent if pending or not + my $val = $conf->{$opt} // $conf->{pending}->{$opt}; + if (!defined($val)) { warn "cannot delete '$opt' - not set in current configuration!\n"; $modified->{$opt} = 0; next; } + my $is_pending_val = defined($conf->{pending}->{$opt}); + delete $conf->{pending}->{$opt}; if ($opt =~ m/^unused/) { - my $drive = PVE::QemuServer::parse_drive($opt, $conf->{$opt}); + my $drive = PVE::QemuServer::parse_drive($opt, $val); PVE::QemuConfig->check_protection($conf, "can't remove unused disk '$drive->{file}'"); $rpcenv->check_vm_perm($authuser, $vmid, undef, ['VM.Config.Disk']); if (PVE::QemuServer::try_deallocate_drive($storecfg, $vmid, $conf, $opt, $drive, $rpcenv, $authuser)) { delete $conf->{$opt}; PVE::QemuConfig->write_config($vmid, $conf); } + } elsif ($opt eq 'vmstate') { + PVE::QemuConfig->check_protection($conf, "can't remove vmstate '$val'"); + if (PVE::QemuServer::try_deallocate_drive($storecfg, $vmid, $conf, $opt, { file => $val }, $rpcenv, $authuser, 1)) { + delete $conf->{$opt}; + PVE::QemuConfig->write_config($vmid, $conf); + } } elsif (PVE::QemuServer::is_valid_drivename($opt)) { PVE::QemuConfig->check_protection($conf, "can't remove drive '$opt'"); $rpcenv->check_vm_perm($authuser, $vmid, undef, ['VM.Config.Disk']); - PVE::QemuServer::vmconfig_register_unused_drive($storecfg, $vmid, $conf, PVE::QemuServer::parse_drive($opt, $conf->{pending}->{$opt})) - if defined($conf->{pending}->{$opt}); - PVE::QemuServer::vmconfig_delete_pending_option($conf, $opt, $force); + PVE::QemuServer::vmconfig_register_unused_drive($storecfg, $vmid, $conf, PVE::QemuServer::parse_drive($opt, $val)) + if $is_pending_val; + PVE::QemuConfig->add_to_pending_delete($conf, $opt, $force); PVE::QemuConfig->write_config($vmid, $conf); } elsif ($opt =~ m/^serial\d+$/) { - if ($conf->{$opt} eq 'socket') { + if ($val eq 'socket') { $rpcenv->check_vm_perm($authuser, $vmid, undef, ['VM.Config.HWType']); } elsif ($authuser ne 'root@pam') { die "only root can delete '$opt' config for real devices\n"; } - PVE::QemuServer::vmconfig_delete_pending_option($conf, $opt, $force); + PVE::QemuConfig->add_to_pending_delete($conf, $opt, $force); PVE::QemuConfig->write_config($vmid, $conf); } elsif ($opt =~ m/^usb\d+$/) { - if ($conf->{$opt} =~ m/spice/) { + if ($val =~ m/spice/) { $rpcenv->check_vm_perm($authuser, $vmid, undef, ['VM.Config.HWType']); } elsif ($authuser ne 'root@pam') { die "only root can delete '$opt' config for real devices\n"; } - PVE::QemuServer::vmconfig_delete_pending_option($conf, $opt, $force); + PVE::QemuConfig->add_to_pending_delete($conf, $opt, $force); PVE::QemuConfig->write_config($vmid, $conf); } else { - PVE::QemuServer::vmconfig_delete_pending_option($conf, $opt, $force); + PVE::QemuConfig->add_to_pending_delete($conf, $opt, $force); 
PVE::QemuConfig->write_config($vmid, $conf); } } @@ -1227,7 +1247,7 @@ my $update_vm_api = sub { $conf = PVE::QemuConfig->load_config($vmid); # update/reload next if defined($conf->{pending}->{$opt}) && ($param->{$opt} eq $conf->{pending}->{$opt}); # skip if nothing changed - my ($arch, undef) = PVE::QemuServer::get_basic_machine_info($conf); + my $arch = PVE::QemuServer::get_vm_arch($conf); if (PVE::QemuServer::is_valid_drivename($opt)) { my $drive = PVE::QemuServer::parse_drive($opt, $param->{$opt}); @@ -1258,13 +1278,13 @@ my $update_vm_api = sub { } else { $conf->{pending}->{$opt} = $param->{$opt}; } - PVE::QemuServer::vmconfig_undelete_pending_option($conf, $opt); + PVE::QemuConfig->remove_from_pending_delete($conf, $opt); PVE::QemuConfig->write_config($vmid, $conf); } # remove pending changes when nothing changed $conf = PVE::QemuConfig->load_config($vmid); # update/reload - my $changes = PVE::QemuServer::vmconfig_cleanup_pending($conf); + my $changes = PVE::QemuConfig->cleanup_pending($conf); PVE::QemuConfig->write_config($vmid, $conf) if $changes; return if !scalar(keys %{$conf->{pending}}); @@ -1275,13 +1295,13 @@ my $update_vm_api = sub { $conf = PVE::QemuConfig->load_config($vmid); # update/reload + my $errors = {}; if ($running) { - my $errors = {}; PVE::QemuServer::vmconfig_hotplug_pending($vmid, $conf, $storecfg, $modified, $errors); - raise_param_exc($errors) if scalar(keys %$errors); } else { - PVE::QemuServer::vmconfig_apply_pending($vmid, $conf, $storecfg, $running); + PVE::QemuServer::vmconfig_apply_pending($vmid, $conf, $storecfg, $running, $errors); } + raise_param_exc($errors) if scalar(keys %$errors); return; }; @@ -1436,7 +1456,6 @@ __PACKAGE__->register_method({ } }); - __PACKAGE__->register_method({ name => 'destroy_vm', path => '{vmid}', @@ -1453,6 +1472,11 @@ __PACKAGE__->register_method({ node => get_standard_option('pve-node'), vmid => get_standard_option('pve-vmid', { completion => \&PVE::QemuServer::complete_vmid_stopped }), skiplock => get_standard_option('skiplock'), + purge => { + type => 'boolean', + description => "Remove vmid from backup cron jobs.", + optional => 1, + }, }, }, returns => { @@ -1462,43 +1486,64 @@ __PACKAGE__->register_method({ my ($param) = @_; my $rpcenv = PVE::RPCEnvironment::get(); - my $authuser = $rpcenv->get_user(); - my $vmid = $param->{vmid}; my $skiplock = $param->{skiplock}; raise_param_exc({ skiplock => "Only root may use this option." 
}) if $skiplock && $authuser ne 'root@pam'; - # test if VM exists - my $conf = PVE::QemuConfig->load_config($vmid); + my $early_checks = sub { + # test if VM exists + my $conf = PVE::QemuConfig->load_config($vmid); + PVE::QemuConfig->check_protection($conf, "can't remove VM $vmid"); - my $storecfg = PVE::Storage::config(); + my $ha_managed = PVE::HA::Config::service_is_configured("vm:$vmid"); - PVE::QemuConfig->check_protection($conf, "can't remove VM $vmid"); + if (!$param->{purge}) { + die "unable to remove VM $vmid - used in HA resources and purge parameter not set.\n" + if $ha_managed; + # don't allow destroy if with replication jobs but no purge param + my $repl_conf = PVE::ReplicationConfig->new(); + $repl_conf->check_for_existing_jobs($vmid); + } - die "unable to remove VM $vmid - used in HA resources\n" - if PVE::HA::Config::vm_is_ha_managed($vmid); + die "VM $vmid is running - destroy failed\n" + if PVE::QemuServer::check_running($vmid); - # do not allow destroy if there are replication jobs - my $repl_conf = PVE::ReplicationConfig->new(); - $repl_conf->check_for_existing_jobs($vmid); + return $ha_managed; + }; - # early tests (repeat after locking) - die "VM $vmid is running - destroy failed\n" - if PVE::QemuServer::check_running($vmid); + $early_checks->(); my $realcmd = sub { my $upid = shift; + my $storecfg = PVE::Storage::config(); + syslog('info', "destroy VM $vmid: $upid\n"); + PVE::QemuConfig->lock_config($vmid, sub { + # repeat, config might have changed + my $ha_managed = $early_checks->(); + + PVE::QemuServer::destroy_vm($storecfg, $vmid, $skiplock, { lock => 'destroyed' }); - PVE::QemuServer::vm_destroy($storecfg, $vmid, $skiplock); + PVE::AccessControl::remove_vm_access($vmid); + PVE::Firewall::remove_vmfw_conf($vmid); + if ($param->{purge}) { + print "purging VM $vmid from related configurations..\n"; + PVE::ReplicationConfig::remove_vmid_jobs($vmid); + PVE::VZDump::Plugin::remove_vmid_from_backup_jobs($vmid); - PVE::AccessControl::remove_vm_access($vmid); + if ($ha_managed) { + PVE::HA::Config::delete_service_from_config("vm:$vmid"); + print "NOTE: removed VM $vmid from HA resource configuration.\n"; + } + } - PVE::Firewall::remove_vmfw_conf($vmid); + # only now remove the zombie config, else we can have reuse race + PVE::QemuConfig->destroy_config($vmid); + }); }; return $rpcenv->fork_worker('qmdestroy', $vmid, $authuser, $realcmd); @@ -1600,9 +1645,9 @@ __PACKAGE__->register_method({ if ($node ne 'localhost' && $node ne PVE::INotify::nodename()) { (undef, $family) = PVE::Cluster::remote_node_ip($node); - my $sshinfo = PVE::Cluster::get_ssh_info($node); + my $sshinfo = PVE::SSHInfo::get_ssh_info($node); # NOTE: kvm VNC traffic is already TLS encrypted or is known unsecure - $remcmd = PVE::Cluster::ssh_info_to_command($sshinfo, $use_serial ? '-t' : '-T'); + $remcmd = PVE::SSHInfo::ssh_info_to_command($sshinfo, $use_serial ? 
'-t' : '-T'); } else { $family = PVE::Tools::get_host_address_family($node); } @@ -1740,8 +1785,8 @@ __PACKAGE__->register_method({ if ($node ne 'localhost' && $node ne PVE::INotify::nodename()) { (undef, $family) = PVE::Cluster::remote_node_ip($node); - my $sshinfo = PVE::Cluster::get_ssh_info($node); - $remcmd = PVE::Cluster::ssh_info_to_command($sshinfo, '-t'); + my $sshinfo = PVE::SSHInfo::get_ssh_info($node); + $remcmd = PVE::SSHInfo::ssh_info_to_command($sshinfo, '-t'); push @$remcmd, '--'; } else { $family = PVE::Tools::get_host_address_family($node); @@ -1873,8 +1918,8 @@ __PACKAGE__->register_method({ my ($ticket, undef, $remote_viewer_config) = PVE::AccessControl::remote_viewer_config($authuser, $vmid, $node, $proxy, $title, $port); - PVE::QemuServer::vm_mon_cmd($vmid, "set_password", protocol => 'spice', password => $ticket); - PVE::QemuServer::vm_mon_cmd($vmid, "expire_password", protocol => 'spice', time => "+30"); + mon_cmd($vmid, "set_password", protocol => 'spice', password => $ticket); + mon_cmd($vmid, "expire_password", protocol => 'spice', time => "+30"); return $remote_viewer_config; }}); @@ -1915,6 +1960,10 @@ __PACKAGE__->register_method({ { subdir => 'current' }, { subdir => 'start' }, { subdir => 'stop' }, + { subdir => 'reset' }, + { subdir => 'shutdown' }, + { subdir => 'suspend' }, + { subdir => 'reboot' }, ]; return $res; @@ -2006,12 +2055,20 @@ __PACKAGE__->register_method({ description => "CIDR of the (sub) network that is used for migration.", optional => 1, }, - machine => get_standard_option('pve-qm-machine'), - targetstorage => { - description => "Target storage for the migration. (Can be '1' to use the same storage id as on the source node.)", + machine => get_standard_option('pve-qemu-machine'), + 'force-cpu' => { + description => "Override QEMU's -cpu argument with the given string.", type => 'string', - optional => 1 - } + optional => 1, + }, + targetstorage => get_standard_option('pve-targetstorage'), + timeout => { + description => "Wait maximal timeout seconds.", + type => 'integer', + minimum => 0, + default => 'max(30, vm memory in GiB)', + optional => 1, + }, }, }, returns => { @@ -2025,42 +2082,52 @@ __PACKAGE__->register_method({ my $node = extract_param($param, 'node'); my $vmid = extract_param($param, 'vmid'); + my $timeout = extract_param($param, 'timeout'); my $machine = extract_param($param, 'machine'); + my $force_cpu = extract_param($param, 'force-cpu'); - my $stateuri = extract_param($param, 'stateuri'); - raise_param_exc({ stateuri => "Only root may use this option." }) - if $stateuri && $authuser ne 'root@pam'; - - my $skiplock = extract_param($param, 'skiplock'); - raise_param_exc({ skiplock => "Only root may use this option." }) - if $skiplock && $authuser ne 'root@pam'; - - my $migratedfrom = extract_param($param, 'migratedfrom'); - raise_param_exc({ migratedfrom => "Only root may use this option." }) - if $migratedfrom && $authuser ne 'root@pam'; - - my $migration_type = extract_param($param, 'migration_type'); - raise_param_exc({ migration_type => "Only root may use this option." }) - if $migration_type && $authuser ne 'root@pam'; - - my $migration_network = extract_param($param, 'migration_network'); - raise_param_exc({ migration_network => "Only root may use this option." }) - if $migration_network && $authuser ne 'root@pam'; - - my $targetstorage = extract_param($param, 'targetstorage'); - raise_param_exc({ targetstorage => "Only root may use this option." 
})
- if $targetstorage && $authuser ne 'root@pam';
+ my $get_root_param = sub {
+ my $value = extract_param($param, $_[0]);
+ raise_param_exc({ "$_[0]" => "Only root may use this option." })
+ if $value && $authuser ne 'root@pam';
+ return $value;
+ };
- raise_param_exc({ targetstorage => "targetstorage can only by used with migratedfrom." })
- if $targetstorage && !$migratedfrom;
+ my $stateuri = $get_root_param->('stateuri');
+ my $skiplock = $get_root_param->('skiplock');
+ my $migratedfrom = $get_root_param->('migratedfrom');
+ my $migration_type = $get_root_param->('migration_type');
+ my $migration_network = $get_root_param->('migration_network');
+ my $targetstorage = $get_root_param->('targetstorage');
+
+ my $storagemap;
+
+ if ($targetstorage) {
+ raise_param_exc({ targetstorage => "targetstorage can only by used with migratedfrom." })
+ if !$migratedfrom;
+ $storagemap = eval { PVE::JSONSchema::parse_idmap($targetstorage, 'pve-storage-id') };
+ raise_param_exc({ targetstorage => "failed to parse storage map: $@" })
+ if $@;
+ }
 # read spice ticket from STDIN
 my $spice_ticket;
- if ($stateuri && ($stateuri eq 'tcp') && $migratedfrom && ($rpcenv->{type} eq 'cli')) {
- if (defined(my $line = <STDIN>)) {
+ my $nbd_protocol_version = 0;
+ my $replicated_volumes = {};
+ if ($stateuri && ($stateuri eq 'tcp' || $stateuri eq 'unix') && $migratedfrom && ($rpcenv->{type} eq 'cli')) {
+ while (defined(my $line = <STDIN>)) {
 chomp $line;
- $spice_ticket = $line;
+ if ($line =~ m/^spice_ticket: (.+)$/) {
+ $spice_ticket = $1;
+ } elsif ($line =~ m/^nbd_protocol_version: (\d+)$/) {
+ $nbd_protocol_version = $1;
+ } elsif ($line =~ m/^replicated_volume: (.*)$/) {
+ $replicated_volumes->{$1} = 1;
+ } else {
+ # fallback for old source node
+ $spice_ticket = $line;
+ }
 }
 }
@@ -2088,8 +2155,25 @@
 syslog('info', "start VM $vmid: $upid\n");
- PVE::QemuServer::vm_start($storecfg, $vmid, $stateuri, $skiplock, $migratedfrom, undef,
- $machine, $spice_ticket, $migration_network, $migration_type, $targetstorage);
+ my $migrate_opts = {
+ migratedfrom => $migratedfrom,
+ spice_ticket => $spice_ticket,
+ network => $migration_network,
+ type => $migration_type,
+ storagemap => $storagemap,
+ nbd_proto_version => $nbd_protocol_version,
+ replicated_volumes => $replicated_volumes,
+ };
+
+ my $params = {
+ statefile => $stateuri,
+ skiplock => $skiplock,
+ forcemachine => $machine,
+ timeout => $timeout,
+ forcecpu => $force_cpu,
+ };
+
+ PVE::QemuServer::vm_start($storecfg, $vmid, $params, $migrate_opts);
 return;
 };
@@ -2164,7 +2248,7 @@
 print "Requesting HA stop for VM $vmid\n";
- my $cmd = ['ha-manager', 'set', "vm:$vmid", '--state', 'stopped'];
+ my $cmd = ['ha-manager', 'crm-command', 'stop', "vm:$vmid", '0'];
 PVE::Tools::run_command($cmd);
 return;
 };
@@ -2305,7 +2389,8 @@
 # checking the qmp status here to get feedback to the gui/cli/api
 # and the status query should not take too long
 my $qmpstatus = eval {
- PVE::QemuServer::vm_qmp_command($vmid, { execute => "query-status" }, 0);
+ PVE::QemuConfig::assert_config_exists_on_node($vmid);
+ mon_cmd($vmid, "query-status");
 };
 my $err = $@ if $@;
@@ -2320,12 +2405,13 @@
 if (PVE::HA::Config::vm_is_ha_managed($vmid) &&
 $rpcenv->{type} ne 'ha') {
+ my $timeout = $param->{timeout} // 60;
 my $hacmd = sub {
 my $upid = shift;
 print "Requesting HA stop for VM $vmid\n";
- my $cmd =
['ha-manager', 'crm-command', 'stop', "vm:$vmid", "$timeout"]; PVE::Tools::run_command($cmd); return; }; @@ -2348,6 +2434,65 @@ __PACKAGE__->register_method({ } }}); +__PACKAGE__->register_method({ + name => 'vm_reboot', + path => '{vmid}/status/reboot', + method => 'POST', + protected => 1, + proxyto => 'node', + description => "Reboot the VM by shutting it down, and starting it again. Applies pending changes.", + permissions => { + check => ['perm', '/vms/{vmid}', [ 'VM.PowerMgmt' ]], + }, + parameters => { + additionalProperties => 0, + properties => { + node => get_standard_option('pve-node'), + vmid => get_standard_option('pve-vmid', + { completion => \&PVE::QemuServer::complete_vmid_running }), + timeout => { + description => "Wait maximal timeout seconds for the shutdown.", + type => 'integer', + minimum => 0, + optional => 1, + }, + }, + }, + returns => { + type => 'string', + }, + code => sub { + my ($param) = @_; + + my $rpcenv = PVE::RPCEnvironment::get(); + my $authuser = $rpcenv->get_user(); + + my $node = extract_param($param, 'node'); + my $vmid = extract_param($param, 'vmid'); + + my $qmpstatus = eval { + PVE::QemuConfig::assert_config_exists_on_node($vmid); + mon_cmd($vmid, "query-status"); + }; + my $err = $@ if $@; + + if (!$err && $qmpstatus->{status} eq "paused") { + die "VM is paused - cannot shutdown\n"; + } + + die "VM $vmid not running\n" if !PVE::QemuServer::check_running($vmid); + + my $realcmd = sub { + my $upid = shift; + + syslog('info', "requesting reboot of VM $vmid: $upid\n"); + PVE::QemuServer::vm_reboot($vmid, $param->{timeout}); + return; + }; + + return $rpcenv->fork_worker('qmreboot', $vmid, $authuser, $realcmd); + }}); + __PACKAGE__->register_method({ name => 'vm_suspend', path => '{vmid}/status/suspend', @@ -2356,6 +2501,9 @@ __PACKAGE__->register_method({ proxyto => 'node', description => "Suspend virtual machine.", permissions => { + description => "You need 'VM.PowerMgmt' on /vms/{vmid}, and if you have set 'todisk',". + " you need also 'VM.Config.Disk' on /vms/{vmid} and 'Datastore.AllocateSpace'". + " on the storage for the vmstate.", check => ['perm', '/vms/{vmid}', [ 'VM.PowerMgmt' ]], }, parameters => { @@ -2404,6 +2552,20 @@ __PACKAGE__->register_method({ die "Cannot suspend HA managed VM to disk\n" if $todisk && PVE::HA::Config::vm_is_ha_managed($vmid); + # early check for storage permission, for better user feedback + if ($todisk) { + $rpcenv->check_vm_perm($authuser, $vmid, undef, ['VM.Config.Disk']); + + if (!$statestorage) { + # get statestorage from config if none is given + my $conf = PVE::QemuConfig->load_config($vmid); + my $storecfg = PVE::Storage::config(); + $statestorage = PVE::QemuServer::find_vmstate_storage($conf, $storecfg); + } + + $rpcenv->check($authuser, "/storage/$statestorage", ['Datastore.AllocateSpace']); + } + my $realcmd = sub { my $upid = shift; @@ -2458,6 +2620,8 @@ __PACKAGE__->register_method({ if $skiplock && $authuser ne 'root@pam'; my $nocheck = extract_param($param, 'nocheck'); + raise_param_exc({ nocheck => "Only root may use this option." 
}) + if $nocheck && $authuser ne 'root@pam'; my $to_disk_suspended; eval { @@ -2479,7 +2643,7 @@ __PACKAGE__->register_method({ PVE::QemuServer::vm_resume($vmid, $skiplock, $nocheck); } else { my $storecfg = PVE::Storage::config(); - PVE::QemuServer::vm_start($storecfg, $vmid, undef, $skiplock); + PVE::QemuServer::vm_start($storecfg, $vmid, { skiplock => $skiplock }); } return; @@ -2680,32 +2844,24 @@ __PACKAGE__->register_method({ my ($param) = @_; my $rpcenv = PVE::RPCEnvironment::get(); - - my $authuser = $rpcenv->get_user(); + my $authuser = $rpcenv->get_user(); my $node = extract_param($param, 'node'); - my $vmid = extract_param($param, 'vmid'); - my $newid = extract_param($param, 'newid'); - my $pool = extract_param($param, 'pool'); - - if (defined($pool)) { - $rpcenv->check_pool_exist($pool); - } + $rpcenv->check_pool_exist($pool) if defined($pool); my $snapname = extract_param($param, 'snapname'); - my $storage = extract_param($param, 'storage'); - my $format = extract_param($param, 'format'); - my $target = extract_param($param, 'target'); my $localnode = PVE::INotify::nodename(); - undef $target if $target && ($target eq $localnode || $target eq 'localhost'); + if ($target && ($target eq $localnode || $target eq 'localhost')) { + undef $target; + } PVE::Cluster::check_node_exists($target) if $target; @@ -2723,33 +2879,23 @@ __PACKAGE__->register_method({ } } - PVE::Cluster::check_cfs_quorum(); + PVE::Cluster::check_cfs_quorum(); my $running = PVE::QemuServer::check_running($vmid) || 0; - # exclusive lock if VM is running - else shared lock is enough; - my $shared_lock = $running ? 0 : 1; - my $clonefn = sub { - - # do all tests after lock - # we also try to do all tests before we fork the worker + # do all tests after lock but before forking worker - if possible my $conf = PVE::QemuConfig->load_config($vmid); - PVE::QemuConfig->check_lock($conf); my $verify_running = PVE::QemuServer::check_running($vmid) || 0; - die "unexpected state change\n" if $verify_running != $running; die "snapshot '$snapname' does not exist\n" if $snapname && !defined( $conf->{snapshots}->{$snapname}); - my $full = extract_param($param, 'full'); - if (!defined($full)) { - $full = !PVE::QemuConfig->is_template($conf); - } + my $full = extract_param($param, 'full') // !PVE::QemuConfig->is_template($conf); die "parameter 'storage' not allowed for linked clones\n" if defined($storage) && !$full; @@ -2761,10 +2907,10 @@ __PACKAGE__->register_method({ my $sharedvm = &$check_storage_access_clone($rpcenv, $authuser, $storecfg, $oldconf, $storage); - die "can't clone VM to node '$target' (VM uses local storage)\n" if $target && !$sharedvm; + die "can't clone VM to node '$target' (VM uses local storage)\n" + if $target && !$sharedvm; my $conffile = PVE::QemuConfig->config_file($newid); - die "unable to create VM $newid: config file already exists\n" if -f $conffile; @@ -2817,8 +2963,7 @@ __PACKAGE__->register_method({ my $smbios1 = PVE::QemuServer::parse_smbios1($newconf->{smbios1} || ''); $smbios1->{uuid} = PVE::QemuServer::generate_uuid(); $newconf->{smbios1} = PVE::QemuServer::print_smbios1($smbios1); - - # auto generate a new vmgenid if the option was set + # auto generate a new vmgenid only if the option was set for template if ($newconf->{vmgenid}) { $newconf->{vmgenid} = PVE::QemuServer::generate_uuid(); } @@ -2828,11 +2973,7 @@ __PACKAGE__->register_method({ if ($param->{name}) { $newconf->{name} = $param->{name}; } else { - if ($oldconf->{name}) { - $newconf->{name} = "Copy-of-$oldconf->{name}"; - } else { 
- $newconf->{name} = "Copy-of-VM-$vmid"; - } + $newconf->{name} = "Copy-of-VM-" . ($oldconf->{name} // $vmid); } if ($param->{description}) { @@ -2840,6 +2981,7 @@ __PACKAGE__->register_method({ } # create empty/temp config - this fails if VM already exists on other node + # FIXME use PVE::QemuConfig->create_and_lock_config and adapt code PVE::Tools::file_set_contents($conffile, "# qmclone temporary file\nlock: clone\n"); my $realcmd = sub { @@ -2864,6 +3006,7 @@ __PACKAGE__->register_method({ foreach my $opt (keys %$drives) { my $drive = $drives->{$opt}; my $skipcomplete = ($total_jobs != $i); # finish after last drive + my $completion = $skipcomplete ? 'skip' : 'complete'; my $src_sid = PVE::Storage::parse_volume_id($drive->{file}); my $storage_list = [ $src_sid ]; @@ -2872,9 +3015,9 @@ __PACKAGE__->register_method({ my $newdrive = PVE::QemuServer::clone_disk($storecfg, $vmid, $running, $opt, $drive, $snapname, $newid, $storage, $format, $fullclone->{$opt}, $newvollist, - $jobs, $skipcomplete, $oldconf->{agent}, $clonelimit); + $jobs, $completion, $oldconf->{agent}, $clonelimit, $oldconf); - $newconf->{$opt} = PVE::QemuServer::print_drive($vmid, $newdrive); + $newconf->{$opt} = PVE::QemuServer::print_drive($newdrive); PVE::QemuConfig->write_config($newid, $newconf); $i++; @@ -2904,16 +3047,18 @@ __PACKAGE__->register_method({ PVE::AccessControl::add_vm_to_pool($newid, $pool) if $pool; }; if (my $err = $@) { - unlink $conffile; - eval { PVE::QemuServer::qemu_blockjobs_cancel($vmid, $jobs) }; - sleep 1; # some storage like rbd need to wait before release volume - really? foreach my $volid (@$newvollist) { eval { PVE::Storage::vdisk_free($storecfg, $volid); }; warn $@ if $@; } + + PVE::Firewall::remove_vmfw_conf($newid); + + unlink $conffile; # avoid races -> last thing before die + die "clone failed: $err"; } @@ -2925,11 +3070,17 @@ __PACKAGE__->register_method({ return $rpcenv->fork_worker('qmclone', $vmid, $authuser, $realcmd); }; - return PVE::QemuConfig->lock_config_mode($vmid, 1, $shared_lock, sub { - # Aquire exclusive lock lock for $newid + # Aquire exclusive lock lock for $newid + my $lock_target_vm = sub { return PVE::QemuConfig->lock_config_full($newid, 1, $clonefn); - }); + }; + # exclusive lock if VM is running - else shared lock is enough; + if ($running) { + return PVE::QemuConfig->lock_config_full($vmid, 1, $lock_target_vm); + } else { + return PVE::QemuConfig->lock_config_shared($vmid, 1, $lock_target_vm); + } }}); __PACKAGE__->register_method({ @@ -2954,7 +3105,7 @@ __PACKAGE__->register_method({ disk => { type => 'string', description => "The disk you want to move.", - enum => [ PVE::QemuServer::valid_drive_names() ], + enum => [PVE::QemuServer::Drive::valid_drive_names()], }, storage => get_standard_option('pve-storage-id', { description => "Target storage.", @@ -2995,51 +3146,43 @@ __PACKAGE__->register_method({ my ($param) = @_; my $rpcenv = PVE::RPCEnvironment::get(); - my $authuser = $rpcenv->get_user(); my $node = extract_param($param, 'node'); - my $vmid = extract_param($param, 'vmid'); - my $digest = extract_param($param, 'digest'); - my $disk = extract_param($param, 'disk'); - my $storeid = extract_param($param, 'storage'); - my $format = extract_param($param, 'format'); my $storecfg = PVE::Storage::config(); my $updatefn = sub { - my $conf = PVE::QemuConfig->load_config($vmid); - PVE::QemuConfig->check_lock($conf); - die "checksum missmatch (file change by other user?)\n" + die "VM config checksum missmatch (file change by other user?)\n" if $digest && $digest 
ne $conf->{digest}; die "disk '$disk' does not exist\n" if !$conf->{$disk}; my $drive = PVE::QemuServer::parse_drive($disk, $conf->{$disk}); - my $old_volid = $drive->{file} || die "disk '$disk' has no associated volume\n"; - + die "disk '$disk' has no associated volume\n" if !$drive->{file}; die "you can't move a cdrom\n" if PVE::QemuServer::drive_is_cdrom($drive, 1); + my $old_volid = $drive->{file}; my $oldfmt; my ($oldstoreid, $oldvolname) = PVE::Storage::parse_volume_id($old_volid); if ($oldvolname =~ m/\.(raw|qcow2|vmdk)$/){ $oldfmt = $1; } - die "you can't move on the same storage with same format\n" if $oldstoreid eq $storeid && + die "you can't move to the same storage with same format\n" if $oldstoreid eq $storeid && (!$format || !$oldfmt || $oldfmt eq $format); # this only checks snapshots because $disk is passed! - my $snapshotted = PVE::QemuServer::is_volume_in_use($storecfg, $conf, $disk, $old_volid); + my $snapshotted = PVE::QemuServer::Drive::is_volume_in_use($storecfg, $conf, $disk, $old_volid); die "you can't move a disk with snapshots and delete the source\n" if $snapshotted && $param->{delete}; @@ -3050,7 +3193,6 @@ __PACKAGE__->register_method({ PVE::Storage::activate_volumes($storecfg, [ $drive->{file} ]); my $realcmd = sub { - my $newvollist = []; eval { @@ -3066,9 +3208,9 @@ __PACKAGE__->register_method({ my $movelimit = PVE::Storage::get_bandwidth_limit('move', [$oldstoreid, $storeid], $bwlimit); my $newdrive = PVE::QemuServer::clone_disk($storecfg, $vmid, $running, $disk, $drive, undef, - $vmid, $storeid, $format, 1, $newvollist, undef, undef, undef, $movelimit); + $vmid, $storeid, $format, 1, $newvollist, undef, undef, undef, $movelimit, $conf); - $conf->{$disk} = PVE::QemuServer::print_drive($vmid, $newdrive); + $conf->{$disk} = PVE::QemuServer::print_drive($newdrive); PVE::QemuConfig->add_unused_volume($conf, $old_volid) if !$param->{delete}; @@ -3078,8 +3220,9 @@ __PACKAGE__->register_method({ PVE::QemuConfig->write_config($vmid, $conf); - if ($running && PVE::QemuServer::parse_guest_agent($conf)->{fstrim_cloned_disks} && PVE::QemuServer::qga_check_running($vmid)) { - eval { PVE::QemuServer::vm_mon_cmd($vmid, "guest-fstrim"); }; + my $do_trim = PVE::QemuServer::parse_guest_agent($conf)->{fstrim_cloned_disks}; + if ($running && $do_trim && PVE::QemuServer::qga_check_running($vmid)) { + eval { mon_cmd($vmid, "guest-fstrim") }; } eval { @@ -3090,11 +3233,10 @@ __PACKAGE__->register_method({ warn $@ if $@; }; if (my $err = $@) { - - foreach my $volid (@$newvollist) { - eval { PVE::Storage::vdisk_free($storecfg, $volid); }; - warn $@ if $@; - } + foreach my $volid (@$newvollist) { + eval { PVE::Storage::vdisk_free($storecfg, $volid) }; + warn $@ if $@; + } die "storage migration failed: $err"; } @@ -3113,6 +3255,138 @@ __PACKAGE__->register_method({ return PVE::QemuConfig->lock_config($vmid, $updatefn); }}); +my $check_vm_disks_local = sub { + my ($storecfg, $vmconf, $vmid) = @_; + + my $local_disks = {}; + + # add some more information to the disks e.g. 
cdrom + PVE::QemuServer::foreach_volid($vmconf, sub { + my ($volid, $attr) = @_; + + my ($storeid, $volname) = PVE::Storage::parse_volume_id($volid, 1); + if ($storeid) { + my $scfg = PVE::Storage::storage_config($storecfg, $storeid); + return if $scfg->{shared}; + } + # The shared attr here is just a special case where the vdisk + # is marked as shared manually + return if $attr->{shared}; + return if $attr->{cdrom} and $volid eq "none"; + + if (exists $local_disks->{$volid}) { + @{$local_disks->{$volid}}{keys %$attr} = values %$attr + } else { + $local_disks->{$volid} = $attr; + # ensure volid is present in case it's needed + $local_disks->{$volid}->{volid} = $volid; + } + }); + + return $local_disks; +}; + +__PACKAGE__->register_method({ + name => 'migrate_vm_precondition', + path => '{vmid}/migrate', + method => 'GET', + protected => 1, + proxyto => 'node', + description => "Get preconditions for migration.", + permissions => { + check => ['perm', '/vms/{vmid}', [ 'VM.Migrate' ]], + }, + parameters => { + additionalProperties => 0, + properties => { + node => get_standard_option('pve-node'), + vmid => get_standard_option('pve-vmid', { completion => \&PVE::QemuServer::complete_vmid }), + target => get_standard_option('pve-node', { + description => "Target node.", + completion => \&PVE::Cluster::complete_migration_target, + optional => 1, + }), + }, + }, + returns => { + type => "object", + properties => { + running => { type => 'boolean' }, + allowed_nodes => { + type => 'array', + optional => 1, + description => "List nodes allowed for offline migration, only passed if VM is offline" + }, + not_allowed_nodes => { + type => 'object', + optional => 1, + description => "List not allowed nodes with additional informations, only passed if VM is offline" + }, + local_disks => { + type => 'array', + description => "List local disks including CD-Rom, unsused and not referenced disks" + }, + local_resources => { + type => 'array', + description => "List local resources e.g. pci, usb" + } + }, + }, + code => sub { + my ($param) = @_; + + my $rpcenv = PVE::RPCEnvironment::get(); + + my $authuser = $rpcenv->get_user(); + + PVE::Cluster::check_cfs_quorum(); + + my $res = {}; + + my $vmid = extract_param($param, 'vmid'); + my $target = extract_param($param, 'target'); + my $localnode = PVE::INotify::nodename(); + + + # test if VM exists + my $vmconf = PVE::QemuConfig->load_config($vmid); + my $storecfg = PVE::Storage::config(); + + + # try to detect errors early + PVE::QemuConfig->check_lock($vmconf); + + $res->{running} = PVE::QemuServer::check_running($vmid) ? 
1:0; + + # if vm is not running, return target nodes where local storage is available + # for offline migration + if (!$res->{running}) { + $res->{allowed_nodes} = []; + my $checked_nodes = PVE::QemuServer::check_local_storage_availability($vmconf, $storecfg); + delete $checked_nodes->{$localnode}; + + foreach my $node (keys %$checked_nodes) { + if (!defined $checked_nodes->{$node}->{unavailable_storages}) { + push @{$res->{allowed_nodes}}, $node; + } + + } + $res->{not_allowed_nodes} = $checked_nodes; + } + + + my $local_disks = &$check_vm_disks_local($storecfg, $vmconf, $vmid); + $res->{local_disks} = [ values %$local_disks ];; + + my $local_resources = PVE::QemuServer::check_local_resources($vmconf, 1); + + $res->{local_resources} = $local_resources; + + return $res; + + + }}); + __PACKAGE__->register_method({ name => 'migrate_vm', path => '{vmid}/migrate', @@ -3134,7 +3408,7 @@ __PACKAGE__->register_method({ }), online => { type => 'boolean', - description => "Use online/live migration.", + description => "Use online/live migration if VM is running. Ignored if VM is stopped.", optional => 1, }, force => { @@ -3158,10 +3432,8 @@ __PACKAGE__->register_method({ description => "Enable live storage migration for local disk", optional => 1, }, - targetstorage => get_standard_option('pve-storage-id', { - description => "Default target storage.", - optional => 1, - completion => \&PVE::QemuServer::complete_storage, + targetstorage => get_standard_option('pve-targetstorage', { + completion => \&PVE::QemuServer::complete_migration_storage, }), bwlimit => { description => "Override I/O bandwidth limit (in KiB/s).", @@ -3195,9 +3467,6 @@ __PACKAGE__->register_method({ my $vmid = extract_param($param, 'vmid'); - raise_param_exc({ targetstorage => "Live storage migration can only be done online." }) - if !$param->{online} && $param->{targetstorage}; - raise_param_exc({ force => "Only root may use this option." }) if $param->{force} && $authuser ne 'root@pam'; @@ -3216,14 +3485,42 @@ __PACKAGE__->register_method({ PVE::QemuConfig->check_lock($conf); if (PVE::QemuServer::check_running($vmid)) { - die "cant migrate running VM without --online\n" - if !$param->{online}; + die "can't migrate running VM without --online\n" if !$param->{online}; + } else { + warn "VM isn't running. 
Doing offline migration instead.\n" if $param->{online}; + $param->{online} = 0; } my $storecfg = PVE::Storage::config(); - if( $param->{targetstorage}) { - PVE::Storage::storage_check_node($storecfg, $param->{targetstorage}, $target); + if (my $targetstorage = $param->{targetstorage}) { + my $check_storage = sub { + my ($target_sid) = @_; + PVE::Storage::storage_check_node($storecfg, $target_sid, $target); + $rpcenv->check($authuser, "/storage/$target_sid", ['Datastore.AllocateSpace']); + my $scfg = PVE::Storage::storage_config($storecfg, $target_sid); + raise_param_exc({ targetstorage => "storage '$target_sid' does not support vm images"}) + if !$scfg->{content}->{images}; + }; + + my $storagemap = eval { PVE::JSONSchema::parse_idmap($targetstorage, 'pve-storage-id') }; + raise_param_exc({ targetstorage => "failed to parse storage map: $@" }) + if $@; + + $rpcenv->check_vm_perm($authuser, $vmid, undef, ['VM.Config.Disk']) + if !defined($storagemap->{identity}); + + foreach my $source (values %{$storagemap->{entries}}) { + $check_storage->($source); + } + + $check_storage->($storagemap->{default}) + if $storagemap->{default}; + + PVE::QemuServer::check_storage_availability($storecfg, $conf, $target) + if $storagemap->{identity}; + + $param->{storagemap} = $storagemap; } else { PVE::QemuServer::check_storage_availability($storecfg, $conf, $target); } @@ -3301,7 +3598,7 @@ __PACKAGE__->register_method({ my $res = ''; eval { - $res = PVE::QemuServer::vm_human_monitor_command($vmid, $param->{command}); + $res = PVE::QemuServer::Monitor::hmp_cmd($vmid, $param->{command}); }; $res = "ERROR: $@" if $@; @@ -3327,7 +3624,7 @@ __PACKAGE__->register_method({ disk => { type => 'string', description => "The disk you want to resize.", - enum => [PVE::QemuServer::valid_drive_names()], + enum => [PVE::QemuServer::Drive::valid_drive_names()], }, size => { type => 'string', @@ -3397,6 +3694,8 @@ __PACKAGE__->register_method({ PVE::Storage::activate_volumes($storecfg, [$volid]); my $size = PVE::Storage::volume_size_info($storecfg, $volid, 5); + die "Could not determine current size of volume '$volid'\n" if !defined($size); + die "internal error" if $sizestr !~ m/^(\+)?(\d+(\.\d+)?)([KMGT])?$/; my ($ext, $newsize, $unit) = ($1, $2, $4); if ($unit) { @@ -3422,7 +3721,7 @@ __PACKAGE__->register_method({ PVE::QemuServer::qemu_block_resize($vmid, "drive-$disk", $storecfg, $volid, $newsize); $drive->{size} = $newsize; - $conf->{$disk} = PVE::QemuServer::print_drive($vmid, $drive); + $conf->{$disk} = PVE::QemuServer::print_drive($drive); PVE::QemuConfig->write_config($vmid, $conf); }; @@ -3566,6 +3865,9 @@ __PACKAGE__->register_method({ die "unable to use snapshot name 'current' (reserved name)\n" if $snapname eq 'current'; + die "unable to use snapshot name 'pending' (reserved name)\n" + if lc($snapname) eq 'pending'; + my $realcmd = sub { PVE::Cluster::log_msg('info', $authuser, "snapshot VM $vmid: $snapname"); PVE::QemuConfig->snapshot_create($vmid, $snapname, $param->{vmstate}, @@ -3674,7 +3976,7 @@ __PACKAGE__->register_method({ proxyto => 'node', description => "Get snapshot configuration", permissions => { - check => ['perm', '/vms/{vmid}', [ 'VM.Snapshot', 'VM.Snapshot.Rollback' ], any => 1], + check => ['perm', '/vms/{vmid}', [ 'VM.Snapshot', 'VM.Snapshot.Rollback', 'VM.Audit' ], any => 1], }, parameters => { additionalProperties => 0, @@ -3821,7 +4123,7 @@ __PACKAGE__->register_method({ optional => 1, type => 'string', description => "If you want to convert only 1 disk to base image.", - enum => 
[PVE::QemuServer::valid_drive_names()], + enum => [PVE::QemuServer::Drive::valid_drive_names()], }, }, @@ -3869,4 +4171,36 @@ __PACKAGE__->register_method({ return undef; }}); +__PACKAGE__->register_method({ + name => 'cloudinit_generated_config_dump', + path => '{vmid}/cloudinit/dump', + method => 'GET', + proxyto => 'node', + description => "Get automatically generated cloudinit config.", + permissions => { + check => ['perm', '/vms/{vmid}', [ 'VM.Audit' ]], + }, + parameters => { + additionalProperties => 0, + properties => { + node => get_standard_option('pve-node'), + vmid => get_standard_option('pve-vmid', { completion => \&PVE::QemuServer::complete_vmid }), + type => { + description => 'Config type.', + type => 'string', + enum => ['user', 'network', 'meta'], + }, + }, + }, + returns => { + type => 'string', + }, + code => sub { + my ($param) = @_; + + my $conf = PVE::QemuConfig->load_config($param->{vmid}); + + return PVE::QemuServer::Cloudinit::dump_cloudinit_config($conf, $param->{vmid}, $param->{type}); + }}); + 1;