X-Git-Url: https://git.proxmox.com/?a=blobdiff_plain;f=PVE%2FAPI2%2FQemu.pm;h=ee77479839f3f7cf767d5389c3c2c644967caf01;hb=cd11416f7a5dbc3204aafdd89d372cd867a7d08;hp=dccfbcc00ece08e77bc12bf5223af181d7f18a70;hpb=0f56d571e424c2bd2aab2478f5ed9a96436aa1ab;p=qemu-server.git diff --git a/PVE/API2/Qemu.pm b/PVE/API2/Qemu.pm index dccfbcc..985a9f8 100644 --- a/PVE/API2/Qemu.pm +++ b/PVE/API2/Qemu.pm @@ -3,11 +3,13 @@ package PVE::API2::Qemu; use strict; use warnings; use Cwd 'abs_path'; +use Net::SSLeay; +use UUID; use PVE::Cluster qw (cfs_read_file cfs_write_file);; use PVE::SafeSyslog; use PVE::Tools qw(extract_param); -use PVE::Exception qw(raise raise_param_exc); +use PVE::Exception qw(raise raise_param_exc raise_perm_exc); use PVE::Storage; use PVE::JSONSchema qw(get_standard_option); use PVE::RESTHandler; @@ -16,6 +18,8 @@ use PVE::QemuMigrate; use PVE::RPCEnvironment; use PVE::AccessControl; use PVE::INotify; +use PVE::Network; +use PVE::API2::Firewall::VM; use Data::Dumper; # fixme: remove @@ -58,6 +62,43 @@ my $check_storage_access = sub { }); }; +my $check_storage_access_clone = sub { + my ($rpcenv, $authuser, $storecfg, $conf, $storage) = @_; + + my $sharedvm = 1; + + PVE::QemuServer::foreach_drive($conf, sub { + my ($ds, $drive) = @_; + + my $isCDROM = PVE::QemuServer::drive_is_cdrom($drive); + + my $volid = $drive->{file}; + + return if !$volid || $volid eq 'none'; + + if ($isCDROM) { + if ($volid eq 'cdrom') { + $rpcenv->check($authuser, "/", ['Sys.Console']); + } else { + # we simply allow access + my ($sid, $volname) = PVE::Storage::parse_volume_id($volid); + my $scfg = PVE::Storage::storage_config($storecfg, $sid); + $sharedvm = 0 if !$scfg->{shared}; + + } + } else { + my ($sid, $volname) = PVE::Storage::parse_volume_id($volid); + my $scfg = PVE::Storage::storage_config($storecfg, $sid); + $sharedvm = 0 if !$scfg->{shared}; + + $sid = $storage if $storage; + $rpcenv->check($authuser, "/storage/$sid", ['Datastore.AllocateSpace']); + } + }); + + return $sharedvm; +}; + # Note: $pool is only needed when creating a VM, because pool permissions # are automatically inherited if VM already exists inside a pool. 
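The $check_storage_access_clone helper added above walks every drive of the source config: a passed-through physical 'cdrom' requires Sys.Console on /, ISO-backed CD-ROM drives only influence the shared/local decision, and regular disks additionally require Datastore.AllocateSpace on the source storage (or on the requested target storage for a full clone); the returned $sharedvm flag is what the clone code later uses to decide whether the copy may be created on another node. The standalone sketch below mirrors that walk under stubbed stand-ins; parse_sid, check_perm and %storage_is_shared are illustrative placeholders, not PVE API calls.

#!/usr/bin/perl
use strict;
use warnings;

# Hypothetical stand-ins for PVE::Storage::parse_volume_id/storage_config
# and $rpcenv->check() - illustration only, not the PVE API.
my %storage_is_shared = ( 'local' => 0, 'ceph' => 1 );
sub parse_sid  { my ($volid) = @_; return (split(/:/, $volid, 2))[0]; }
sub check_perm { my ($path, $priv) = @_; print "need $priv on $path\n"; }

sub sharedness_walk {
    my ($drives, $target_storage) = @_;  # $drives: { ds => { file => ..., cdrom => 0|1 } }
    my $sharedvm = 1;
    foreach my $ds (sort keys %$drives) {
	my $volid = $drives->{$ds}->{file};
	next if !$volid || $volid eq 'none';
	if ($drives->{$ds}->{cdrom} && $volid eq 'cdrom') {
	    check_perm("/", 'Sys.Console');  # physical CD-ROM passthrough
	    next;
	}
	my $sid = parse_sid($volid);
	$sharedvm = 0 if !$storage_is_shared{$sid};  # any local volume pins the VM to this node
	if (!$drives->{$ds}->{cdrom}) {
	    $sid = $target_storage if $target_storage;  # full clone allocates on the target storage
	    check_perm("/storage/$sid", 'Datastore.AllocateSpace');
	}
    }
    return $sharedvm;
}

my $drives = {
    ide2    => { file => 'local:iso/debian.iso', cdrom => 1 },
    virtio0 => { file => 'ceph:vm-100-disk-1' },
};
print "sharedvm=", sharedness_walk($drives), "\n";  # 0: the ISO sits on non-shared 'local' storage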
my $create_disks = sub { @@ -72,7 +113,8 @@ my $create_disks = sub { my $volid = $disk->{file}; if (!$volid || $volid eq 'none' || $volid eq 'cdrom') { - $res->{$ds} = $settings->{$ds}; + delete $disk->{size}; + $res->{$ds} = PVE::QemuServer::print_drive($vmid, $disk); } elsif ($volid =~ m/^(([^:\s]+):)?(\d+(\.\d+)?)$/) { my ($storeid, $size) = ($2 || $default_storage, $3); die "no storage ID specified (and no default storage)\n" if !$storeid; @@ -81,28 +123,35 @@ my $create_disks = sub { my $volid = PVE::Storage::vdisk_alloc($storecfg, $storeid, $vmid, $fmt, undef, $size*1024*1024); $disk->{file} = $volid; + $disk->{size} = $size*1024*1024*1024; push @$vollist, $volid; delete $disk->{format}; # no longer needed $res->{$ds} = PVE::QemuServer::print_drive($vmid, $disk); } else { - my $path = $rpcenv->check_volume_access($authuser, $storecfg, $vmid, $volid); - PVE::Storage::activate_volumes($storecfg, [ $volid ]) - if PVE::Storage::parse_volume_id ($volid, 1); - my ($storeid, $volname) = PVE::Storage::parse_volume_id($volid); - my $dl = PVE::Storage::vdisk_list($storecfg, $storeid, undef); - my $foundvolid = undef; - - PVE::Storage::foreach_volid($dl, sub { - my ($volumeid) = @_; - if($volumeid eq $volid) { - $foundvolid = 1; - return; - } - }); - - die "image '$path' does not exists\n" if (!(-f $path || -b $path || $foundvolid)); - $res->{$ds} = $settings->{$ds}; + $rpcenv->check_volume_access($authuser, $storecfg, $vmid, $volid); + + my $volid_is_new = 1; + + if ($conf->{$ds}) { + my $olddrive = PVE::QemuServer::parse_drive($ds, $conf->{$ds}); + $volid_is_new = undef if $olddrive->{file} && $olddrive->{file} eq $volid; + } + + if ($volid_is_new) { + + my ($storeid, $volname) = PVE::Storage::parse_volume_id($volid, 1); + + PVE::Storage::activate_volumes($storecfg, [ $volid ]) if $storeid; + + my $size = PVE::Storage::volume_size_info($storecfg, $volid); + + die "volume $volid does not exists\n" if !$size; + + $disk->{size} = $size; + } + + $res->{$ds} = PVE::QemuServer::print_drive($vmid, $disk); } }); @@ -134,17 +183,17 @@ my $check_vm_modify_config_perm = sub { next if PVE::QemuServer::valid_drivename($opt); if ($opt eq 'sockets' || $opt eq 'cores' || - $opt eq 'cpu' || $opt eq 'smp' || + $opt eq 'cpu' || $opt eq 'smp' || $opt eq 'cpulimit' || $opt eq 'cpuunits') { $rpcenv->check_vm_perm($authuser, $vmid, $pool, ['VM.Config.CPU']); } elsif ($opt eq 'boot' || $opt eq 'bootdisk') { $rpcenv->check_vm_perm($authuser, $vmid, $pool, ['VM.Config.Disk']); - } elsif ($opt eq 'memory' || $opt eq 'balloon') { + } elsif ($opt eq 'memory' || $opt eq 'balloon' || $opt eq 'shares') { $rpcenv->check_vm_perm($authuser, $vmid, $pool, ['VM.Config.Memory']); } elsif ($opt eq 'args' || $opt eq 'lock') { die "only root can set '$opt' config\n"; - } elsif ($opt eq 'cpu' || $opt eq 'kvm' || $opt eq 'acpi' || - $opt eq 'vga' || $opt eq 'watchdog' || $opt eq 'tablet') { + } elsif ($opt eq 'cpu' || $opt eq 'kvm' || $opt eq 'acpi' || $opt eq 'machine' || + $opt eq 'vga' || $opt eq 'watchdog' || $opt eq 'tablet' || $opt eq 'smbios1') { $rpcenv->check_vm_perm($authuser, $vmid, $pool, ['VM.Config.HWType']); } elsif ($opt =~ m/^net\d+$/) { $rpcenv->check_vm_perm($authuser, $vmid, $pool, ['VM.Config.Network']); @@ -201,17 +250,18 @@ __PACKAGE__->register_method({ return $res; }}); + + __PACKAGE__->register_method({ name => 'create_vm', path => '', method => 'POST', description => "Create or restore a virtual machine.", permissions => { - description => "You need 'VM.Allocate' permissions on /vms/{vmid} or on the VM pool 
/pool/{pool}. If you create disks you need 'Datastore.AllocateSpace' on any used storage.", - check => [ 'or', - [ 'perm', '/vms/{vmid}', ['VM.Allocate']], - [ 'perm', '/pool/{pool}', ['VM.Allocate'], require_param => 'pool'], - ], + description => "You need 'VM.Allocate' permissions on /vms/{vmid} or on the VM pool /pool/{pool}. " . + "For restore (option 'archive'), it is enough if the user has 'VM.Backup' permission and the VM already exists. " . + "If you create disks you need 'Datastore.AllocateSpace' on any used storage.", + user => 'all', # check inside }, protected => 1, proxyto => 'node', @@ -243,7 +293,7 @@ __PACKAGE__->register_method({ description => "Assign a unique random ethernet address.", requires => 'archive', }, - pool => { + pool => { optional => 1, type => 'string', format => 'pve-poolid', description => "Add the VM to the specified pool.", @@ -271,7 +321,7 @@ __PACKAGE__->register_method({ my $force = extract_param($param, 'force'); my $unique = extract_param($param, 'unique'); - + my $pool = extract_param($param, 'pool'); my $filename = PVE::QemuServer::config_file($vmid); @@ -282,11 +332,22 @@ __PACKAGE__->register_method({ if (defined($pool)) { $rpcenv->check_pool_exist($pool); - } + } $rpcenv->check($authuser, "/storage/$storage", ['Datastore.AllocateSpace']) if defined($storage); + if ($rpcenv->check($authuser, "/vms/$vmid", ['VM.Allocate'], 1)) { + # OK + } elsif ($pool && $rpcenv->check($authuser, "/pool/$pool", ['VM.Allocate'], 1)) { + # OK + } elsif ($archive && $force && (-f $filename) && + $rpcenv->check($authuser, "/vms/$vmid", ['VM.Backup'], 1)) { + # OK: user has VM.Backup permissions, and want to restore an existing VM + } else { + raise_perm_exc(); + } + if (!$archive) { &$resolve_cdrom_alias($param); @@ -313,36 +374,20 @@ __PACKAGE__->register_method({ die "pipe requires cli environment\n" if $rpcenv->{type} ne 'cli'; } else { - my $path = $rpcenv->check_volume_access($authuser, $storecfg, $vmid, $archive); - - PVE::Storage::activate_volumes($storecfg, [ $archive ]) - if PVE::Storage::parse_volume_id ($archive, 1); - - die "can't find archive file '$archive'\n" if !($path && -f $path); - $archive = $path; + $rpcenv->check_volume_access($authuser, $storecfg, $vmid, $archive); + $archive = PVE::Storage::abs_filesystem_path($storecfg, $archive); } } - my $addVMtoPoolFn = sub { - my $usercfg = cfs_read_file("user.cfg"); - if (my $data = $usercfg->{pools}->{$pool}) { - $data->{vms}->{$vmid} = 1; - $usercfg->{vms}->{$vmid} = $pool; - cfs_write_file("user.cfg", $usercfg); - } - }; - my $restorefn = sub { + # fixme: this test does not work if VM exists on other node! 
if (-f $filename) { die "unable to restore vm $vmid: config file already exists\n" if !$force; die "unable to restore vm $vmid: vm is running\n" if PVE::QemuServer::check_running($vmid); - - # destroy existing data - keep empty config - PVE::QemuServer::destroy_vm($storecfg, $vmid, 1); } my $realcmd = sub { @@ -351,7 +396,7 @@ __PACKAGE__->register_method({ pool => $pool, unique => $unique }); - PVE::AccessControl::lock_user_config($addVMtoPoolFn, "can't add VM to pool") if $pool; + PVE::AccessControl::add_vm_to_pool($vmid, $pool) if $pool; }; return $rpcenv->fork_worker('qmrestore', $vmid, $authuser, $realcmd); @@ -387,6 +432,14 @@ __PACKAGE__->register_method({ $conf->{bootdisk} = $firstdisk; } + # auto generate uuid if user did not specify smbios1 option + if (!$conf->{smbios1}) { + my ($uuid, $uuid_str); + UUID::generate($uuid); + UUID::unparse($uuid, $uuid_str); + $conf->{smbios1} = "uuid=$uuid_str"; + } + PVE::QemuServer::update_config_nolock($vmid, $conf); }; @@ -400,7 +453,7 @@ __PACKAGE__->register_method({ die "create failed - $err"; } - PVE::AccessControl::lock_user_config($addVMtoPoolFn, "can't add VM to pool") if $pool; + PVE::AccessControl::add_vm_to_pool($vmid, $pool) if $pool; }; return $rpcenv->fork_worker('qmcreate', $vmid, $authuser, $realcmd); @@ -444,14 +497,25 @@ __PACKAGE__->register_method({ { subdir => 'unlink' }, { subdir => 'vncproxy' }, { subdir => 'migrate' }, + { subdir => 'resize' }, + { subdir => 'move' }, { subdir => 'rrd' }, { subdir => 'rrddata' }, { subdir => 'monitor' }, + { subdir => 'snapshot' }, + { subdir => 'spiceproxy' }, + { subdir => 'sendkey' }, + { subdir => 'firewall' }, ]; return $res; }}); +__PACKAGE__->register_method ({ + subclass => "PVE::API2::Firewall::VM", + path => '{vmid}/firewall', +}); + __PACKAGE__->register_method({ name => 'rrd', path => '{vmid}/rrd', @@ -570,6 +634,8 @@ __PACKAGE__->register_method({ my $conf = PVE::QemuServer::load_config($param->{vmid}); + delete $conf->{snapshots}; + return $conf; }}); @@ -608,9 +674,19 @@ my $delete_drive = sub { if (!PVE::QemuServer::drive_is_cdrom($drive)) { my $volid = $drive->{file}; + if (&$vm_is_volid_owner($storecfg, $vmid, $volid)) { if ($force || $key =~ m/^unused/) { - eval { PVE::Storage::vdisk_free($storecfg, $volid); }; + eval { + # check if the disk is really unused + my $used_paths = PVE::QemuServer::get_used_paths($vmid, $storecfg, $conf, 1, $key); + my $path = PVE::Storage::path($storecfg, $volid); + + die "unable to delete '$volid' - volume is still in use (snapshot?)\n" + if $used_paths->{$path}; + + PVE::Storage::vdisk_free($storecfg, $volid); + }; die $@ if $@; } else { PVE::QemuServer::add_unused_volume($conf, $volid, $vmid); @@ -632,12 +708,25 @@ my $vmconfig_delete_option = sub { $rpcenv->check_vm_perm($authuser, $vmid, undef, ['VM.Config.Disk']); my $drive = PVE::QemuServer::parse_drive($opt, $conf->{$opt}); - if (my $sid = &$test_deallocate_drive($storecfg, $vmid, $opt, $drive, $force)) { - $rpcenv->check($authuser, "/storage/$sid", ['Datastore.Allocate']); + if (my $sid = &$test_deallocate_drive($storecfg, $vmid, $opt, $drive, $force)) { + $rpcenv->check($authuser, "/storage/$sid", ['Datastore.AllocateSpace']); } } - - die "error hot-unplug $opt" if !PVE::QemuServer::vm_deviceunplug($vmid, $conf, $opt); + + my $unplugwarning = ""; + if ($conf->{ostype} && $conf->{ostype} eq 'l26') { + $unplugwarning = "
verify that you have acpiphp && pci_hotplug modules loaded in your guest VM"; + } elsif ($conf->{ostype} && $conf->{ostype} eq 'l24') { + $unplugwarning = "
kernel 2.4 doesn't support hotplug, please disable hotplug in options"; + } elsif (!$conf->{ostype} || ($conf->{ostype} && $conf->{ostype} eq 'other')) { + $unplugwarning = "
verify that your guest support acpi hotplug"; + } + + if ($opt eq 'tablet') { + PVE::QemuServer::vm_deviceplug(undef, $conf, $vmid, $opt); + } else { + die "error hot-unplug $opt $unplugwarning" if !PVE::QemuServer::vm_deviceunplug($vmid, $conf, $opt); + } if ($isDisk) { my $drive = PVE::QemuServer::parse_drive($opt, $conf->{$opt}); @@ -649,6 +738,16 @@ my $vmconfig_delete_option = sub { PVE::QemuServer::update_config_nolock($vmid, $conf, 1); }; +my $safe_num_ne = sub { + my ($a, $b) = @_; + + return 0 if !defined($a) && !defined($b); + return 1 if !defined($a); + return 1 if !defined($b); + + return $a != $b; +}; + my $vmconfig_update_disk = sub { my ($rpcenv, $authuser, $conf, $storecfg, $vmid, $opt, $value, $force) = @_; @@ -675,13 +774,32 @@ my $vmconfig_update_disk = sub { $conf = PVE::QemuServer::load_config($vmid); # update/reload } - if($drive->{bps} != $old_drive->{bps} || - $drive->{bps_rd} != $old_drive->{bps_rd} || - $drive->{bps_wr} != $old_drive->{bps_wr} || - $drive->{iops} != $old_drive->{iops} || - $drive->{iops_rd} != $old_drive->{iops_rd} || - $drive->{iops_wr} != $old_drive->{iops_wr} ) { - PVE::QemuServer::qemu_block_set_io_throttle($vmid,"drive-$opt",$drive->{bps}, $drive->{bps_rd}, $drive->{bps_wr}, $drive->{iops}, $drive->{iops_rd}, $drive->{iops_wr}) if !PVE::QemuServer::drive_is_cdrom($drive); + if(&$safe_num_ne($drive->{mbps}, $old_drive->{mbps}) || + &$safe_num_ne($drive->{mbps_rd}, $old_drive->{mbps_rd}) || + &$safe_num_ne($drive->{mbps_wr}, $old_drive->{mbps_wr}) || + &$safe_num_ne($drive->{iops}, $old_drive->{iops}) || + &$safe_num_ne($drive->{iops_rd}, $old_drive->{iops_rd}) || + &$safe_num_ne($drive->{iops_wr}, $old_drive->{iops_wr}) || + &$safe_num_ne($drive->{mbps_max}, $old_drive->{mbps_max}) || + &$safe_num_ne($drive->{mbps_rd_max}, $old_drive->{mbps_rd_max}) || + &$safe_num_ne($drive->{mbps_wr_max}, $old_drive->{mbps_wr_max}) || + &$safe_num_ne($drive->{iops_max}, $old_drive->{iops_max}) || + &$safe_num_ne($drive->{iops_rd_max}, $old_drive->{iops_rd_max}) || + &$safe_num_ne($drive->{iops_wr_max}, $old_drive->{iops_wr_max})) { + PVE::QemuServer::qemu_block_set_io_throttle($vmid,"drive-$opt", + ($drive->{mbps} || 0)*1024*1024, + ($drive->{mbps_rd} || 0)*1024*1024, + ($drive->{mbps_wr} || 0)*1024*1024, + $drive->{iops} || 0, + $drive->{iops_rd} || 0, + $drive->{iops_wr} || 0, + ($drive->{mbps_max} || 0)*1024*1024, + ($drive->{mbps_rd_max} || 0)*1024*1024, + ($drive->{mbps_wr_max} || 0)*1024*1024, + $drive->{iops_max} || 0, + $drive->{iops_rd_max} || 0, + $drive->{iops_wr_max} || 0) + if !PVE::QemuServer::drive_is_cdrom($drive); } } } @@ -713,11 +831,34 @@ my $vmconfig_update_disk = sub { my $vmconfig_update_net = sub { my ($rpcenv, $authuser, $conf, $storecfg, $vmid, $opt, $value) = @_; - if ($conf->{$opt}) { - #if online update, then unplug first - die "error hot-unplug $opt for update" if !PVE::QemuServer::vm_deviceunplug($vmid, $conf, $opt); - } + if ($conf->{$opt} && PVE::QemuServer::check_running($vmid)) { + my $oldnet = PVE::QemuServer::parse_net($conf->{$opt}); + my $newnet = PVE::QemuServer::parse_net($value); + + if($oldnet->{model} ne $newnet->{model}){ + #if model change, we try to hot-unplug + die "error hot-unplug $opt for update" if !PVE::QemuServer::vm_deviceunplug($vmid, $conf, $opt); + }else{ + + if($newnet->{bridge} && $oldnet->{bridge}){ + my $iface = "tap".$vmid."i".$1 if $opt =~ m/net(\d+)/; + + if($newnet->{rate} ne $oldnet->{rate}){ + PVE::Network::tap_rate_limit($iface, $newnet->{rate}); + } + + if(($newnet->{bridge} ne 
$oldnet->{bridge}) || ($newnet->{tag} ne $oldnet->{tag}) || ($newnet->{firewall} ne $oldnet->{firewall})){ + PVE::Network::tap_unplug($iface); + PVE::Network::tap_plug($iface, $newnet->{bridge}, $newnet->{tag}, $newnet->{firewall}); + } + + }else{ + #if bridge/nat mode change, we try to hot-unplug + die "error hot-unplug $opt for update" if !PVE::QemuServer::vm_deviceunplug($vmid, $conf, $opt); + } + } + } $conf->{$opt} = $value; PVE::QemuServer::update_config_nolock($vmid, $conf, 1); $conf = PVE::QemuServer::load_config($vmid); # update/reload @@ -727,136 +868,119 @@ my $vmconfig_update_net = sub { die "error hotplug $opt" if !PVE::QemuServer::vm_deviceplug($storecfg, $conf, $vmid, $opt, $net); }; -my $vm_config_perm_list = [ - 'VM.Config.Disk', - 'VM.Config.CDROM', - 'VM.Config.CPU', - 'VM.Config.Memory', - 'VM.Config.Network', - 'VM.Config.HWType', - 'VM.Config.Options', - ]; +# POST/PUT {vmid}/config implementation +# +# The original API used PUT (idempotent) an we assumed that all operations +# are fast. But it turned out that almost any configuration change can +# involve hot-plug actions, or disk alloc/free. Such actions can take long +# time to complete and have side effects (not idempotent). +# +# The new implementation uses POST and forks a worker process. We added +# a new option 'background_delay'. If specified we wait up to +# 'background_delay' second for the worker task to complete. It returns null +# if the task is finished within that time, else we return the UPID. -__PACKAGE__->register_method({ - name => 'update_vm', - path => '{vmid}/config', - method => 'PUT', - protected => 1, - proxyto => 'node', - description => "Set virtual machine options.", - permissions => { - check => ['perm', '/vms/{vmid}', $vm_config_perm_list, any => 1], - }, - parameters => { - additionalProperties => 0, - properties => PVE::QemuServer::json_config_properties( - { - node => get_standard_option('pve-node'), - vmid => get_standard_option('pve-vmid'), - skiplock => get_standard_option('skiplock'), - delete => { - type => 'string', format => 'pve-configid-list', - description => "A list of settings you want to delete.", - optional => 1, - }, - force => { - type => 'boolean', - description => $opt_force_description, - optional => 1, - requires => 'delete', - }, - digest => { - type => 'string', - description => 'Prevent changes if current configuration file has different SHA1 digest. This can be used to prevent concurrent modifications.', - maxLength => 40, - optional => 1, - } - }), - }, - returns => { type => 'null'}, - code => sub { - my ($param) = @_; +my $update_vm_api = sub { + my ($param, $sync) = @_; - my $rpcenv = PVE::RPCEnvironment::get(); + my $rpcenv = PVE::RPCEnvironment::get(); - my $authuser = $rpcenv->get_user(); + my $authuser = $rpcenv->get_user(); - my $node = extract_param($param, 'node'); + my $node = extract_param($param, 'node'); - my $vmid = extract_param($param, 'vmid'); + my $vmid = extract_param($param, 'vmid'); - my $digest = extract_param($param, 'digest'); + my $digest = extract_param($param, 'digest'); - my @paramarr = (); # used for log message - foreach my $key (keys %$param) { - push @paramarr, "-$key", $param->{$key}; - } + my $background_delay = extract_param($param, 'background_delay'); - my $skiplock = extract_param($param, 'skiplock'); - raise_param_exc({ skiplock => "Only root may use this option." 
}) - if $skiplock && $authuser ne 'root@pam'; + my @paramarr = (); # used for log message + foreach my $key (keys %$param) { + push @paramarr, "-$key", $param->{$key}; + } - my $delete_str = extract_param($param, 'delete'); + my $skiplock = extract_param($param, 'skiplock'); + raise_param_exc({ skiplock => "Only root may use this option." }) + if $skiplock && $authuser ne 'root@pam'; - my $force = extract_param($param, 'force'); + my $delete_str = extract_param($param, 'delete'); - die "no options specified\n" if !$delete_str && !scalar(keys %$param); + my $force = extract_param($param, 'force'); - my $storecfg = PVE::Storage::config(); + die "no options specified\n" if !$delete_str && !scalar(keys %$param); - &$resolve_cdrom_alias($param); + my $storecfg = PVE::Storage::config(); - # now try to verify all parameters + my $defaults = PVE::QemuServer::load_defaults(); - my @delete = (); - foreach my $opt (PVE::Tools::split_list($delete_str)) { - $opt = 'ide2' if $opt eq 'cdrom'; - raise_param_exc({ delete => "you can't use '-$opt' and " . - "-delete $opt' at the same time" }) - if defined($param->{$opt}); + &$resolve_cdrom_alias($param); - if (!PVE::QemuServer::option_exists($opt)) { - raise_param_exc({ delete => "unknown option '$opt'" }); - } + # now try to verify all parameters - push @delete, $opt; + my @delete = (); + foreach my $opt (PVE::Tools::split_list($delete_str)) { + $opt = 'ide2' if $opt eq 'cdrom'; + raise_param_exc({ delete => "you can't use '-$opt' and " . + "-delete $opt' at the same time" }) + if defined($param->{$opt}); + + if (!PVE::QemuServer::option_exists($opt)) { + raise_param_exc({ delete => "unknown option '$opt'" }); } - foreach my $opt (keys %$param) { - if (PVE::QemuServer::valid_drivename($opt)) { - # cleanup drive path - my $drive = PVE::QemuServer::parse_drive($opt, $param->{$opt}); - PVE::QemuServer::cleanup_drive_path($opt, $storecfg, $drive); - $param->{$opt} = PVE::QemuServer::print_drive($vmid, $drive); - } elsif ($opt =~ m/^net(\d+)$/) { - # add macaddr - my $net = PVE::QemuServer::parse_net($param->{$opt}); - $param->{$opt} = PVE::QemuServer::print_net($net); - } + push @delete, $opt; + } + + foreach my $opt (keys %$param) { + if (PVE::QemuServer::valid_drivename($opt)) { + # cleanup drive path + my $drive = PVE::QemuServer::parse_drive($opt, $param->{$opt}); + PVE::QemuServer::cleanup_drive_path($opt, $storecfg, $drive); + $param->{$opt} = PVE::QemuServer::print_drive($vmid, $drive); + } elsif ($opt =~ m/^net(\d+)$/) { + # add macaddr + my $net = PVE::QemuServer::parse_net($param->{$opt}); + $param->{$opt} = PVE::QemuServer::print_net($net); } + } - &$check_vm_modify_config_perm($rpcenv, $authuser, $vmid, undef, [@delete]); + &$check_vm_modify_config_perm($rpcenv, $authuser, $vmid, undef, [@delete]); - &$check_vm_modify_config_perm($rpcenv, $authuser, $vmid, undef, [keys %$param]); + &$check_vm_modify_config_perm($rpcenv, $authuser, $vmid, undef, [keys %$param]); - &$check_storage_access($rpcenv, $authuser, $storecfg, $vmid, $param); + &$check_storage_access($rpcenv, $authuser, $storecfg, $vmid, $param); - my $updatefn = sub { + my $updatefn = sub { - my $conf = PVE::QemuServer::load_config($vmid); + my $conf = PVE::QemuServer::load_config($vmid); - die "checksum missmatch (file change by other user?)\n" - if $digest && $digest ne $conf->{digest}; + die "checksum missmatch (file change by other user?)\n" + if $digest && $digest ne $conf->{digest}; + + PVE::QemuServer::check_lock($conf) if !$skiplock; + + if ($param->{memory} || 
defined($param->{balloon})) { + my $maxmem = $param->{memory} || $conf->{memory} || $defaults->{memory}; + my $balloon = defined($param->{balloon}) ? $param->{balloon} : $conf->{balloon}; - PVE::QemuServer::check_lock($conf) if !$skiplock; + die "balloon value too large (must be smaller than assigned memory)\n" + if $balloon && $balloon > $maxmem; + } + + PVE::Cluster::log_msg('info', $authuser, "update VM $vmid: " . join (' ', @paramarr)); - PVE::Cluster::log_msg('info', $authuser, "update VM $vmid: " . join (' ', @paramarr)); + my $worker = sub { + + print "update VM $vmid: " . join (' ', @paramarr) . "\n"; foreach my $opt (@delete) { # delete $conf = PVE::QemuServer::load_config($vmid); # update/reload &$vmconfig_delete_option($rpcenv, $authuser, $conf, $storecfg, $vmid, $opt, $force); } + my $running = PVE::QemuServer::check_running($vmid); + foreach my $opt (keys %$param) { # add/change $conf = PVE::QemuServer::load_config($vmid); # update/reload @@ -865,128 +989,280 @@ __PACKAGE__->register_method({ if (PVE::QemuServer::valid_drivename($opt)) { - &$vmconfig_update_disk($rpcenv, $authuser, $conf, $storecfg, $vmid, + &$vmconfig_update_disk($rpcenv, $authuser, $conf, $storecfg, $vmid, $opt, $param->{$opt}, $force); - + } elsif ($opt =~ m/^net(\d+)$/) { #nics - &$vmconfig_update_net($rpcenv, $authuser, $conf, $storecfg, $vmid, + &$vmconfig_update_net($rpcenv, $authuser, $conf, $storecfg, $vmid, $opt, $param->{$opt}); } else { + if($opt eq 'tablet' && $param->{$opt} == 1){ + PVE::QemuServer::vm_deviceplug(undef, $conf, $vmid, $opt); + } elsif($opt eq 'tablet' && $param->{$opt} == 0){ + PVE::QemuServer::vm_deviceunplug($vmid, $conf, $opt); + } + + if($opt eq 'cores' && $conf->{maxcpus}){ + PVE::QemuServer::qemu_cpu_hotplug($vmid, $conf, $param->{$opt}); + } + $conf->{$opt} = $param->{$opt}; PVE::QemuServer::update_config_nolock($vmid, $conf, 1); } } - }; - - PVE::QemuServer::lock_config($vmid, $updatefn); - - return undef; - }}); + # allow manual ballooning if shares is set to zero + if ($running && defined($param->{balloon}) && + defined($conf->{shares}) && ($conf->{shares} == 0)) { + my $balloon = $param->{'balloon'} || $conf->{memory} || $defaults->{memory}; + PVE::QemuServer::vm_mon_cmd($vmid, "balloon", value => $balloon*1024*1024); + } + }; -__PACKAGE__->register_method({ - name => 'destroy_vm', - path => '{vmid}', - method => 'DELETE', - protected => 1, - proxyto => 'node', - description => "Destroy the vm (also delete all used/owned volumes).", - permissions => { - check => [ 'perm', '/vms/{vmid}', ['VM.Allocate']], - }, - parameters => { - additionalProperties => 0, - properties => { - node => get_standard_option('pve-node'), - vmid => get_standard_option('pve-vmid'), - skiplock => get_standard_option('skiplock'), - }, - }, - returns => { - type => 'string', - }, - code => sub { - my ($param) = @_; - - my $rpcenv = PVE::RPCEnvironment::get(); + if ($sync) { + &$worker(); + return undef; + } else { + my $upid = $rpcenv->fork_worker('qmconfig', $vmid, $authuser, $worker); - my $authuser = $rpcenv->get_user(); + if ($background_delay) { - my $vmid = $param->{vmid}; + # Note: It would be better to do that in the Event based HTTPServer + # to avoid blocking call to sleep. - my $skiplock = $param->{skiplock}; - raise_param_exc({ skiplock => "Only root may use this option." 
}) - if $skiplock && $authuser ne 'root@pam'; + my $end_time = time() + $background_delay; - # test if VM exists - my $conf = PVE::QemuServer::load_config($vmid); + my $task = PVE::Tools::upid_decode($upid); - my $storecfg = PVE::Storage::config(); + my $running = 1; + while (time() < $end_time) { + $running = PVE::ProcFSTools::check_process_running($task->{pid}, $task->{pstart}); + last if !$running; + sleep(1); # this gets interrupted when child process ends + } - my $delVMfromPoolFn = sub { - my $usercfg = cfs_read_file("user.cfg"); - if (my $pool = $usercfg->{vms}->{$vmid}) { - if (my $data = $usercfg->{pools}->{$pool}) { - delete $data->{vms}->{$vmid}; - delete $usercfg->{vms}->{$vmid}; - cfs_write_file("user.cfg", $usercfg); + if (!$running) { + my $status = PVE::Tools::upid_read_status($upid); + return undef if $status eq 'OK'; + die $status; } } - }; - my $realcmd = sub { - my $upid = shift; - - syslog('info', "destroy VM $vmid: $upid\n"); - - PVE::QemuServer::vm_destroy($storecfg, $vmid, $skiplock); + return $upid; + } + }; - PVE::AccessControl::lock_user_config($delVMfromPoolFn, "pool cleanup failed"); - }; + return PVE::QemuServer::lock_config($vmid, $updatefn); +}; - return $rpcenv->fork_worker('qmdestroy', $vmid, $authuser, $realcmd); - }}); +my $vm_config_perm_list = [ + 'VM.Config.Disk', + 'VM.Config.CDROM', + 'VM.Config.CPU', + 'VM.Config.Memory', + 'VM.Config.Network', + 'VM.Config.HWType', + 'VM.Config.Options', + ]; __PACKAGE__->register_method({ - name => 'unlink', - path => '{vmid}/unlink', - method => 'PUT', + name => 'update_vm_async', + path => '{vmid}/config', + method => 'POST', protected => 1, proxyto => 'node', - description => "Unlink/delete disk images.", + description => "Set virtual machine options (asynchrounous API).", permissions => { - check => [ 'perm', '/vms/{vmid}', ['VM.Config.Disk']], + check => ['perm', '/vms/{vmid}', $vm_config_perm_list, any => 1], }, parameters => { additionalProperties => 0, - properties => { - node => get_standard_option('pve-node'), - vmid => get_standard_option('pve-vmid'), - idlist => { - type => 'string', format => 'pve-configid-list', - description => "A list of disk IDs you want to delete.", - }, - force => { - type => 'boolean', - description => $opt_force_description, - optional => 1, - }, - }, - }, - returns => { type => 'null'}, - code => sub { - my ($param) = @_; - - $param->{delete} = extract_param($param, 'idlist'); - - __PACKAGE__->update_vm($param); - - return undef; - }}); - + properties => PVE::QemuServer::json_config_properties( + { + node => get_standard_option('pve-node'), + vmid => get_standard_option('pve-vmid'), + skiplock => get_standard_option('skiplock'), + delete => { + type => 'string', format => 'pve-configid-list', + description => "A list of settings you want to delete.", + optional => 1, + }, + force => { + type => 'boolean', + description => $opt_force_description, + optional => 1, + requires => 'delete', + }, + digest => { + type => 'string', + description => 'Prevent changes if current configuration file has different SHA1 digest. This can be used to prevent concurrent modifications.', + maxLength => 40, + optional => 1, + }, + background_delay => { + type => 'integer', + description => "Time to wait for the task to finish. 
We return 'null' if the task finish within that time.", + minimum => 1, + maximum => 30, + optional => 1, + }, + }), + }, + returns => { + type => 'string', + optional => 1, + }, + code => $update_vm_api, +}); + +__PACKAGE__->register_method({ + name => 'update_vm', + path => '{vmid}/config', + method => 'PUT', + protected => 1, + proxyto => 'node', + description => "Set virtual machine options (synchrounous API) - You should consider using the POST method instead for any actions involving hotplug or storage allocation.", + permissions => { + check => ['perm', '/vms/{vmid}', $vm_config_perm_list, any => 1], + }, + parameters => { + additionalProperties => 0, + properties => PVE::QemuServer::json_config_properties( + { + node => get_standard_option('pve-node'), + vmid => get_standard_option('pve-vmid'), + skiplock => get_standard_option('skiplock'), + delete => { + type => 'string', format => 'pve-configid-list', + description => "A list of settings you want to delete.", + optional => 1, + }, + force => { + type => 'boolean', + description => $opt_force_description, + optional => 1, + requires => 'delete', + }, + digest => { + type => 'string', + description => 'Prevent changes if current configuration file has different SHA1 digest. This can be used to prevent concurrent modifications.', + maxLength => 40, + optional => 1, + }, + }), + }, + returns => { type => 'null' }, + code => sub { + my ($param) = @_; + &$update_vm_api($param, 1); + return undef; + } +}); + + +__PACKAGE__->register_method({ + name => 'destroy_vm', + path => '{vmid}', + method => 'DELETE', + protected => 1, + proxyto => 'node', + description => "Destroy the vm (also delete all used/owned volumes).", + permissions => { + check => [ 'perm', '/vms/{vmid}', ['VM.Allocate']], + }, + parameters => { + additionalProperties => 0, + properties => { + node => get_standard_option('pve-node'), + vmid => get_standard_option('pve-vmid'), + skiplock => get_standard_option('skiplock'), + }, + }, + returns => { + type => 'string', + }, + code => sub { + my ($param) = @_; + + my $rpcenv = PVE::RPCEnvironment::get(); + + my $authuser = $rpcenv->get_user(); + + my $vmid = $param->{vmid}; + + my $skiplock = $param->{skiplock}; + raise_param_exc({ skiplock => "Only root may use this option." 
}) + if $skiplock && $authuser ne 'root@pam'; + + # test if VM exists + my $conf = PVE::QemuServer::load_config($vmid); + + my $storecfg = PVE::Storage::config(); + + my $delVMfromPoolFn = sub { + my $usercfg = cfs_read_file("user.cfg"); + if (my $pool = $usercfg->{vms}->{$vmid}) { + if (my $data = $usercfg->{pools}->{$pool}) { + delete $data->{vms}->{$vmid}; + delete $usercfg->{vms}->{$vmid}; + cfs_write_file("user.cfg", $usercfg); + } + } + }; + + my $realcmd = sub { + my $upid = shift; + + syslog('info', "destroy VM $vmid: $upid\n"); + + PVE::QemuServer::vm_destroy($storecfg, $vmid, $skiplock); + + PVE::AccessControl::remove_vm_from_pool($vmid); + }; + + return $rpcenv->fork_worker('qmdestroy', $vmid, $authuser, $realcmd); + }}); + +__PACKAGE__->register_method({ + name => 'unlink', + path => '{vmid}/unlink', + method => 'PUT', + protected => 1, + proxyto => 'node', + description => "Unlink/delete disk images.", + permissions => { + check => [ 'perm', '/vms/{vmid}', ['VM.Config.Disk']], + }, + parameters => { + additionalProperties => 0, + properties => { + node => get_standard_option('pve-node'), + vmid => get_standard_option('pve-vmid'), + idlist => { + type => 'string', format => 'pve-configid-list', + description => "A list of disk IDs you want to delete.", + }, + force => { + type => 'boolean', + description => $opt_force_description, + optional => 1, + }, + }, + }, + returns => { type => 'null'}, + code => sub { + my ($param) = @_; + + $param->{delete} = extract_param($param, 'idlist'); + + __PACKAGE__->update_vm($param); + + return undef; + }}); + my $sslcert; __PACKAGE__->register_method({ @@ -1003,6 +1279,11 @@ __PACKAGE__->register_method({ properties => { node => get_standard_option('pve-node'), vmid => get_standard_option('pve-vmid'), + websocket => { + optional => 1, + type => 'boolean', + description => "starts websockify instead of vncproxy", + }, }, }, returns => { @@ -1024,6 +1305,9 @@ __PACKAGE__->register_method({ my $vmid = $param->{vmid}; my $node = $param->{node}; + my $websocket = $param->{websocket}; + + my $conf = PVE::QemuServer::load_config($vmid, $node); # check if VM exists my $authpath = "/vms/$vmid"; @@ -1035,16 +1319,14 @@ __PACKAGE__->register_method({ my $port = PVE::Tools::next_vnc_port(); my $remip; + my $remcmd = []; if ($node ne 'localhost' && $node ne PVE::INotify::nodename()) { $remip = PVE::Cluster::remote_node_ip($node); + # NOTE: kvm VNC traffic is already TLS encrypted or is known unsecure + $remcmd = ['/usr/bin/ssh', '-T', '-o', 'BatchMode=yes', $remip]; } - # NOTE: kvm VNC traffic is already TLS encrypted, - # so we select the fastest chipher here (or 'none'?) - my $remcmd = $remip ? ['/usr/bin/ssh', '-T', '-o', 'BatchMode=yes', - '-c', 'blowfish-cbc', $remip] : []; - my $timeout = 10; my $realcmd = sub { @@ -1052,12 +1334,28 @@ __PACKAGE__->register_method({ syslog('info', "starting vnc proxy $upid\n"); - my $qmcmd = [@$remcmd, "/usr/sbin/qm", 'vncproxy', $vmid]; + my $cmd; + + if ($conf->{vga} && ($conf->{vga} =~ m/^serial\d+$/)) { + + die "Websocket mode is not supported in vga serial mode!" 
if $websocket; - my $qmstr = join(' ', @$qmcmd); + my $termcmd = [ '/usr/sbin/qm', 'terminal', $vmid, '-iface', $conf->{vga} ]; + #my $termcmd = "/usr/bin/qm terminal -iface $conf->{vga}"; + $cmd = ['/usr/bin/vncterm', '-rfbport', $port, + '-timeout', $timeout, '-authpath', $authpath, + '-perm', 'Sys.Console', '-c', @$remcmd, @$termcmd]; + } else { + + $ENV{LC_PVE_TICKET} = $ticket if $websocket; # set ticket with "qm vncproxy" + + my $qmcmd = [@$remcmd, "/usr/sbin/qm", 'vncproxy', $vmid]; + + my $qmstr = join(' ', @$qmcmd); - # also redirect stderr (else we get RFB protocol errors) - my $cmd = ['/bin/nc', '-l', '-p', $port, '-w', $timeout, '-c', "$qmstr 2>/dev/null"]; + # also redirect stderr (else we get RFB protocol errors) + $cmd = ['/bin/nc', '-l', '-p', $port, '-w', $timeout, '-c', "$qmstr 2>/dev/null"]; + } PVE::Tools::run_command($cmd); @@ -1066,6 +1364,8 @@ __PACKAGE__->register_method({ my $upid = $rpcenv->fork_worker('vncproxy', $vmid, $authuser, $realcmd); + PVE::Tools::wait_for_vnc_port($port); + return { user => $authuser, ticket => $ticket, @@ -1075,6 +1375,108 @@ __PACKAGE__->register_method({ }; }}); +__PACKAGE__->register_method({ + name => 'vncwebsocket', + path => '{vmid}/vncwebsocket', + method => 'GET', + permissions => { + description => "You also need to pass a valid ticket (vncticket).", + check => ['perm', '/vms/{vmid}', [ 'VM.Console' ]], + }, + description => "Opens a weksocket for VNC traffic.", + parameters => { + additionalProperties => 0, + properties => { + node => get_standard_option('pve-node'), + vmid => get_standard_option('pve-vmid'), + vncticket => { + description => "Ticket from previous call to vncproxy.", + type => 'string', + maxLength => 512, + }, + port => { + description => "Port number returned by previous vncproxy call.", + type => 'integer', + minimum => 5900, + maximum => 5999, + }, + }, + }, + returns => { + type => "object", + properties => { + port => { type => 'string' }, + }, + }, + code => sub { + my ($param) = @_; + + my $rpcenv = PVE::RPCEnvironment::get(); + + my $authuser = $rpcenv->get_user(); + + my $vmid = $param->{vmid}; + my $node = $param->{node}; + + my $authpath = "/vms/$vmid"; + + PVE::AccessControl::verify_vnc_ticket($param->{vncticket}, $authuser, $authpath); + + my $conf = PVE::QemuServer::load_config($vmid, $node); # VM exists ? + + # Note: VNC ports are acessible from outside, so we do not gain any + # security if we verify that $param->{port} belongs to VM $vmid. This + # check is done by verifying the VNC ticket (inside VNC protocol). 
+ + my $port = $param->{port}; + + return { port => $port }; + }}); + +__PACKAGE__->register_method({ + name => 'spiceproxy', + path => '{vmid}/spiceproxy', + method => 'POST', + protected => 1, + proxyto => 'node', + permissions => { + check => ['perm', '/vms/{vmid}', [ 'VM.Console' ]], + }, + description => "Returns a SPICE configuration to connect to the VM.", + parameters => { + additionalProperties => 0, + properties => { + node => get_standard_option('pve-node'), + vmid => get_standard_option('pve-vmid'), + proxy => get_standard_option('spice-proxy', { optional => 1 }), + }, + }, + returns => get_standard_option('remote-viewer-config'), + code => sub { + my ($param) = @_; + + my $rpcenv = PVE::RPCEnvironment::get(); + + my $authuser = $rpcenv->get_user(); + + my $vmid = $param->{vmid}; + my $node = $param->{node}; + my $proxy = $param->{proxy}; + + my $conf = PVE::QemuServer::load_config($vmid, $node); + my $title = "VM $vmid - $conf->{'name'}", + + my $port = PVE::QemuServer::spice_port($vmid); + + my ($ticket, undef, $remote_viewer_config) = + PVE::AccessControl::remote_viewer_config($authuser, $vmid, $node, $proxy, $title, $port); + + PVE::QemuServer::vm_mon_cmd($vmid, "set_password", protocol => 'spice', password => $ticket); + PVE::QemuServer::vm_mon_cmd($vmid, "expire_password", protocol => 'spice', time => "+30"); + + return $remote_viewer_config; + }}); + __PACKAGE__->register_method({ name => 'vmcmdidx', path => '{vmid}/status', @@ -1122,7 +1524,7 @@ my $vm_is_ha_managed = sub { my $cc = PVE::Cluster::cfs_read_file('cluster.conf'); if (PVE::Cluster::cluster_conf_lookup_pvevm($cc, 0, $vmid, 1)) { return 1; - } + } return 0; }; @@ -1155,6 +1557,8 @@ __PACKAGE__->register_method({ $status->{ha} = &$vm_is_ha_managed($param->{vmid}); + $status->{spice} = 1 if PVE::QemuServer::vga_conf_has_spice($conf->{vga}); + return $status; }}); @@ -1175,6 +1579,8 @@ __PACKAGE__->register_method({ vmid => get_standard_option('pve-vmid'), skiplock => get_standard_option('skiplock'), stateuri => get_standard_option('pve-qm-stateuri'), + migratedfrom => get_standard_option('pve-node',{ optional => 1 }), + machine => get_standard_option('pve-qm-machine'), }, }, returns => { @@ -1191,6 +1597,8 @@ __PACKAGE__->register_method({ my $vmid = extract_param($param, 'vmid'); + my $machine = extract_param($param, 'machine'); + my $stateuri = extract_param($param, 'stateuri'); raise_param_exc({ stateuri => "Only root may use this option." }) if $stateuri && $authuser ne 'root@pam'; @@ -1199,9 +1607,22 @@ __PACKAGE__->register_method({ raise_param_exc({ skiplock => "Only root may use this option." }) if $skiplock && $authuser ne 'root@pam'; - my $storecfg = PVE::Storage::config(); + my $migratedfrom = extract_param($param, 'migratedfrom'); + raise_param_exc({ migratedfrom => "Only root may use this option." 
}) + if $migratedfrom && $authuser ne 'root@pam'; - if (&$vm_is_ha_managed($vmid) && !$stateuri && + # read spice ticket from STDIN + my $spice_ticket; + if ($stateuri && ($stateuri eq 'tcp') && $migratedfrom && ($rpcenv->{type} eq 'cli')) { + if (defined(my $line = <>)) { + chomp $line; + $spice_ticket = $line; + } + } + + my $storecfg = PVE::Storage::config(); + + if (&$vm_is_ha_managed($vmid) && !$stateuri && $rpcenv->{type} ne 'ha') { my $hacmd = sub { @@ -1227,7 +1648,8 @@ __PACKAGE__->register_method({ syslog('info', "start VM $vmid: $upid\n"); - PVE::QemuServer::vm_start($storecfg, $vmid, $stateuri, $skiplock); + PVE::QemuServer::vm_start($storecfg, $vmid, $stateuri, $skiplock, $migratedfrom, undef, + $machine, $spice_ticket); return; }; @@ -1252,6 +1674,7 @@ __PACKAGE__->register_method({ node => get_standard_option('pve-node'), vmid => get_standard_option('pve-vmid'), skiplock => get_standard_option('skiplock'), + migratedfrom => get_standard_option('pve-node', { optional => 1 }), timeout => { description => "Wait maximal timeout seconds.", type => 'integer', @@ -1288,9 +1711,14 @@ __PACKAGE__->register_method({ raise_param_exc({ keepActive => "Only root may use this option." }) if $keepActive && $authuser ne 'root@pam'; + my $migratedfrom = extract_param($param, 'migratedfrom'); + raise_param_exc({ migratedfrom => "Only root may use this option." }) + if $migratedfrom && $authuser ne 'root@pam'; + + my $storecfg = PVE::Storage::config(); - if (&$vm_is_ha_managed($vmid) && $rpcenv->{type} ne 'ha') { + if (&$vm_is_ha_managed($vmid) && ($rpcenv->{type} ne 'ha') && !defined($migratedfrom)) { my $hacmd = sub { my $upid = shift; @@ -1315,7 +1743,7 @@ __PACKAGE__->register_method({ syslog('info', "stop VM $vmid: $upid\n"); PVE::QemuServer::vm_stop($storecfg, $vmid, $skiplock, 0, - $param->{timeout}, 0, 1, $keepActive); + $param->{timeout}, 0, 1, $keepActive, $migratedfrom); return; }; @@ -1593,142 +2021,1166 @@ __PACKAGE__->register_method({ }}); __PACKAGE__->register_method({ - name => 'migrate_vm', - path => '{vmid}/migrate', + name => 'vm_feature', + path => '{vmid}/feature', + method => 'GET', + proxyto => 'node', + protected => 1, + description => "Check if feature for virtual machine is available.", + permissions => { + check => ['perm', '/vms/{vmid}', [ 'VM.Audit' ]], + }, + parameters => { + additionalProperties => 0, + properties => { + node => get_standard_option('pve-node'), + vmid => get_standard_option('pve-vmid'), + feature => { + description => "Feature to check.", + type => 'string', + enum => [ 'snapshot', 'clone', 'copy' ], + }, + snapname => get_standard_option('pve-snapshot-name', { + optional => 1, + }), + }, + }, + returns => { + type => "object", + properties => { + hasFeature => { type => 'boolean' }, + nodes => { + type => 'array', + items => { type => 'string' }, + } + }, + }, + code => sub { + my ($param) = @_; + + my $node = extract_param($param, 'node'); + + my $vmid = extract_param($param, 'vmid'); + + my $snapname = extract_param($param, 'snapname'); + + my $feature = extract_param($param, 'feature'); + + my $running = PVE::QemuServer::check_running($vmid); + + my $conf = PVE::QemuServer::load_config($vmid); + + if($snapname){ + my $snap = $conf->{snapshots}->{$snapname}; + die "snapshot '$snapname' does not exist\n" if !defined($snap); + $conf = $snap; + } + my $storecfg = PVE::Storage::config(); + + my $nodelist = PVE::QemuServer::shared_nodes($conf, $storecfg); + my $hasFeature = PVE::QemuServer::has_feature($feature, $conf, $storecfg, $snapname, 
$running); + + return { + hasFeature => $hasFeature, + nodes => [ keys %$nodelist ], + }; + }}); + +__PACKAGE__->register_method({ + name => 'clone_vm', + path => '{vmid}/clone', method => 'POST', protected => 1, proxyto => 'node', - description => "Migrate virtual machine. Creates a new migration task.", + description => "Create a copy of virtual machine/template.", permissions => { - check => ['perm', '/vms/{vmid}', [ 'VM.Migrate' ]], + description => "You need 'VM.Clone' permissions on /vms/{vmid}, and 'VM.Allocate' permissions " . + "on /vms/{newid} (or on the VM pool /pool/{pool}). You also need " . + "'Datastore.AllocateSpace' on any used storage.", + check => + [ 'and', + ['perm', '/vms/{vmid}', [ 'VM.Clone' ]], + [ 'or', + [ 'perm', '/vms/{newid}', ['VM.Allocate']], + [ 'perm', '/pool/{pool}', ['VM.Allocate'], require_param => 'pool'], + ], + ] }, parameters => { additionalProperties => 0, properties => { node => get_standard_option('pve-node'), vmid => get_standard_option('pve-vmid'), - target => get_standard_option('pve-node', { description => "Target node." }), - online => { - type => 'boolean', - description => "Use online/live migration.", + newid => get_standard_option('pve-vmid', { description => 'VMID for the clone.' }), + name => { optional => 1, + type => 'string', format => 'dns-name', + description => "Set a name for the new VM.", }, - force => { - type => 'boolean', - description => "Allow to migrate VMs which use local devices. Only root may use this option.", + description => { optional => 1, + type => 'string', + description => "Description for the new VM.", }, - }, + pool => { + optional => 1, + type => 'string', format => 'pve-poolid', + description => "Add the new VM to the specified pool.", + }, + snapname => get_standard_option('pve-snapshot-name', { + optional => 1, + }), + storage => get_standard_option('pve-storage-id', { + description => "Target storage for full clone.", + requires => 'full', + optional => 1, + }), + 'format' => { + description => "Target format for file storage.", + requires => 'full', + type => 'string', + optional => 1, + enum => [ 'raw', 'qcow2', 'vmdk'], + }, + full => { + optional => 1, + type => 'boolean', + description => "Create a full copy of all disk. This is always done when " . + "you clone a normal VM. For VM templates, we try to create a linked clone by default.", + default => 0, + }, + target => get_standard_option('pve-node', { + description => "Target node. Only allowed if the original VM is on shared storage.", + optional => 1, + }), + }, }, returns => { type => 'string', - description => "the task ID.", }, code => sub { my ($param) = @_; my $rpcenv = PVE::RPCEnvironment::get(); - my $authuser = $rpcenv->get_user(); + my $authuser = $rpcenv->get_user(); - my $target = extract_param($param, 'target'); + my $node = extract_param($param, 'node'); - my $localnode = PVE::INotify::nodename(); - raise_param_exc({ target => "target is local node."}) if $target eq $localnode; + my $vmid = extract_param($param, 'vmid'); - PVE::Cluster::check_cfs_quorum(); + my $newid = extract_param($param, 'newid'); - PVE::Cluster::check_node_exists($target); + my $pool = extract_param($param, 'pool'); - my $targetip = PVE::Cluster::remote_node_ip($target); + if (defined($pool)) { + $rpcenv->check_pool_exist($pool); + } - my $vmid = extract_param($param, 'vmid'); + my $snapname = extract_param($param, 'snapname'); - raise_param_exc({ force => "Only root may use this option." 
}) - if $param->{force} && $authuser ne 'root@pam'; + my $storage = extract_param($param, 'storage'); - # test if VM exists - my $conf = PVE::QemuServer::load_config($vmid); + my $format = extract_param($param, 'format'); - # try to detect errors early + my $target = extract_param($param, 'target'); - PVE::QemuServer::check_lock($conf); + my $localnode = PVE::INotify::nodename(); - if (PVE::QemuServer::check_running($vmid)) { - die "cant migrate running VM without --online\n" - if !$param->{online}; - } + undef $target if $target && ($target eq $localnode || $target eq 'localhost'); + + PVE::Cluster::check_node_exists($target) if $target; my $storecfg = PVE::Storage::config(); - PVE::QemuServer::check_storage_availability($storecfg, $conf, $target); - if (&$vm_is_ha_managed($vmid) && $rpcenv->{type} ne 'ha') { + if ($storage) { + # check if storage is enabled on local node + PVE::Storage::storage_check_enabled($storecfg, $storage); + if ($target) { + # check if storage is available on target node + PVE::Storage::storage_check_node($storecfg, $storage, $target); + # clone only works if target storage is shared + my $scfg = PVE::Storage::storage_config($storecfg, $storage); + die "can't clone to non-shared storage '$storage'\n" if !$scfg->{shared}; + } + } - my $hacmd = sub { - my $upid = shift; + PVE::Cluster::check_cfs_quorum(); - my $service = "pvevm:$vmid"; + my $running = PVE::QemuServer::check_running($vmid) || 0; - my $cmd = ['clusvcadm', '-M', $service, '-m', $target]; + # exclusive lock if VM is running - else shared lock is enough; + my $shared_lock = $running ? 0 : 1; - print "Executing HA migrate for VM $vmid to node $target\n"; + my $clonefn = sub { - PVE::Tools::run_command($cmd); + # do all tests after lock + # we also try to do all tests before we fork the worker - return; - }; + my $conf = PVE::QemuServer::load_config($vmid); - return $rpcenv->fork_worker('hamigrate', $vmid, $authuser, $hacmd); + PVE::QemuServer::check_lock($conf); - } else { + my $verify_running = PVE::QemuServer::check_running($vmid) || 0; + + die "unexpected state change\n" if $verify_running != $running; + + die "snapshot '$snapname' does not exist\n" + if $snapname && !defined( $conf->{snapshots}->{$snapname}); + + my $oldconf = $snapname ? $conf->{snapshots}->{$snapname} : $conf; + + my $sharedvm = &$check_storage_access_clone($rpcenv, $authuser, $storecfg, $oldconf, $storage); + + die "can't clone VM to node '$target' (VM uses local storage)\n" if $target && !$sharedvm; + + my $conffile = PVE::QemuServer::config_file($newid); + + die "unable to create VM $newid: config file already exists\n" + if -f $conffile; + + my $newconf = { lock => 'clone' }; + my $drives = {}; + my $vollist = []; + + foreach my $opt (keys %$oldconf) { + my $value = $oldconf->{$opt}; + + # do not copy snapshot related info + next if $opt eq 'snapshots' || $opt eq 'parent' || $opt eq 'snaptime' || + $opt eq 'vmstate' || $opt eq 'snapstate'; + + # always change MAC! 
address + if ($opt =~ m/^net(\d+)$/) { + my $net = PVE::QemuServer::parse_net($value); + $net->{macaddr} = PVE::Tools::random_ether_addr(); + $newconf->{$opt} = PVE::QemuServer::print_net($net); + } elsif (PVE::QemuServer::valid_drivename($opt)) { + my $drive = PVE::QemuServer::parse_drive($opt, $value); + die "unable to parse drive options for '$opt'\n" if !$drive; + if (PVE::QemuServer::drive_is_cdrom($drive)) { + $newconf->{$opt} = $value; # simply copy configuration + } else { + if ($param->{full}) { + die "Full clone feature is not available" + if !PVE::Storage::volume_has_feature($storecfg, 'copy', $drive->{file}, $snapname, $running); + $drive->{full} = 1; + } else { + # not full means clone instead of copy + die "Linked clone feature is not available" + if !PVE::Storage::volume_has_feature($storecfg, 'clone', $drive->{file}, $snapname, $running); + } + $drives->{$opt} = $drive; + push @$vollist, $drive->{file}; + } + } else { + # copy everything else + $newconf->{$opt} = $value; + } + } + + # auto generate a new uuid + my ($uuid, $uuid_str); + UUID::generate($uuid); + UUID::unparse($uuid, $uuid_str); + my $smbios1 = PVE::QemuServer::parse_smbios1($newconf->{smbios1} || ''); + $smbios1->{uuid} = $uuid_str; + $newconf->{smbios1} = PVE::QemuServer::print_smbios1($smbios1); + + delete $newconf->{template}; + + if ($param->{name}) { + $newconf->{name} = $param->{name}; + } else { + if ($oldconf->{name}) { + $newconf->{name} = "Copy-of-$oldconf->{name}"; + } else { + $newconf->{name} = "Copy-of-VM-$vmid"; + } + } + + if ($param->{description}) { + $newconf->{description} = $param->{description}; + } + + # create empty/temp config - this fails if VM already exists on other node + PVE::Tools::file_set_contents($conffile, "# qmclone temporary file\nlock: clone\n"); my $realcmd = sub { my $upid = shift; - PVE::QemuMigrate->migrate($target, $targetip, $vmid, $param); + my $newvollist = []; + + eval { + local $SIG{INT} = $SIG{TERM} = $SIG{QUIT} = $SIG{HUP} = sub { die "interrupted by signal\n"; }; + + PVE::Storage::activate_volumes($storecfg, $vollist); + + foreach my $opt (keys %$drives) { + my $drive = $drives->{$opt}; + + my $newdrive = PVE::QemuServer::clone_disk($storecfg, $vmid, $running, $opt, $drive, $snapname, + $newid, $storage, $format, $drive->{full}, $newvollist); + + $newconf->{$opt} = PVE::QemuServer::print_drive($vmid, $newdrive); + + PVE::QemuServer::update_config_nolock($newid, $newconf, 1); + } + + delete $newconf->{lock}; + PVE::QemuServer::update_config_nolock($newid, $newconf, 1); + + if ($target) { + # always deactivate volumes - avoid lvm LVs to be active on several nodes + PVE::Storage::deactivate_volumes($storecfg, $vollist); + + my $newconffile = PVE::QemuServer::config_file($newid, $target); + die "Failed to move config to node '$target' - rename failed: $!\n" + if !rename($conffile, $newconffile); + } + + PVE::AccessControl::add_vm_to_pool($newid, $pool) if $pool; + }; + if (my $err = $@) { + unlink $conffile; + + sleep 1; # some storage like rbd need to wait before release volume - really? 
+ + foreach my $volid (@$newvollist) { + eval { PVE::Storage::vdisk_free($storecfg, $volid); }; + warn $@ if $@; + } + die "clone failed: $err"; + } + + return; }; - return $rpcenv->fork_worker('qmigrate', $vmid, $authuser, $realcmd); - } + return $rpcenv->fork_worker('qmclone', $vmid, $authuser, $realcmd); + }; + + return PVE::QemuServer::lock_config_mode($vmid, 1, $shared_lock, sub { + # Aquire exclusive lock lock for $newid + return PVE::QemuServer::lock_config_full($newid, 1, $clonefn); + }); }}); __PACKAGE__->register_method({ - name => 'monitor', - path => '{vmid}/monitor', + name => 'move_vm_disk', + path => '{vmid}/move_disk', method => 'POST', protected => 1, proxyto => 'node', - description => "Execute Qemu monitor commands.", + description => "Move volume to different storage.", permissions => { - check => ['perm', '/vms/{vmid}', [ 'VM.Monitor' ]], + description => "You need 'VM.Config.Disk' permissions on /vms/{vmid}, " . + "and 'Datastore.AllocateSpace' permissions on the storage.", + check => + [ 'and', + ['perm', '/vms/{vmid}', [ 'VM.Config.Disk' ]], + ['perm', '/storage/{storage}', [ 'Datastore.AllocateSpace' ]], + ], }, parameters => { - additionalProperties => 0, - properties => { + additionalProperties => 0, + properties => { node => get_standard_option('pve-node'), vmid => get_standard_option('pve-vmid'), - command => { + disk => { + type => 'string', + description => "The disk you want to move.", + enum => [ PVE::QemuServer::disknames() ], + }, + storage => get_standard_option('pve-storage-id', { description => "Target Storage." }), + 'format' => { + type => 'string', + description => "Target Format.", + enum => [ 'raw', 'qcow2', 'vmdk' ], + optional => 1, + }, + delete => { + type => 'boolean', + description => "Delete the original disk after successful copy. By default the original disk is kept as unused disk.", + optional => 1, + default => 0, + }, + digest => { type => 'string', - description => "The monitor command.", - } + description => 'Prevent changes if current configuration file has different SHA1 digest. 
This can be used to prevent concurrent modifications.', + maxLength => 40, + optional => 1, + }, }, }, - returns => { type => 'string'}, + returns => { + type => 'string', + description => "the task ID.", + }, code => sub { my ($param) = @_; - my $vmid = $param->{vmid}; + my $rpcenv = PVE::RPCEnvironment::get(); - my $conf = PVE::QemuServer::load_config ($vmid); # check if VM exists + my $authuser = $rpcenv->get_user(); - my $res = ''; - eval { - $res = PVE::QemuServer::vm_human_monitor_command($vmid, $param->{command}); + my $node = extract_param($param, 'node'); + + my $vmid = extract_param($param, 'vmid'); + + my $digest = extract_param($param, 'digest'); + + my $disk = extract_param($param, 'disk'); + + my $storeid = extract_param($param, 'storage'); + + my $format = extract_param($param, 'format'); + + my $storecfg = PVE::Storage::config(); + + my $updatefn = sub { + + my $conf = PVE::QemuServer::load_config($vmid); + + die "checksum missmatch (file change by other user?)\n" + if $digest && $digest ne $conf->{digest}; + + die "disk '$disk' does not exist\n" if !$conf->{$disk}; + + my $drive = PVE::QemuServer::parse_drive($disk, $conf->{$disk}); + + my $old_volid = $drive->{file} || die "disk '$disk' has no associated volume\n"; + + die "you can't move a cdrom\n" if PVE::QemuServer::drive_is_cdrom($drive); + + my $oldfmt; + my ($oldstoreid, $oldvolname) = PVE::Storage::parse_volume_id($old_volid); + if ($oldvolname =~ m/\.(raw|qcow2|vmdk)$/){ + $oldfmt = $1; + } + + die "you can't move on the same storage with same format\n" if $oldstoreid eq $storeid && + (!$format || !$oldfmt || $oldfmt eq $format); + + PVE::Cluster::log_msg('info', $authuser, "move disk VM $vmid: move --disk $disk --storage $storeid"); + + my $running = PVE::QemuServer::check_running($vmid); + + PVE::Storage::activate_volumes($storecfg, [ $drive->{file} ]); + + my $realcmd = sub { + + my $newvollist = []; + + eval { + local $SIG{INT} = $SIG{TERM} = $SIG{QUIT} = $SIG{HUP} = sub { die "interrupted by signal\n"; }; + + my $newdrive = PVE::QemuServer::clone_disk($storecfg, $vmid, $running, $disk, $drive, undef, + $vmid, $storeid, $format, 1, $newvollist); + + $conf->{$disk} = PVE::QemuServer::print_drive($vmid, $newdrive); + + PVE::QemuServer::add_unused_volume($conf, $old_volid) if !$param->{delete}; + + PVE::QemuServer::update_config_nolock($vmid, $conf, 1); + + eval { + # try to deactivate volumes - avoid lvm LVs to be active on several nodes + PVE::Storage::deactivate_volumes($storecfg, [ $newdrive->{file} ]) + if !$running; + }; + warn $@ if $@; + }; + if (my $err = $@) { + + foreach my $volid (@$newvollist) { + eval { PVE::Storage::vdisk_free($storecfg, $volid); }; + warn $@ if $@; + } + die "storage migration failed: $err"; + } + + if ($param->{delete}) { + my $used_paths = PVE::QemuServer::get_used_paths($vmid, $storecfg, $conf, 1, 1); + my $path = PVE::Storage::path($storecfg, $old_volid); + if ($used_paths->{$path}){ + warn "volume $old_volid have snapshots. 
Can't delete it\n"; + PVE::QemuServer::add_unused_volume($conf, $old_volid); + PVE::QemuServer::update_config_nolock($vmid, $conf, 1); + } else { + eval { PVE::Storage::vdisk_free($storecfg, $old_volid); }; + warn $@ if $@; + } + } + }; + + return $rpcenv->fork_worker('qmmove', $vmid, $authuser, $realcmd); }; - $res = "ERROR: $@" if $@; - return $res; + return PVE::QemuServer::lock_config($vmid, $updatefn); + }}); + +__PACKAGE__->register_method({ + name => 'migrate_vm', + path => '{vmid}/migrate', + method => 'POST', + protected => 1, + proxyto => 'node', + description => "Migrate virtual machine. Creates a new migration task.", + permissions => { + check => ['perm', '/vms/{vmid}', [ 'VM.Migrate' ]], + }, + parameters => { + additionalProperties => 0, + properties => { + node => get_standard_option('pve-node'), + vmid => get_standard_option('pve-vmid'), + target => get_standard_option('pve-node', { description => "Target node." }), + online => { + type => 'boolean', + description => "Use online/live migration.", + optional => 1, + }, + force => { + type => 'boolean', + description => "Allow to migrate VMs which use local devices. Only root may use this option.", + optional => 1, + }, + }, + }, + returns => { + type => 'string', + description => "the task ID.", + }, + code => sub { + my ($param) = @_; + + my $rpcenv = PVE::RPCEnvironment::get(); + + my $authuser = $rpcenv->get_user(); + + my $target = extract_param($param, 'target'); + + my $localnode = PVE::INotify::nodename(); + raise_param_exc({ target => "target is local node."}) if $target eq $localnode; + + PVE::Cluster::check_cfs_quorum(); + + PVE::Cluster::check_node_exists($target); + + my $targetip = PVE::Cluster::remote_node_ip($target); + + my $vmid = extract_param($param, 'vmid'); + + raise_param_exc({ force => "Only root may use this option." 
}) + if $param->{force} && $authuser ne 'root@pam'; + + # test if VM exists + my $conf = PVE::QemuServer::load_config($vmid); + + # try to detect errors early + + PVE::QemuServer::check_lock($conf); + + if (PVE::QemuServer::check_running($vmid)) { + die "cant migrate running VM without --online\n" + if !$param->{online}; + } + + my $storecfg = PVE::Storage::config(); + PVE::QemuServer::check_storage_availability($storecfg, $conf, $target); + + if (&$vm_is_ha_managed($vmid) && $rpcenv->{type} ne 'ha') { + + my $hacmd = sub { + my $upid = shift; + + my $service = "pvevm:$vmid"; + + my $cmd = ['clusvcadm', '-M', $service, '-m', $target]; + + print "Executing HA migrate for VM $vmid to node $target\n"; + + PVE::Tools::run_command($cmd); + + return; + }; + + return $rpcenv->fork_worker('hamigrate', $vmid, $authuser, $hacmd); + + } else { + + my $realcmd = sub { + my $upid = shift; + + PVE::QemuMigrate->migrate($target, $targetip, $vmid, $param); + }; + + return $rpcenv->fork_worker('qmigrate', $vmid, $authuser, $realcmd); + } + + }}); + +__PACKAGE__->register_method({ + name => 'monitor', + path => '{vmid}/monitor', + method => 'POST', + protected => 1, + proxyto => 'node', + description => "Execute Qemu monitor commands.", + permissions => { + check => ['perm', '/vms/{vmid}', [ 'VM.Monitor' ]], + }, + parameters => { + additionalProperties => 0, + properties => { + node => get_standard_option('pve-node'), + vmid => get_standard_option('pve-vmid'), + command => { + type => 'string', + description => "The monitor command.", + } + }, + }, + returns => { type => 'string'}, + code => sub { + my ($param) = @_; + + my $vmid = $param->{vmid}; + + my $conf = PVE::QemuServer::load_config ($vmid); # check if VM exists + + my $res = ''; + eval { + $res = PVE::QemuServer::vm_human_monitor_command($vmid, $param->{command}); + }; + $res = "ERROR: $@" if $@; + + return $res; + }}); + +__PACKAGE__->register_method({ + name => 'resize_vm', + path => '{vmid}/resize', + method => 'PUT', + protected => 1, + proxyto => 'node', + description => "Extend volume size.", + permissions => { + check => ['perm', '/vms/{vmid}', [ 'VM.Config.Disk' ]], + }, + parameters => { + additionalProperties => 0, + properties => { + node => get_standard_option('pve-node'), + vmid => get_standard_option('pve-vmid'), + skiplock => get_standard_option('skiplock'), + disk => { + type => 'string', + description => "The disk you want to resize.", + enum => [PVE::QemuServer::disknames()], + }, + size => { + type => 'string', + pattern => '\+?\d+(\.\d+)?[KMGT]?', + description => "The new size. With the '+' sign the value is added to the actual size of the volume and without it, the value is taken as an absolute one. Shrinking disk size is not supported.", + }, + digest => { + type => 'string', + description => 'Prevent changes if current configuration file has different SHA1 digest. This can be used to prevent concurrent modifications.', + maxLength => 40, + optional => 1, + }, + }, + }, + returns => { type => 'null'}, + code => sub { + my ($param) = @_; + + my $rpcenv = PVE::RPCEnvironment::get(); + + my $authuser = $rpcenv->get_user(); + + my $node = extract_param($param, 'node'); + + my $vmid = extract_param($param, 'vmid'); + + my $digest = extract_param($param, 'digest'); + + my $disk = extract_param($param, 'disk'); + + my $sizestr = extract_param($param, 'size'); + + my $skiplock = extract_param($param, 'skiplock'); + raise_param_exc({ skiplock => "Only root may use this option." 
})
+        if $skiplock && $authuser ne 'root@pam';
+
+    my $storecfg = PVE::Storage::config();
+
+    my $updatefn = sub {
+
+        my $conf = PVE::QemuServer::load_config($vmid);
+
+        die "checksum mismatch (file changed by other user?)\n"
+            if $digest && $digest ne $conf->{digest};
+        PVE::QemuServer::check_lock($conf) if !$skiplock;
+
+        die "disk '$disk' does not exist\n" if !$conf->{$disk};
+
+        my $drive = PVE::QemuServer::parse_drive($disk, $conf->{$disk});
+
+        my $volid = $drive->{file};
+
+        die "disk '$disk' has no associated volume\n" if !$volid;
+
+        die "you can't resize a cdrom\n" if PVE::QemuServer::drive_is_cdrom($drive);
+
+        my ($storeid, $volname) = PVE::Storage::parse_volume_id($volid);
+
+        $rpcenv->check($authuser, "/storage/$storeid", ['Datastore.AllocateSpace']);
+
+        my $size = PVE::Storage::volume_size_info($storecfg, $volid, 5);
+
+        die "internal error" if $sizestr !~ m/^(\+)?(\d+(\.\d+)?)([KMGT])?$/;
+        my ($ext, $newsize, $unit) = ($1, $2, $4);
+        if ($unit) {
+            if ($unit eq 'K') {
+                $newsize = $newsize * 1024;
+            } elsif ($unit eq 'M') {
+                $newsize = $newsize * 1024 * 1024;
+            } elsif ($unit eq 'G') {
+                $newsize = $newsize * 1024 * 1024 * 1024;
+            } elsif ($unit eq 'T') {
+                $newsize = $newsize * 1024 * 1024 * 1024 * 1024;
+            }
+        }
+        $newsize += $size if $ext;
+        $newsize = int($newsize);
+
+        die "unable to shrink disk size\n" if $newsize < $size;
+
+        return if $size == $newsize;
+
+        PVE::Cluster::log_msg('info', $authuser, "update VM $vmid: resize --disk $disk --size $sizestr");
+
+        PVE::QemuServer::qemu_block_resize($vmid, "drive-$disk", $storecfg, $volid, $newsize);
+
+        $drive->{size} = $newsize;
+        $conf->{$disk} = PVE::QemuServer::print_drive($vmid, $drive);
+
+        PVE::QemuServer::update_config_nolock($vmid, $conf, 1);
+    };
+
+    PVE::QemuServer::lock_config($vmid, $updatefn);
+    return undef;
+    }});
+
+__PACKAGE__->register_method({
+    name => 'snapshot_list',
+    path => '{vmid}/snapshot',
+    method => 'GET',
+    description => "List all snapshots.",
+    permissions => {
+        check => ['perm', '/vms/{vmid}', [ 'VM.Audit' ]],
+    },
+    proxyto => 'node',
+    protected => 1, # qemu pid files are only readable by root
+    parameters => {
+        additionalProperties => 0,
+        properties => {
+            vmid => get_standard_option('pve-vmid'),
+            node => get_standard_option('pve-node'),
+        },
+    },
+    returns => {
+        type => 'array',
+        items => {
+            type => "object",
+            properties => {},
+        },
+        links => [ { rel => 'child', href => "{name}" } ],
+    },
+    code => sub {
+        my ($param) = @_;
+
+        my $vmid = $param->{vmid};
+
+        my $conf = PVE::QemuServer::load_config($vmid);
+        my $snaphash = $conf->{snapshots} || {};
+
+        my $res = [];
+
+        foreach my $name (keys %$snaphash) {
+            my $d = $snaphash->{$name};
+            my $item = {
+                name => $name,
+                snaptime => $d->{snaptime} || 0,
+                vmstate => $d->{vmstate} ? 1 : 0,
+                description => $d->{description} || '',
+            };
+            $item->{parent} = $d->{parent} if $d->{parent};
+            $item->{snapstate} = $d->{snapstate} if $d->{snapstate};
+            push @$res, $item;
+        }
+
+        my $running = PVE::QemuServer::check_running($vmid, 1) ? 
1 : 0; + my $current = { name => 'current', digest => $conf->{digest}, running => $running }; + $current->{parent} = $conf->{parent} if $conf->{parent}; + + push @$res, $current; + + return $res; + }}); + +__PACKAGE__->register_method({ + name => 'snapshot', + path => '{vmid}/snapshot', + method => 'POST', + protected => 1, + proxyto => 'node', + description => "Snapshot a VM.", + permissions => { + check => ['perm', '/vms/{vmid}', [ 'VM.Snapshot' ]], + }, + parameters => { + additionalProperties => 0, + properties => { + node => get_standard_option('pve-node'), + vmid => get_standard_option('pve-vmid'), + snapname => get_standard_option('pve-snapshot-name'), + vmstate => { + optional => 1, + type => 'boolean', + description => "Save the vmstate", + }, + freezefs => { + optional => 1, + type => 'boolean', + description => "Freeze the filesystem", + }, + description => { + optional => 1, + type => 'string', + description => "A textual description or comment.", + }, + }, + }, + returns => { + type => 'string', + description => "the task ID.", + }, + code => sub { + my ($param) = @_; + + my $rpcenv = PVE::RPCEnvironment::get(); + + my $authuser = $rpcenv->get_user(); + + my $node = extract_param($param, 'node'); + + my $vmid = extract_param($param, 'vmid'); + + my $snapname = extract_param($param, 'snapname'); + + die "unable to use snapshot name 'current' (reserved name)\n" + if $snapname eq 'current'; + + my $realcmd = sub { + PVE::Cluster::log_msg('info', $authuser, "snapshot VM $vmid: $snapname"); + PVE::QemuServer::snapshot_create($vmid, $snapname, $param->{vmstate}, + $param->{freezefs}, $param->{description}); + }; + + return $rpcenv->fork_worker('qmsnapshot', $vmid, $authuser, $realcmd); + }}); + +__PACKAGE__->register_method({ + name => 'snapshot_cmd_idx', + path => '{vmid}/snapshot/{snapname}', + description => '', + method => 'GET', + permissions => { + user => 'all', + }, + parameters => { + additionalProperties => 0, + properties => { + vmid => get_standard_option('pve-vmid'), + node => get_standard_option('pve-node'), + snapname => get_standard_option('pve-snapshot-name'), + }, + }, + returns => { + type => 'array', + items => { + type => "object", + properties => {}, + }, + links => [ { rel => 'child', href => "{cmd}" } ], + }, + code => sub { + my ($param) = @_; + + my $res = []; + + push @$res, { cmd => 'rollback' }; + push @$res, { cmd => 'config' }; + + return $res; + }}); + +__PACKAGE__->register_method({ + name => 'update_snapshot_config', + path => '{vmid}/snapshot/{snapname}/config', + method => 'PUT', + protected => 1, + proxyto => 'node', + description => "Update snapshot metadata.", + permissions => { + check => ['perm', '/vms/{vmid}', [ 'VM.Snapshot' ]], + }, + parameters => { + additionalProperties => 0, + properties => { + node => get_standard_option('pve-node'), + vmid => get_standard_option('pve-vmid'), + snapname => get_standard_option('pve-snapshot-name'), + description => { + optional => 1, + type => 'string', + description => "A textual description or comment.", + }, + }, + }, + returns => { type => 'null' }, + code => sub { + my ($param) = @_; + + my $rpcenv = PVE::RPCEnvironment::get(); + + my $authuser = $rpcenv->get_user(); + + my $vmid = extract_param($param, 'vmid'); + + my $snapname = extract_param($param, 'snapname'); + + return undef if !defined($param->{description}); + + my $updatefn = sub { + + my $conf = PVE::QemuServer::load_config($vmid); + + PVE::QemuServer::check_lock($conf); + + my $snap = $conf->{snapshots}->{$snapname}; + + die "snapshot 
'$snapname' does not exist\n" if !defined($snap); + + $snap->{description} = $param->{description} if defined($param->{description}); + + PVE::QemuServer::update_config_nolock($vmid, $conf, 1); + }; + + PVE::QemuServer::lock_config($vmid, $updatefn); + + return undef; + }}); + +__PACKAGE__->register_method({ + name => 'get_snapshot_config', + path => '{vmid}/snapshot/{snapname}/config', + method => 'GET', + proxyto => 'node', + description => "Get snapshot configuration", + permissions => { + check => ['perm', '/vms/{vmid}', [ 'VM.Snapshot' ]], + }, + parameters => { + additionalProperties => 0, + properties => { + node => get_standard_option('pve-node'), + vmid => get_standard_option('pve-vmid'), + snapname => get_standard_option('pve-snapshot-name'), + }, + }, + returns => { type => "object" }, + code => sub { + my ($param) = @_; + + my $rpcenv = PVE::RPCEnvironment::get(); + + my $authuser = $rpcenv->get_user(); + + my $vmid = extract_param($param, 'vmid'); + + my $snapname = extract_param($param, 'snapname'); + + my $conf = PVE::QemuServer::load_config($vmid); + + my $snap = $conf->{snapshots}->{$snapname}; + + die "snapshot '$snapname' does not exist\n" if !defined($snap); + + return $snap; + }}); + +__PACKAGE__->register_method({ + name => 'rollback', + path => '{vmid}/snapshot/{snapname}/rollback', + method => 'POST', + protected => 1, + proxyto => 'node', + description => "Rollback VM state to specified snapshot.", + permissions => { + check => ['perm', '/vms/{vmid}', [ 'VM.Snapshot' ]], + }, + parameters => { + additionalProperties => 0, + properties => { + node => get_standard_option('pve-node'), + vmid => get_standard_option('pve-vmid'), + snapname => get_standard_option('pve-snapshot-name'), + }, + }, + returns => { + type => 'string', + description => "the task ID.", + }, + code => sub { + my ($param) = @_; + + my $rpcenv = PVE::RPCEnvironment::get(); + + my $authuser = $rpcenv->get_user(); + + my $node = extract_param($param, 'node'); + + my $vmid = extract_param($param, 'vmid'); + + my $snapname = extract_param($param, 'snapname'); + + my $realcmd = sub { + PVE::Cluster::log_msg('info', $authuser, "rollback snapshot VM $vmid: $snapname"); + PVE::QemuServer::snapshot_rollback($vmid, $snapname); + }; + + return $rpcenv->fork_worker('qmrollback', $vmid, $authuser, $realcmd); + }}); + +__PACKAGE__->register_method({ + name => 'delsnapshot', + path => '{vmid}/snapshot/{snapname}', + method => 'DELETE', + protected => 1, + proxyto => 'node', + description => "Delete a VM snapshot.", + permissions => { + check => ['perm', '/vms/{vmid}', [ 'VM.Snapshot' ]], + }, + parameters => { + additionalProperties => 0, + properties => { + node => get_standard_option('pve-node'), + vmid => get_standard_option('pve-vmid'), + snapname => get_standard_option('pve-snapshot-name'), + force => { + optional => 1, + type => 'boolean', + description => "For removal from config file, even if removing disk snapshots fails.", + }, + }, + }, + returns => { + type => 'string', + description => "the task ID.", + }, + code => sub { + my ($param) = @_; + + my $rpcenv = PVE::RPCEnvironment::get(); + + my $authuser = $rpcenv->get_user(); + + my $node = extract_param($param, 'node'); + + my $vmid = extract_param($param, 'vmid'); + + my $snapname = extract_param($param, 'snapname'); + + my $realcmd = sub { + PVE::Cluster::log_msg('info', $authuser, "delete snapshot VM $vmid: $snapname"); + PVE::QemuServer::snapshot_delete($vmid, $snapname, $param->{force}); + }; + + return $rpcenv->fork_worker('qmdelsnapshot', 
$vmid, $authuser, $realcmd); + }}); + +__PACKAGE__->register_method({ + name => 'template', + path => '{vmid}/template', + method => 'POST', + protected => 1, + proxyto => 'node', + description => "Create a Template.", + permissions => { + description => "You need 'VM.Allocate' permissions on /vms/{vmid}", + check => [ 'perm', '/vms/{vmid}', ['VM.Allocate']], + }, + parameters => { + additionalProperties => 0, + properties => { + node => get_standard_option('pve-node'), + vmid => get_standard_option('pve-vmid'), + disk => { + optional => 1, + type => 'string', + description => "If you want to convert only 1 disk to base image.", + enum => [PVE::QemuServer::disknames()], + }, + + }, + }, + returns => { type => 'null'}, + code => sub { + my ($param) = @_; + + my $rpcenv = PVE::RPCEnvironment::get(); + + my $authuser = $rpcenv->get_user(); + + my $node = extract_param($param, 'node'); + + my $vmid = extract_param($param, 'vmid'); + + my $disk = extract_param($param, 'disk'); + + my $updatefn = sub { + + my $conf = PVE::QemuServer::load_config($vmid); + + PVE::QemuServer::check_lock($conf); + + die "unable to create template, because VM contains snapshots\n" + if $conf->{snapshots} && scalar(keys %{$conf->{snapshots}}); + + die "you can't convert a template to a template\n" + if PVE::QemuServer::is_template($conf) && !$disk; + + die "you can't convert a VM to template if VM is running\n" + if PVE::QemuServer::check_running($vmid); + + my $realcmd = sub { + PVE::QemuServer::template_create($vmid, $conf, $disk); + }; + + $conf->{template} = 1; + PVE::QemuServer::update_config_nolock($vmid, $conf, 1); + + return $rpcenv->fork_worker('qmtemplate', $vmid, $authuser, $realcmd); + }; + + PVE::QemuServer::lock_config($vmid, $updatefn); + return undef; }}); 1;
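
The resize handler added above accepts its 'size' parameter either as an absolute value or, with a leading '+', as an increment on top of the current volume size; K/M/G/T are treated as binary (1024-based) multipliers and shrinking is rejected. The following standalone sketch restates that size-string handling outside the API context; the helper name parse_resize_size and the example values are illustrative only and not part of the patch.

use strict;
use warnings;

# Sketch of the size-string interpretation used by the resize call:
# '+<value>[KMGT]' grows the current size, '<value>[KMGT]' is absolute.
sub parse_resize_size {
    my ($sizestr, $current) = @_;    # $current = current volume size in bytes

    die "invalid size string '$sizestr'\n"
        if $sizestr !~ m/^(\+)?(\d+(\.\d+)?)([KMGT])?$/;

    my ($relative, $value, $unit) = ($1, $2, $4);

    my %mult = (K => 1024, M => 1024**2, G => 1024**3, T => 1024**4);
    $value *= $mult{$unit} if $unit;

    my $newsize = $relative ? $current + $value : $value;
    $newsize = int($newsize);

    die "unable to shrink disk size\n" if $newsize < $current;
    return $newsize;
}

# Example: growing a 10 GiB volume by 2 GiB yields 12884901888 bytes.
# print parse_resize_size('+2G', 10 * 1024**3), "\n";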
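
The snapshot_list handler returns a flat array in which each entry may carry a 'parent' field, and a synthetic 'current' entry (with the config digest and running state) represents the live configuration. A minimal sketch of how a consumer of that endpoint could rebuild the parent/children relationships from the flat list; the helper name snapshot_children and the sample data are illustrative only and not part of the patch.

use strict;
use warnings;

# Build a parent => [children] map from the flat list returned by
# GET .../qemu/{vmid}/snapshot; entries without a 'parent' field are
# tree roots, and 'current' hangs off the snapshot it was taken from.
sub snapshot_children {
    my ($list) = @_;

    my %children;
    foreach my $item (@$list) {
        my $parent = defined($item->{parent}) ? $item->{parent} : '';
        push @{ $children{$parent} }, $item->{name};
    }
    return \%children;
}

# Example with illustrative data:
# my $tree = snapshot_children([
#     { name => 'base' },
#     { name => 'testing', parent => 'base' },
#     { name => 'current', parent => 'testing' },
# ]);
# $tree->{base} is ['testing'], $tree->{testing} is ['current'].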