use strict;
use warnings;

use Cwd 'abs_path';
use Net::SSLeay;
use POSIX;
use IO::Socket::IP;
use URI::Escape;
use Crypt::OpenSSL::Random;

use PVE::Cluster qw(cfs_read_file cfs_write_file);
use PVE::RRD;
use PVE::SafeSyslog;
use PVE::Tools qw(extract_param);
use PVE::Exception qw(raise raise_param_exc raise_perm_exc);
use PVE::GuestHelpers;
use PVE::QemuConfig;
use PVE::QemuServer;
use PVE::QemuServer::Drive;
use PVE::QemuServer::CPUConfig;
use PVE::QemuServer::Monitor qw(mon_cmd);
use PVE::QemuMigrate;
use PVE::RPCEnvironment;
use PVE::AccessControl;
use PVE::Network;
use PVE::Firewall;
use PVE::API2::Firewall::VM;
use PVE::API2::Qemu::Agent;
use PVE::VZDump::Plugin;
use PVE::DataCenterConfig;
use PVE::SSHInfo;
BEGIN {
if (!$ENV{PVE_GENERATING_DOCS}) {
}
};
+my $NEW_DISK_RE = qr!^(([^/:\s]+):)?(\d+(\.\d+)?)$!;
# Verify that $authuser may use/allocate every volume referenced in $settings.
# - 'none'/'cloudinit' pseudo-volumes need no check
# - physical CDROM pass-through requires Sys.Console on /
# - "new disk" requests (see $NEW_DISK_RE) require Datastore.AllocateSpace on
#   the target storage, which additionally must support VM images
# - everything else is delegated to PVE::Storage::check_volume_access()
# Also checks allocation permission on a configured vmstatestorage.
my $check_storage_access = sub {
    my ($rpcenv, $authuser, $storecfg, $vmid, $settings, $default_storage) = @_;

    PVE::QemuConfig->foreach_volume($settings, sub {
	my ($ds, $drive) = @_;
	my $isCDROM = PVE::QemuServer::drive_is_cdrom($drive);
	my $volid = $drive->{file};
	my ($storeid, $volname) = PVE::Storage::parse_volume_id($volid, 1);

	if (!$volid || ($volid eq 'none' || $volid eq 'cloudinit' || (defined($volname) && $volname eq 'cloudinit'))) {
	    # nothing to check
	} elsif ($isCDROM && ($volid eq 'cdrom')) {
	    $rpcenv->check($authuser, "/", ['Sys.Console']);
	} elsif (!$isCDROM && ($volid =~ $NEW_DISK_RE)) {
	    my ($storeid, $size) = ($2 || $default_storage, $3);
	    die "no storage ID specified (and no default storage)\n" if !$storeid;
	    $rpcenv->check($authuser, "/storage/$storeid", ['Datastore.AllocateSpace']);
	    my $scfg = PVE::Storage::storage_config($storecfg, $storeid);
	    raise_param_exc({ storage => "storage '$storeid' does not support vm images"})
		if !$scfg->{content}->{images};
	} else {
	    PVE::Storage::check_volume_access($rpcenv, $authuser, $storecfg, $vmid, $volid);
	}
    });

    $rpcenv->check($authuser, "/storage/$settings->{vmstatestorage}", ['Datastore.AllocateSpace'])
	if defined($settings->{vmstatestorage});
};
my $check_storage_access_clone = sub {
my $sharedvm = 1;
- PVE::QemuServer::foreach_drive($conf, sub {
+ PVE::QemuConfig->foreach_volume($conf, sub {
my ($ds, $drive) = @_;
my $isCDROM = PVE::QemuServer::drive_is_cdrom($drive);
}
});
+ $rpcenv->check($authuser, "/storage/$conf->{vmstatestorage}", ['Datastore.AllocateSpace'])
+ if defined($conf->{vmstatestorage});
+
return $sharedvm;
};
# Note: $pool is only needed when creating a VM, because pool permissions
# are automatically inherited if VM already exists inside a pool.
-my $NEW_DISK_RE = qr!^(([^/:\s]+):)?(\d+(\.\d+)?)$!;
my $create_disks = sub {
- my ($rpcenv, $authuser, $conf, $storecfg, $vmid, $pool, $settings, $default_storage) = @_;
+ my ($rpcenv, $authuser, $conf, $arch, $storecfg, $vmid, $pool, $settings, $default_storage) = @_;
my $vollist = [];
my ($ds, $disk) = @_;
my $volid = $disk->{file};
+ my ($storeid, $volname) = PVE::Storage::parse_volume_id($volid, 1);
if (!$volid || $volid eq 'none' || $volid eq 'cdrom') {
delete $disk->{size};
- $res->{$ds} = PVE::QemuServer::print_drive($vmid, $disk);
+ $res->{$ds} = PVE::QemuServer::print_drive($disk);
+ } elsif (defined($volname) && $volname eq 'cloudinit') {
+ $storeid = $storeid // $default_storage;
+ die "no storage ID specified (and no default storage)\n" if !$storeid;
+ my $scfg = PVE::Storage::storage_config($storecfg, $storeid);
+ my $name = "vm-$vmid-cloudinit";
+
+ my $fmt = undef;
+ if ($scfg->{path}) {
+ $fmt = $disk->{format} // "qcow2";
+ $name .= ".$fmt";
+ } else {
+ $fmt = $disk->{format} // "raw";
+ }
+
+ # Initial disk created with 4 MB and aligned to 4MB on regeneration
+ my $ci_size = PVE::QemuServer::Cloudinit::CLOUDINIT_DISK_SIZE;
+ my $volid = PVE::Storage::vdisk_alloc($storecfg, $storeid, $vmid, $fmt, $name, $ci_size/1024);
+ $disk->{file} = $volid;
+ $disk->{media} = 'cdrom';
+ push @$vollist, $volid;
+ delete $disk->{format}; # no longer needed
+ $res->{$ds} = PVE::QemuServer::print_drive($disk);
} elsif ($volid =~ $NEW_DISK_RE) {
my ($storeid, $size) = ($2 || $default_storage, $3);
die "no storage ID specified (and no default storage)\n" if !$storeid;
my $defformat = PVE::Storage::storage_default_format($storecfg, $storeid);
my $fmt = $disk->{format} || $defformat;
+ $size = PVE::Tools::convert_size($size, 'gb' => 'kb'); # vdisk_alloc uses kb
+
my $volid;
if ($ds eq 'efidisk0') {
- # handle efidisk
- my $ovmfvars = '/usr/share/kvm/OVMF_VARS-pure-efi.fd';
- die "uefi vars image not found\n" if ! -f $ovmfvars;
- $volid = PVE::Storage::vdisk_alloc($storecfg, $storeid, $vmid,
- $fmt, undef, 128);
- $disk->{file} = $volid;
- $disk->{size} = 128*1024;
- my ($storeid, $volname) = PVE::Storage::parse_volume_id($volid);
- my $scfg = PVE::Storage::storage_config($storecfg, $storeid);
- my $qemufmt = PVE::QemuServer::qemu_img_format($scfg, $volname);
- my $path = PVE::Storage::path($storecfg, $volid);
- my $efidiskcmd = ['/usr/bin/qemu-img', 'convert', '-n', '-f', 'raw', '-O', $qemufmt];
- push @$efidiskcmd, $ovmfvars;
- push @$efidiskcmd, $path;
-
- PVE::Storage::activate_volumes($storecfg, [$volid]);
-
- eval { PVE::Tools::run_command($efidiskcmd); };
- my $err = $@;
- die "Copying of EFI Vars image failed: $err" if $err;
+ ($volid, $size) = PVE::QemuServer::create_efidisk($storecfg, $storeid, $vmid, $fmt, $arch);
} else {
- $volid = PVE::Storage::vdisk_alloc($storecfg, $storeid, $vmid,
- $fmt, undef, $size*1024*1024);
- $disk->{file} = $volid;
- $disk->{size} = $size*1024*1024*1024;
+ $volid = PVE::Storage::vdisk_alloc($storecfg, $storeid, $vmid, $fmt, undef, $size);
}
push @$vollist, $volid;
+ $disk->{file} = $volid;
+ $disk->{size} = PVE::Tools::convert_size($size, 'kb' => 'b');
delete $disk->{format}; # no longer needed
- $res->{$ds} = PVE::QemuServer::print_drive($vmid, $disk);
+ $res->{$ds} = PVE::QemuServer::print_drive($disk);
} else {
PVE::Storage::check_volume_access($rpcenv, $authuser, $storecfg, $vmid, $volid);
if ($volid_is_new) {
- my ($storeid, $volname) = PVE::Storage::parse_volume_id($volid, 1);
-
PVE::Storage::activate_volumes($storecfg, [ $volid ]) if $storeid;
my $size = PVE::Storage::volume_size_info($storecfg, $volid);
- die "volume $volid does not exists\n" if !$size;
+ die "volume $volid does not exist\n" if !$size;
$disk->{size} = $size;
}
- $res->{$ds} = PVE::QemuServer::print_drive($vmid, $disk);
+ $res->{$ds} = PVE::QemuServer::print_drive($disk);
}
};
- eval { PVE::QemuServer::foreach_drive($settings, $code); };
+ eval { PVE::QemuConfig->foreach_volume($settings, $code); };
# free allocated images on error
if (my $err = $@) {
return $vollist;
};
# Ensure $authuser may use the CPU model requested in $new->{cpu}.
# Built-in models and the default are always allowed, as is keeping the
# cputype an existing config already uses; custom (cluster-defined) CPU
# models additionally require Sys.Audit on /nodes.
my $check_cpu_model_access = sub {
    my ($rpcenv, $authuser, $new, $existing) = @_;

    return if !defined($new->{cpu});

    my $cpu = PVE::JSONSchema::check_format('pve-vm-cpu-conf', $new->{cpu});
    return if !$cpu || !$cpu->{cputype}; # always allow default
    my $cputype = $cpu->{cputype};

    if ($existing && $existing->{cpu}) {
	# changing only other settings doesn't require permissions for CPU model
	my $existingCpu = PVE::JSONSchema::check_format('pve-vm-cpu-conf', $existing->{cpu});
	return if $existingCpu->{cputype} eq $cputype;
    }

    if (PVE::QemuServer::CPUConfig::is_custom_model($cputype)) {
	$rpcenv->check($authuser, "/nodes", ['Sys.Audit']);
    }
};
+
# option group used by the modify-permission check below; per the visible
# check these options are gated on VM.Config.CPU
my $cpuoptions = {
    'cores' => 1,
    'cpu' => 1,
    'tablet' => 1,
    'vga' => 1,
    'watchdog' => 1,
    'audio0' => 1,
};
# general VM options grouped for the modify-permission check
my $generaloptions = {
    'startup' => 1,
    'tdf' => 1,
    'template' => 1,
    'tags' => 1,
};
my $vmpoweroptions = {
# disk related options; per the visible check these are gated on VM.Config.Disk
my $diskoptions = {
    'boot' => 1,
    'bootdisk' => 1,
    'vmstatestorage' => 1,
};
+
# cloud-init related options; per the visible check these only require
# VM.Config.Network (same as net\d+/ipconfig\d+)
my $cloudinitoptions = {
    cicustom => 1,
    cipassword => 1,
    citype => 1,
    ciuser => 1,
    nameserver => 1,
    searchdomain => 1,
    sshkeys => 1,
};
my $check_vm_modify_config_perm = sub {
return 1 if $authuser eq 'root@pam';
foreach my $opt (@$key_list) {
- # disk checks need to be done somewhere else
+ # some checks (e.g., disk, serial port, usb) need to be done somewhere
+ # else, as there the permission can be value dependend
next if PVE::QemuServer::is_valid_drivename($opt);
next if $opt eq 'cdrom';
- next if $opt =~ m/^unused\d+$/;
+ next if $opt =~ m/^(?:unused|serial|usb)\d+$/;
+
if ($cpuoptions->{$opt} || $opt =~ m/^numa\d+$/) {
$rpcenv->check_vm_perm($authuser, $vmid, $pool, ['VM.Config.CPU']);
$rpcenv->check_vm_perm($authuser, $vmid, $pool, ['VM.PowerMgmt']);
} elsif ($diskoptions->{$opt}) {
$rpcenv->check_vm_perm($authuser, $vmid, $pool, ['VM.Config.Disk']);
- } elsif ($opt =~ m/^net\d+$/) {
+ } elsif ($cloudinitoptions->{$opt} || ($opt =~ m/^(?:net|ipconfig)\d+$/)) {
$rpcenv->check_vm_perm($authuser, $vmid, $pool, ['VM.Config.Network']);
+ } elsif ($opt eq 'vmstate') {
+ # the user needs Disk and PowerMgmt privileges to change the vmstate
+ # also needs privileges on the storage, that will be checked later
+ $rpcenv->check_vm_perm($authuser, $vmid, $pool, ['VM.Config.Disk', 'VM.PowerMgmt' ]);
} else {
- # catches usb\d+, hostpci\d+, args, lock, etc.
+ # catches hostpci\d+, args, lock, etc.
# new options will be checked here
die "only root can set '$opt' config\n";
}
type => 'array',
items => {
type => "object",
- properties => {},
+ properties => $PVE::QemuServer::vmstatus_return_properties,
},
links => [ { rel => 'child', href => "{vmid}" } ],
},
next if !$rpcenv->check($authuser, "/vms/$vmid", [ 'VM.Audit' ], 1);
my $data = $vmstatus->{$vmid};
- $data->{vmid} = int($vmid);
push @$res, $data;
}
return $res;
}});
# Classify a restore 'archive' parameter. Returns
#   { type => 'pbs',  volid => ... } for Proxmox Backup Server volumes, or
#   { type => 'file', path  => ... } for file based (.tar/.vma) archives.
# (The stdin pipe case is handled by the caller before this is invoked.)
my $parse_restore_archive = sub {
    my ($storecfg, $archive) = @_;

    my ($archive_storeid, $archive_volname) = PVE::Storage::parse_volume_id($archive, 1);

    if (defined($archive_storeid)) {
	my $scfg = PVE::Storage::storage_config($storecfg, $archive_storeid);
	if ($scfg->{type} eq 'pbs') {
	    return {
		type => 'pbs',
		volid => $archive,
	    };
	}
    }
    # not a PBS volume: resolve to an absolute path on the file system
    my $path = PVE::Storage::abs_filesystem_path($storecfg, $archive);
    return {
	type => 'file',
	path => $path,
    };
};
__PACKAGE__->register_method({
node => get_standard_option('pve-node'),
vmid => get_standard_option('pve-vmid', { completion => \&PVE::Cluster::complete_next_vmid }),
archive => {
- description => "The backup file.",
+ description => "The backup archive. Either the file system path to a .tar or .vma file (use '-' to pipe data from stdin) or a proxmox storage backup volume identifier.",
type => 'string',
optional => 1,
maxLength => 255,
type => 'string', format => 'pve-poolid',
description => "Add the VM to the specified pool.",
},
+ bwlimit => {
+ description => "Override I/O bandwidth limit (in KiB/s).",
+ optional => 1,
+ type => 'integer',
+ minimum => '0',
+ default => 'restore limit from datacenter or storage config',
+ },
+ start => {
+ optional => 1,
+ type => 'boolean',
+ default => 0,
+ description => "Start VM after it was created successfully.",
+ },
}),
},
returns => {
my ($param) = @_;
my $rpcenv = PVE::RPCEnvironment::get();
-
my $authuser = $rpcenv->get_user();
my $node = extract_param($param, 'node');
-
my $vmid = extract_param($param, 'vmid');
my $archive = extract_param($param, 'archive');
+ my $is_restore = !!$archive;
- my $storage = extract_param($param, 'storage');
-
+ my $bwlimit = extract_param($param, 'bwlimit');
my $force = extract_param($param, 'force');
-
+ my $pool = extract_param($param, 'pool');
+ my $start_after_create = extract_param($param, 'start');
+ my $storage = extract_param($param, 'storage');
my $unique = extract_param($param, 'unique');
- my $pool = extract_param($param, 'pool');
+ if (defined(my $ssh_keys = $param->{sshkeys})) {
+ $ssh_keys = URI::Escape::uri_unescape($ssh_keys);
+ PVE::Tools::validate_ssh_public_keys($ssh_keys);
+ }
- my $filename = PVE::QemuConfig->config_file($vmid);
+ PVE::Cluster::check_cfs_quorum();
+ my $filename = PVE::QemuConfig->config_file($vmid);
my $storecfg = PVE::Storage::config();
- PVE::Cluster::check_cfs_quorum();
-
if (defined($pool)) {
$rpcenv->check_pool_exist($pool);
}
&$check_vm_modify_config_perm($rpcenv, $authuser, $vmid, $pool, [ keys %$param]);
+ &$check_cpu_model_access($rpcenv, $authuser, $param);
+
foreach my $opt (keys %$param) {
if (PVE::QemuServer::is_valid_drivename($opt)) {
my $drive = PVE::QemuServer::parse_drive($opt, $param->{$opt});
raise_param_exc({ $opt => "unable to parse drive options" }) if !$drive;
PVE::QemuServer::cleanup_drive_path($opt, $storecfg, $drive);
- $param->{$opt} = PVE::QemuServer::print_drive($vmid, $drive);
+ $param->{$opt} = PVE::QemuServer::print_drive($drive);
}
}
if ($archive eq '-') {
die "pipe requires cli environment\n"
if $rpcenv->{type} ne 'cli';
+ $archive = { type => 'pipe' };
} else {
PVE::Storage::check_volume_access($rpcenv, $authuser, $storecfg, $vmid, $archive);
- $archive = PVE::Storage::abs_filesystem_path($storecfg, $archive);
+
+ $archive = $parse_restore_archive->($storecfg, $archive);
}
}
- my $restorefn = sub {
- my $vmlist = PVE::Cluster::get_vmlist();
- if ($vmlist->{ids}->{$vmid}) {
- my $current_node = $vmlist->{ids}->{$vmid}->{node};
- if ($current_node eq $node) {
- my $conf = PVE::QemuConfig->load_config($vmid);
-
- PVE::QemuConfig->check_protection($conf, "unable to restore VM $vmid");
+ my $emsg = $is_restore ? "unable to restore VM $vmid -" : "unable to create VM $vmid -";
- die "unable to restore vm $vmid - config file already exists\n"
- if !$force;
+ eval { PVE::QemuConfig->create_and_lock_config($vmid, $force) };
+ die "$emsg $@" if $@;
- die "unable to restore vm $vmid - vm is running\n"
- if PVE::QemuServer::check_running($vmid);
+ my $restorefn = sub {
+ my $conf = PVE::QemuConfig->load_config($vmid);
- die "unable to restore vm $vmid - vm is a template\n"
- if PVE::QemuConfig->is_template($conf);
+ PVE::QemuConfig->check_protection($conf, $emsg);
- } else {
- die "unable to restore vm $vmid - already existing on cluster node '$current_node'\n";
- }
- }
+ die "$emsg vm is running\n" if PVE::QemuServer::check_running($vmid);
my $realcmd = sub {
- PVE::QemuServer::restore_archive($archive, $vmid, $authuser, {
+ my $restore_options = {
storage => $storage,
pool => $pool,
- unique => $unique });
+ unique => $unique,
+ bwlimit => $bwlimit,
+ };
+ if ($archive->{type} eq 'file' || $archive->{type} eq 'pipe') {
+ PVE::QemuServer::restore_file_archive($archive->{path} // '-', $vmid, $authuser, $restore_options);
+ } elsif ($archive->{type} eq 'pbs') {
+ PVE::QemuServer::restore_proxmox_backup_archive($archive->{volid}, $vmid, $authuser, $restore_options);
+ } else {
+ die "unknown backup archive type\n";
+ }
+ my $restored_conf = PVE::QemuConfig->load_config($vmid);
+ # Convert restored VM to template if backup was VM template
+ if (PVE::QemuConfig->is_template($restored_conf)) {
+ warn "Convert to template.\n";
+ eval { PVE::QemuServer::template_create($vmid, $restored_conf) };
+ warn $@ if $@;
+ }
PVE::AccessControl::add_vm_to_pool($vmid, $pool) if $pool;
};
# ensure no old replication state are exists
PVE::ReplicationState::delete_guest_states($vmid);
- return $rpcenv->fork_worker('qmrestore', $vmid, $authuser, $realcmd);
+ PVE::QemuConfig->lock_config_full($vmid, 1, $realcmd);
+
+ if ($start_after_create) {
+ print "Execute autostart\n";
+ eval { PVE::API2::Qemu->vm_start({ vmid => $vmid, node => $node }) };
+ warn $@ if $@;
+ }
};
my $createfn = sub {
-
- # test after locking
- PVE::Cluster::check_vmid_unused($vmid);
-
# ensure no old replication state are exists
PVE::ReplicationState::delete_guest_states($vmid);
my $realcmd = sub {
-
- my $vollist = [];
-
my $conf = $param;
+ my $arch = PVE::QemuServer::get_vm_arch($conf);
+ my $vollist = [];
eval {
+ $vollist = &$create_disks($rpcenv, $authuser, $conf, $arch, $storecfg, $vmid, $pool, $param, $storage);
- $vollist = &$create_disks($rpcenv, $authuser, $conf, $storecfg, $vmid, $pool, $param, $storage);
-
- # try to be smart about bootdisk
- my @disks = PVE::QemuServer::valid_drive_names();
- my $firstdisk;
- foreach my $ds (reverse @disks) {
- next if !$conf->{$ds};
- my $disk = PVE::QemuServer::parse_drive($ds, $conf->{$ds});
- next if PVE::QemuServer::drive_is_cdrom($disk);
- $firstdisk = $ds;
- }
-
- if (!$conf->{bootdisk} && $firstdisk) {
- $conf->{bootdisk} = $firstdisk;
+ if (!$conf->{bootdisk}) {
+ my $firstdisk = PVE::QemuServer::Drive::resolve_first_disk($conf);
+ $conf->{bootdisk} = $firstdisk if $firstdisk;
}
# auto generate uuid if user did not specify smbios1 option
if (!$conf->{smbios1}) {
- my ($uuid, $uuid_str);
- UUID::generate($uuid);
- UUID::unparse($uuid, $uuid_str);
- $conf->{smbios1} = "uuid=$uuid_str";
+ $conf->{smbios1} = PVE::QemuServer::generate_smbios1_uuid();
+ }
+
+ if ((!defined($conf->{vmgenid}) || $conf->{vmgenid} eq '1') && $arch ne 'aarch64') {
+ $conf->{vmgenid} = PVE::QemuServer::generate_uuid();
}
PVE::QemuConfig->write_config($vmid, $conf);
eval { PVE::Storage::vdisk_free($storecfg, $volid); };
warn $@ if $@;
}
- die "create failed - $err";
+ die "$emsg $err";
}
PVE::AccessControl::add_vm_to_pool($vmid, $pool) if $pool;
};
- return $rpcenv->fork_worker('qmcreate', $vmid, $authuser, $realcmd);
+ PVE::QemuConfig->lock_config_full($vmid, 1, $realcmd);
+
+ if ($start_after_create) {
+ print "Execute autostart\n";
+ eval { PVE::API2::Qemu->vm_start({vmid => $vmid, node => $node}) };
+ warn $@ if $@;
+ }
};
- return PVE::QemuConfig->lock_config_full($vmid, 1, $archive ? $restorefn : $createfn);
+ my ($code, $worker_name);
+ if ($is_restore) {
+ $worker_name = 'qmrestore';
+ $code = sub {
+ eval { $restorefn->() };
+ if (my $err = $@) {
+ eval { PVE::QemuConfig->remove_lock($vmid, 'create') };
+ warn $@ if $@;
+ die $err;
+ }
+ };
+ } else {
+ $worker_name = 'qmcreate';
+ $code = sub {
+ eval { $createfn->() };
+ if (my $err = $@) {
+ eval {
+ my $conffile = PVE::QemuConfig->config_file($vmid);
+ unlink($conffile) or die "failed to remove config file: $!\n";
+ };
+ warn $@ if $@;
+ die $err;
+ }
+ };
+ }
+
+ return $rpcenv->fork_worker($worker_name, $vmid, $authuser, $code);
}});
__PACKAGE__->register_method({
{ subdir => 'status' },
{ subdir => 'unlink' },
{ subdir => 'vncproxy' },
+ { subdir => 'termproxy' },
{ subdir => 'migrate' },
{ subdir => 'resize' },
{ subdir => 'move' },
path => '{vmid}/firewall',
});
# mount the QEMU guest agent API below {vmid}/agent
__PACKAGE__->register_method ({
    subclass => "PVE::API2::Qemu::Agent",
    path => '{vmid}/agent',
});
+
__PACKAGE__->register_method({
name => 'rrd',
path => '{vmid}/rrd',
code => sub {
my ($param) = @_;
- return PVE::Cluster::create_rrd_graph(
+ return PVE::RRD::create_rrd_graph(
"pve2-vm/$param->{vmid}", $param->{timeframe},
$param->{ds}, $param->{cf});
code => sub {
my ($param) = @_;
- return PVE::Cluster::create_rrd_data(
+ return PVE::RRD::create_rrd_data(
"pve2-vm/$param->{vmid}", $param->{timeframe}, $param->{cf});
}});
path => '{vmid}/config',
method => 'GET',
proxyto => 'node',
- description => "Get current virtual machine configuration. This does not include pending configuration changes (see 'pending' API).",
+ description => "Get the virtual machine configuration with pending configuration " .
+ "changes applied. Set the 'current' parameter to get the current configuration instead.",
permissions => {
check => ['perm', '/vms/{vmid}', [ 'VM.Audit' ]],
},
properties => {
node => get_standard_option('pve-node'),
vmid => get_standard_option('pve-vmid', { completion => \&PVE::QemuServer::complete_vmid }),
- current => {
- description => "Get current values (instead of pending values).",
- optional => 1,
+ current => {
+ description => "Get current values (instead of pending values).",
+ optional => 1,
default => 0,
type => 'boolean',
- },
+ },
+ snapshot => get_standard_option('pve-snapshot-name', {
+ description => "Fetch config values from given snapshot.",
+ optional => 1,
+ completion => sub {
+ my ($cmd, $pname, $cur, $args) = @_;
+ PVE::QemuConfig->snapshot_list($args->[0]);
+ },
+ }),
},
},
returns => {
+ description => "The VM configuration.",
type => "object",
- properties => {
+ properties => PVE::QemuServer::json_config_properties({
digest => {
type => 'string',
description => 'SHA1 digest of configuration file. This can be used to prevent concurrent modifications.',
}
- },
+ }),
},
code => sub {
my ($param) = @_;
- my $conf = PVE::QemuConfig->load_config($param->{vmid});
-
- delete $conf->{snapshots};
+ raise_param_exc({ snapshot => "cannot use 'snapshot' parameter with 'current'",
+ current => "cannot use 'snapshot' parameter with 'current'"})
+ if ($param->{snapshot} && $param->{current});
- if (!$param->{current}) {
- foreach my $opt (keys %{$conf->{pending}}) {
- next if $opt eq 'delete';
- my $value = $conf->{pending}->{$opt};
- next if ref($value); # just to be sure
- $conf->{$opt} = $value;
- }
- my $pending_delete_hash = PVE::QemuServer::split_flagged_list($conf->{pending}->{delete});
- foreach my $opt (keys %$pending_delete_hash) {
- delete $conf->{$opt} if $conf->{$opt};
- }
+ my $conf;
+ if ($param->{snapshot}) {
+ $conf = PVE::QemuConfig->load_snapshot_config($param->{vmid}, $param->{snapshot});
+ } else {
+ $conf = PVE::QemuConfig->load_current_config($param->{vmid}, $param->{current});
}
-
- delete $conf->{pending};
-
+ $conf->{cipassword} = '**********' if $conf->{cipassword};
return $conf;
+
}});
__PACKAGE__->register_method({
path => '{vmid}/pending',
method => 'GET',
proxyto => 'node',
- description => "Get virtual machine configuration, including pending changes.",
+ description => "Get the virtual machine configuration with both current and pending values.",
permissions => {
check => ['perm', '/vms/{vmid}', [ 'VM.Audit' ]],
},
my $conf = PVE::QemuConfig->load_config($param->{vmid});
- my $pending_delete_hash = PVE::QemuServer::split_flagged_list($conf->{pending}->{delete});
-
- my $res = [];
-
- foreach my $opt (keys %$conf) {
- next if ref($conf->{$opt});
- my $item = { key => $opt };
- $item->{value} = $conf->{$opt} if defined($conf->{$opt});
- $item->{pending} = $conf->{pending}->{$opt} if defined($conf->{pending}->{$opt});
- $item->{delete} = ($pending_delete_hash->{$opt} ? 2 : 1) if exists $pending_delete_hash->{$opt};
- push @$res, $item;
- }
-
- foreach my $opt (keys %{$conf->{pending}}) {
- next if $opt eq 'delete';
- next if ref($conf->{pending}->{$opt}); # just to be sure
- next if defined($conf->{$opt});
- my $item = { key => $opt };
- $item->{pending} = $conf->{pending}->{$opt};
- push @$res, $item;
- }
+ my $pending_delete_hash = PVE::QemuConfig->parse_pending_delete($conf->{pending}->{delete});
- while (my ($opt, $force) = each %$pending_delete_hash) {
- next if $conf->{pending}->{$opt}; # just to be sure
- next if $conf->{$opt};
- my $item = { key => $opt, delete => ($force ? 2 : 1)};
- push @$res, $item;
- }
+ $conf->{cipassword} = '**********' if defined($conf->{cipassword});
+ $conf->{pending}->{cipassword} = '********** ' if defined($conf->{pending}->{cipassword});
- return $res;
- }});
+ return PVE::GuestHelpers::config_with_pending_array($conf, $pending_delete_hash);
+ }});
# POST/PUT {vmid}/config implementation
#
my $background_delay = extract_param($param, 'background_delay');
+ if (defined(my $cipassword = $param->{cipassword})) {
+ # Same logic as in cloud-init (but with the regex fixed...)
+ $param->{cipassword} = PVE::Tools::encrypt_pw($cipassword)
+ if $cipassword !~ /^\$(?:[156]|2[ay])(\$.+){2}/;
+ }
+
my @paramarr = (); # used for log message
- foreach my $key (keys %$param) {
- push @paramarr, "-$key", $param->{$key};
+ foreach my $key (sort keys %$param) {
+ my $value = $key eq 'cipassword' ? '<hidden>' : $param->{$key};
+ push @paramarr, "-$key", $value;
}
my $skiplock = extract_param($param, 'skiplock');
my $force = extract_param($param, 'force');
+ if (defined(my $ssh_keys = $param->{sshkeys})) {
+ $ssh_keys = URI::Escape::uri_unescape($ssh_keys);
+ PVE::Tools::validate_ssh_public_keys($ssh_keys);
+ }
+
die "no options specified\n" if !$delete_str && !$revert_str && !scalar(keys %$param);
my $storecfg = PVE::Storage::config();
my $volid = $drive->{file};
return if !$volid || !($drive->{replicate}//1);
return if PVE::QemuServer::drive_is_cdrom($drive);
- my ($storeid, $format);
+
+ my ($storeid, $volname) = PVE::Storage::parse_volume_id($volid, 1);
+ die "cannot add non-managed/pass-through volume to a replicated VM\n"
+ if !defined($storeid);
+
+ return if defined($volname) && $volname eq 'cloudinit';
+
+ my $format;
if ($volid =~ $NEW_DISK_RE) {
$storeid = $2;
$format = $drive->{format} || PVE::Storage::storage_default_format($storecfg, $storeid);
} else {
- ($storeid, undef) = PVE::Storage::parse_volume_id($volid, 1);
$format = (PVE::Storage::parse_volname($storecfg, $volid))[6];
}
return if PVE::Storage::storage_can_replicate($storecfg, $storeid, $format);
raise_param_exc({ $opt => "unable to parse drive options" }) if !$drive;
PVE::QemuServer::cleanup_drive_path($opt, $storecfg, $drive);
$check_replication->($drive);
- $param->{$opt} = PVE::QemuServer::print_drive($vmid, $drive);
+ $param->{$opt} = PVE::QemuServer::print_drive($drive);
} elsif ($opt =~ m/^net(\d+)$/) {
# add macaddr
my $net = PVE::QemuServer::parse_net($param->{$opt});
$param->{$opt} = PVE::QemuServer::print_net($net);
+ } elsif ($opt eq 'vmgenid') {
+ if ($param->{$opt} eq '1') {
+ $param->{$opt} = PVE::QemuServer::generate_uuid();
+ }
+ } elsif ($opt eq 'hookscript') {
+ eval { PVE::GuestHelpers::check_hookscript($param->{$opt}, $storecfg); };
+ raise_param_exc({ $opt => $@ }) if $@;
}
}
die "checksum missmatch (file change by other user?)\n"
if $digest && $digest ne $conf->{digest};
+ &$check_cpu_model_access($rpcenv, $authuser, $param, $conf);
+
+ # FIXME: 'suspended' lock should probabyl be a state or "weak" lock?!
+ if (scalar(@delete) && grep { $_ eq 'vmstate'} @delete) {
+ if (defined($conf->{lock}) && $conf->{lock} eq 'suspended') {
+ delete $conf->{lock}; # for check lock check, not written out
+ push @delete, 'lock'; # this is the real deal to write it out
+ }
+ push @delete, 'runningmachine' if $conf->{runningmachine};
+ push @delete, 'runningcpu' if $conf->{runningcpu};
+ }
+
PVE::QemuConfig->check_lock($conf) if !$skiplock;
foreach my $opt (keys %$revert) {
foreach my $opt (@delete) {
$modified->{$opt} = 1;
$conf = PVE::QemuConfig->load_config($vmid); # update/reload
- if (!defined($conf->{$opt}) && !defined($conf->{pending}->{$opt})) {
+
+ # value of what we want to delete, independent if pending or not
+ my $val = $conf->{$opt} // $conf->{pending}->{$opt};
+ if (!defined($val)) {
warn "cannot delete '$opt' - not set in current configuration!\n";
$modified->{$opt} = 0;
next;
}
+ my $is_pending_val = defined($conf->{pending}->{$opt});
+ delete $conf->{pending}->{$opt};
if ($opt =~ m/^unused/) {
- my $drive = PVE::QemuServer::parse_drive($opt, $conf->{$opt});
+ my $drive = PVE::QemuServer::parse_drive($opt, $val);
PVE::QemuConfig->check_protection($conf, "can't remove unused disk '$drive->{file}'");
$rpcenv->check_vm_perm($authuser, $vmid, undef, ['VM.Config.Disk']);
if (PVE::QemuServer::try_deallocate_drive($storecfg, $vmid, $conf, $opt, $drive, $rpcenv, $authuser)) {
delete $conf->{$opt};
PVE::QemuConfig->write_config($vmid, $conf);
}
+ } elsif ($opt eq 'vmstate') {
+ PVE::QemuConfig->check_protection($conf, "can't remove vmstate '$val'");
+ if (PVE::QemuServer::try_deallocate_drive($storecfg, $vmid, $conf, $opt, { file => $val }, $rpcenv, $authuser, 1)) {
+ delete $conf->{$opt};
+ PVE::QemuConfig->write_config($vmid, $conf);
+ }
} elsif (PVE::QemuServer::is_valid_drivename($opt)) {
PVE::QemuConfig->check_protection($conf, "can't remove drive '$opt'");
$rpcenv->check_vm_perm($authuser, $vmid, undef, ['VM.Config.Disk']);
- PVE::QemuServer::vmconfig_register_unused_drive($storecfg, $vmid, $conf, PVE::QemuServer::parse_drive($opt, $conf->{pending}->{$opt}))
- if defined($conf->{pending}->{$opt});
- PVE::QemuServer::vmconfig_delete_pending_option($conf, $opt, $force);
+ PVE::QemuServer::vmconfig_register_unused_drive($storecfg, $vmid, $conf, PVE::QemuServer::parse_drive($opt, $val))
+ if $is_pending_val;
+ PVE::QemuConfig->add_to_pending_delete($conf, $opt, $force);
+ PVE::QemuConfig->write_config($vmid, $conf);
+ } elsif ($opt =~ m/^serial\d+$/) {
+ if ($val eq 'socket') {
+ $rpcenv->check_vm_perm($authuser, $vmid, undef, ['VM.Config.HWType']);
+ } elsif ($authuser ne 'root@pam') {
+ die "only root can delete '$opt' config for real devices\n";
+ }
+ PVE::QemuConfig->add_to_pending_delete($conf, $opt, $force);
+ PVE::QemuConfig->write_config($vmid, $conf);
+ } elsif ($opt =~ m/^usb\d+$/) {
+ if ($val =~ m/spice/) {
+ $rpcenv->check_vm_perm($authuser, $vmid, undef, ['VM.Config.HWType']);
+ } elsif ($authuser ne 'root@pam') {
+ die "only root can delete '$opt' config for real devices\n";
+ }
+ PVE::QemuConfig->add_to_pending_delete($conf, $opt, $force);
PVE::QemuConfig->write_config($vmid, $conf);
} else {
- PVE::QemuServer::vmconfig_delete_pending_option($conf, $opt, $force);
+ PVE::QemuConfig->add_to_pending_delete($conf, $opt, $force);
PVE::QemuConfig->write_config($vmid, $conf);
}
}
$conf = PVE::QemuConfig->load_config($vmid); # update/reload
next if defined($conf->{pending}->{$opt}) && ($param->{$opt} eq $conf->{pending}->{$opt}); # skip if nothing changed
+ my $arch = PVE::QemuServer::get_vm_arch($conf);
+
if (PVE::QemuServer::is_valid_drivename($opt)) {
my $drive = PVE::QemuServer::parse_drive($opt, $param->{$opt});
+ # FIXME: cloudinit: CDROM or Disk?
if (PVE::QemuServer::drive_is_cdrom($drive)) { # CDROM
$rpcenv->check_vm_perm($authuser, $vmid, undef, ['VM.Config.CDROM']);
} else {
PVE::QemuServer::vmconfig_register_unused_drive($storecfg, $vmid, $conf, PVE::QemuServer::parse_drive($opt, $conf->{pending}->{$opt}))
if defined($conf->{pending}->{$opt});
- &$create_disks($rpcenv, $authuser, $conf->{pending}, $storecfg, $vmid, undef, {$opt => $param->{$opt}});
+ &$create_disks($rpcenv, $authuser, $conf->{pending}, $arch, $storecfg, $vmid, undef, {$opt => $param->{$opt}});
+ } elsif ($opt =~ m/^serial\d+/) {
+ if ((!defined($conf->{$opt}) || $conf->{$opt} eq 'socket') && $param->{$opt} eq 'socket') {
+ $rpcenv->check_vm_perm($authuser, $vmid, undef, ['VM.Config.HWType']);
+ } elsif ($authuser ne 'root@pam') {
+ die "only root can modify '$opt' config for real devices\n";
+ }
+ $conf->{pending}->{$opt} = $param->{$opt};
+ } elsif ($opt =~ m/^usb\d+/) {
+ if ((!defined($conf->{$opt}) || $conf->{$opt} =~ m/spice/) && $param->{$opt} =~ m/spice/) {
+ $rpcenv->check_vm_perm($authuser, $vmid, undef, ['VM.Config.HWType']);
+ } elsif ($authuser ne 'root@pam') {
+ die "only root can modify '$opt' config for real devices\n";
+ }
+ $conf->{pending}->{$opt} = $param->{$opt};
} else {
$conf->{pending}->{$opt} = $param->{$opt};
}
- PVE::QemuServer::vmconfig_undelete_pending_option($conf, $opt);
+ PVE::QemuConfig->remove_from_pending_delete($conf, $opt);
PVE::QemuConfig->write_config($vmid, $conf);
}
# remove pending changes when nothing changed
$conf = PVE::QemuConfig->load_config($vmid); # update/reload
- my $changes = PVE::QemuServer::vmconfig_cleanup_pending($conf);
+ my $changes = PVE::QemuConfig->cleanup_pending($conf);
PVE::QemuConfig->write_config($vmid, $conf) if $changes;
return if !scalar(keys %{$conf->{pending}});
$conf = PVE::QemuConfig->load_config($vmid); # update/reload
+ my $errors = {};
if ($running) {
- my $errors = {};
PVE::QemuServer::vmconfig_hotplug_pending($vmid, $conf, $storecfg, $modified, $errors);
- raise_param_exc($errors) if scalar(keys %$errors);
} else {
- PVE::QemuServer::vmconfig_apply_pending($vmid, $conf, $storecfg, $running);
+ PVE::QemuServer::vmconfig_apply_pending($vmid, $conf, $storecfg, $running, $errors);
}
+ raise_param_exc($errors) if scalar(keys %$errors);
return;
};
}
});
-
__PACKAGE__->register_method({
name => 'destroy_vm',
path => '{vmid}',
node => get_standard_option('pve-node'),
vmid => get_standard_option('pve-vmid', { completion => \&PVE::QemuServer::complete_vmid_stopped }),
skiplock => get_standard_option('skiplock'),
+ purge => {
+ type => 'boolean',
+ description => "Remove vmid from backup cron jobs.",
+ optional => 1,
+ },
},
},
returns => {
my ($param) = @_;
my $rpcenv = PVE::RPCEnvironment::get();
-
my $authuser = $rpcenv->get_user();
-
my $vmid = $param->{vmid};
my $skiplock = $param->{skiplock};
raise_param_exc({ skiplock => "Only root may use this option." })
if $skiplock && $authuser ne 'root@pam';
- # test if VM exists
- my $conf = PVE::QemuConfig->load_config($vmid);
+ my $early_checks = sub {
+ # test if VM exists
+ my $conf = PVE::QemuConfig->load_config($vmid);
+ PVE::QemuConfig->check_protection($conf, "can't remove VM $vmid");
- my $storecfg = PVE::Storage::config();
+ my $ha_managed = PVE::HA::Config::service_is_configured("vm:$vmid");
- PVE::QemuConfig->check_protection($conf, "can't remove VM $vmid");
+ if (!$param->{purge}) {
+ die "unable to remove VM $vmid - used in HA resources and purge parameter not set.\n"
+ if $ha_managed;
+ # don't allow destroy if with replication jobs but no purge param
+ my $repl_conf = PVE::ReplicationConfig->new();
+ $repl_conf->check_for_existing_jobs($vmid);
+ }
- die "unable to remove VM $vmid - used in HA resources\n"
- if PVE::HA::Config::vm_is_ha_managed($vmid);
+ die "VM $vmid is running - destroy failed\n"
+ if PVE::QemuServer::check_running($vmid);
- # do not allow destroy if there are replication jobs
- my $repl_conf = PVE::ReplicationConfig->new();
- $repl_conf->check_for_existing_jobs($vmid);
+ return $ha_managed;
+ };
- # early tests (repeat after locking)
- die "VM $vmid is running - destroy failed\n"
- if PVE::QemuServer::check_running($vmid);
+ $early_checks->();
my $realcmd = sub {
my $upid = shift;
- syslog('info', "destroy VM $vmid: $upid\n");
-
- PVE::QemuServer::vm_destroy($storecfg, $vmid, $skiplock);
+ my $storecfg = PVE::Storage::config();
- PVE::AccessControl::remove_vm_access($vmid);
+ syslog('info', "destroy VM $vmid: $upid\n");
+ PVE::QemuConfig->lock_config($vmid, sub {
+ # repeat, config might have changed
+ my $ha_managed = $early_checks->();
+
+ PVE::QemuServer::destroy_vm($storecfg, $vmid, $skiplock, { lock => 'destroyed' });
+
+ PVE::AccessControl::remove_vm_access($vmid);
+ PVE::Firewall::remove_vmfw_conf($vmid);
+ if ($param->{purge}) {
+ print "purging VM $vmid from related configurations..\n";
+ PVE::ReplicationConfig::remove_vmid_jobs($vmid);
+ PVE::VZDump::Plugin::remove_vmid_from_backup_jobs($vmid);
+
+ if ($ha_managed) {
+ PVE::HA::Config::delete_service_from_config("vm:$vmid");
+ print "NOTE: removed VM $vmid from HA resource configuration.\n";
+ }
+ }
- PVE::Firewall::remove_vmfw_conf($vmid);
+ # only now remove the zombie config, else we can have reuse race
+ PVE::QemuConfig->destroy_config($vmid);
+ });
};
return $rpcenv->fork_worker('qmdestroy', $vmid, $authuser, $realcmd);
return undef;
}});
# Generate $length random printable characters using good entropy.
# Each output char uses 6 bits of one random byte offset from '!' (0x21),
# so the resulting alphabet is '!' (0x21) .. '`' (0x60).
my $gen_rand_chars = sub {
    my ($length) = @_;

    die "invalid length $length" if $length < 1;

    my $min = ord('!'); # first printable ascii

    my @rand_bytes = split '', Crypt::OpenSSL::Random::random_bytes($length);
    my $str = join('', map { chr((ord($_) & 0x3F) + $min) } @rand_bytes);

    return $str;
};
+
my $sslcert;
__PACKAGE__->register_method({
type => 'boolean',
description => "starts websockify instead of vncproxy",
},
+ 'generate-password' => {
+ optional => 1,
+ type => 'boolean',
+ default => 0,
+ description => "Generates a random password to be used as ticket instead of the API ticket.",
+ },
},
},
returns => {
properties => {
user => { type => 'string' },
ticket => { type => 'string' },
+ password => {
+ optional => 1,
+ description => "Returned if requested with 'generate-password' param."
+ ." Consists of printable ASCII characters ('!' .. '~').",
+ type => 'string',
+ },
cert => { type => 'string' },
port => { type => 'integer' },
upid => { type => 'string' },
my $conf = PVE::QemuConfig->load_config($vmid, $node); # check if VM exists
+ my $serial;
+ if ($conf->{vga}) {
+ my $vga = PVE::QemuServer::parse_vga($conf->{vga});
+ $serial = $vga->{type} if $vga->{type} =~ m/^serial\d+$/;
+ }
+
my $authpath = "/vms/$vmid";
my $ticket = PVE::AccessControl::assemble_vnc_ticket($authuser, $authpath);
+ my $password = $ticket;
+ if ($param->{'generate-password'}) {
+ $password = $gen_rand_chars->(8);
+ }
$sslcert = PVE::Tools::file_get_contents("/etc/pve/pve-root-ca.pem", 8192)
if !$sslcert;
- my ($remip, $family);
+ my $family;
my $remcmd = [];
if ($node ne 'localhost' && $node ne PVE::INotify::nodename()) {
- ($remip, $family) = PVE::Cluster::remote_node_ip($node);
+ (undef, $family) = PVE::Cluster::remote_node_ip($node);
+ my $sshinfo = PVE::SSHInfo::get_ssh_info($node);
# NOTE: kvm VNC traffic is already TLS encrypted or is known unsecure
- $remcmd = ['/usr/bin/ssh', '-T', '-o', 'BatchMode=yes', $remip];
+ $remcmd = PVE::SSHInfo::ssh_info_to_command($sshinfo, defined($serial) ? '-t' : '-T');
} else {
$family = PVE::Tools::get_host_address_family($node);
}
my $cmd;
- if ($conf->{vga} && ($conf->{vga} =~ m/^serial\d+$/)) {
+ if (defined($serial)) {
- die "Websocket mode is not supported in vga serial mode!" if $websocket;
+ my $termcmd = [ '/usr/sbin/qm', 'terminal', $vmid, '-iface', $serial, '-escape', '0' ];
- my $termcmd = [ '/usr/sbin/qm', 'terminal', $vmid, '-iface', $conf->{vga} ];
- #my $termcmd = "/usr/bin/qm terminal -iface $conf->{vga}";
$cmd = ['/usr/bin/vncterm', '-rfbport', $port,
'-timeout', $timeout, '-authpath', $authpath,
- '-perm', 'Sys.Console', '-c', @$remcmd, @$termcmd];
+ '-perm', 'Sys.Console'];
+
+ if ($param->{websocket}) {
+ $ENV{PVE_VNC_TICKET} = $password; # pass ticket to vncterm
+ push @$cmd, '-notls', '-listen', 'localhost';
+ }
+
+ push @$cmd, '-c', @$remcmd, @$termcmd;
+
PVE::Tools::run_command($cmd);
+
} else {
- $ENV{LC_PVE_TICKET} = $ticket if $websocket; # set ticket with "qm vncproxy"
+ $ENV{LC_PVE_TICKET} = $password if $websocket; # set ticket with "qm vncproxy"
$cmd = [@$remcmd, "/usr/sbin/qm", 'vncproxy', $vmid];
LocalPort => $port,
Proto => 'tcp',
GetAddrInfoFlags => 0,
- ) or die "failed to create socket: $!\n";
+ ) or die "failed to create socket: $!\n";
# Inside the worker we shouldn't have any previous alarms
# running anyway...:
alarm(0);
PVE::Tools::wait_for_vnc_port($port);
- return {
+ my $res = {
user => $authuser,
ticket => $ticket,
port => $port,
upid => $upid,
cert => $sslcert,
};
+ $res->{password} = $password if $param->{'generate-password'};
+
+ return $res;
}});
__PACKAGE__->register_method({
- name => 'vncwebsocket',
- path => '{vmid}/vncwebsocket',
- method => 'GET',
+ name => 'termproxy',
+ path => '{vmid}/termproxy',
+ method => 'POST',
+ protected => 1,
permissions => {
- description => "You also need to pass a valid ticket (vncticket).",
check => ['perm', '/vms/{vmid}', [ 'VM.Console' ]],
},
- description => "Opens a weksocket for VNC traffic.",
+ description => "Creates a TCP proxy connections.",
parameters => {
- additionalProperties => 0,
+ additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
vmid => get_standard_option('pve-vmid'),
- vncticket => {
- description => "Ticket from previous call to vncproxy.",
+ serial=> {
+ optional => 1,
type => 'string',
- maxLength => 512,
- },
- port => {
- description => "Port number returned by previous vncproxy call.",
- type => 'integer',
- minimum => 5900,
- maximum => 5999,
+ enum => [qw(serial0 serial1 serial2 serial3)],
+ description => "opens a serial terminal (defaults to display)",
},
},
},
returns => {
- type => "object",
+ additionalProperties => 0,
properties => {
- port => { type => 'string' },
+ user => { type => 'string' },
+ ticket => { type => 'string' },
+ port => { type => 'integer' },
+ upid => { type => 'string' },
},
},
code => sub {
my $vmid = $param->{vmid};
my $node = $param->{node};
+ my $serial = $param->{serial};
+
+ my $conf = PVE::QemuConfig->load_config($vmid, $node); # check if VM exists
+
+ if (!defined($serial)) {
+ if ($conf->{vga}) {
+ my $vga = PVE::QemuServer::parse_vga($conf->{vga});
+ $serial = $vga->{type} if $vga->{type} =~ m/^serial\d+$/;
+ }
+ }
my $authpath = "/vms/$vmid";
- PVE::AccessControl::verify_vnc_ticket($param->{vncticket}, $authuser, $authpath);
+ my $ticket = PVE::AccessControl::assemble_vnc_ticket($authuser, $authpath);
- my $conf = PVE::QemuConfig->load_config($vmid, $node); # VM exists ?
+ my $family;
+ my $remcmd = [];
- # Note: VNC ports are acessible from outside, so we do not gain any
- # security if we verify that $param->{port} belongs to VM $vmid. This
- # check is done by verifying the VNC ticket (inside VNC protocol).
+ if ($node ne 'localhost' && $node ne PVE::INotify::nodename()) {
+ (undef, $family) = PVE::Cluster::remote_node_ip($node);
+ my $sshinfo = PVE::SSHInfo::get_ssh_info($node);
+ $remcmd = PVE::SSHInfo::ssh_info_to_command($sshinfo, '-t');
+ push @$remcmd, '--';
+ } else {
+ $family = PVE::Tools::get_host_address_family($node);
+ }
- my $port = $param->{port};
+ my $port = PVE::Tools::next_vnc_port($family);
- return { port => $port };
- }});
+ my $termcmd = [ '/usr/sbin/qm', 'terminal', $vmid, '-escape', '0'];
+ push @$termcmd, '-iface', $serial if $serial;
-__PACKAGE__->register_method({
- name => 'spiceproxy',
- path => '{vmid}/spiceproxy',
- method => 'POST',
- protected => 1,
- proxyto => 'node',
- permissions => {
- check => ['perm', '/vms/{vmid}', [ 'VM.Console' ]],
- },
- description => "Returns a SPICE configuration to connect to the VM.",
- parameters => {
- additionalProperties => 0,
- properties => {
+ my $realcmd = sub {
+ my $upid = shift;
+
+ syslog('info', "starting qemu termproxy $upid\n");
+
+ my $cmd = ['/usr/bin/termproxy', $port, '--path', $authpath,
+ '--perm', 'VM.Console', '--'];
+ push @$cmd, @$remcmd, @$termcmd;
+
+ PVE::Tools::run_command($cmd);
+ };
+
+ my $upid = $rpcenv->fork_worker('vncproxy', $vmid, $authuser, $realcmd, 1);
+
+ PVE::Tools::wait_for_vnc_port($port);
+
+ return {
+ user => $authuser,
+ ticket => $ticket,
+ port => $port,
+ upid => $upid,
+ };
+ }});
+
+__PACKAGE__->register_method({
+ name => 'vncwebsocket',
+ path => '{vmid}/vncwebsocket',
+ method => 'GET',
+ permissions => {
+ description => "You also need to pass a valid ticket (vncticket).",
+ check => ['perm', '/vms/{vmid}', [ 'VM.Console' ]],
+ },
+ description => "Opens a weksocket for VNC traffic.",
+ parameters => {
+ additionalProperties => 0,
+ properties => {
+ node => get_standard_option('pve-node'),
+ vmid => get_standard_option('pve-vmid'),
+ vncticket => {
+ description => "Ticket from previous call to vncproxy.",
+ type => 'string',
+ maxLength => 512,
+ },
+ port => {
+ description => "Port number returned by previous vncproxy call.",
+ type => 'integer',
+ minimum => 5900,
+ maximum => 5999,
+ },
+ },
+ },
+ returns => {
+ type => "object",
+ properties => {
+ port => { type => 'string' },
+ },
+ },
+ code => sub {
+ my ($param) = @_;
+
+ my $rpcenv = PVE::RPCEnvironment::get();
+
+ my $authuser = $rpcenv->get_user();
+
+ my $vmid = $param->{vmid};
+ my $node = $param->{node};
+
+ my $authpath = "/vms/$vmid";
+
+ PVE::AccessControl::verify_vnc_ticket($param->{vncticket}, $authuser, $authpath);
+
+ my $conf = PVE::QemuConfig->load_config($vmid, $node); # VM exists ?
+
+ # Note: VNC ports are accessible from outside, so we do not gain any
+ # security if we verify that $param->{port} belongs to VM $vmid. This
+ # check is done by verifying the VNC ticket (inside VNC protocol).
+
+ my $port = $param->{port};
+
+ return { port => $port };
+ }});
+
+__PACKAGE__->register_method({
+ name => 'spiceproxy',
+ path => '{vmid}/spiceproxy',
+ method => 'POST',
+ protected => 1,
+ proxyto => 'node',
+ permissions => {
+ check => ['perm', '/vms/{vmid}', [ 'VM.Console' ]],
+ },
+ description => "Returns a SPICE configuration to connect to the VM.",
+ parameters => {
+ additionalProperties => 0,
+ properties => {
node => get_standard_option('pve-node'),
vmid => get_standard_option('pve-vmid'),
proxy => get_standard_option('spice-proxy', { optional => 1 }),
my ($ticket, undef, $remote_viewer_config) =
PVE::AccessControl::remote_viewer_config($authuser, $vmid, $node, $proxy, $title, $port);
- PVE::QemuServer::vm_mon_cmd($vmid, "set_password", protocol => 'spice', password => $ticket);
- PVE::QemuServer::vm_mon_cmd($vmid, "expire_password", protocol => 'spice', time => "+30");
+ mon_cmd($vmid, "set_password", protocol => 'spice', password => $ticket);
+ mon_cmd($vmid, "expire_password", protocol => 'spice', time => "+30");
return $remote_viewer_config;
}});
{ subdir => 'current' },
{ subdir => 'start' },
{ subdir => 'stop' },
+ { subdir => 'reset' },
+ { subdir => 'shutdown' },
+ { subdir => 'suspend' },
+ { subdir => 'reboot' },
];
return $res;
vmid => get_standard_option('pve-vmid'),
},
},
- returns => { type => 'object' },
+ returns => {
+ type => 'object',
+ properties => {
+ %$PVE::QemuServer::vmstatus_return_properties,
+ ha => {
+ description => "HA manager service status.",
+ type => 'object',
+ },
+ spice => {
+ description => "Qemu VGA configuration supports spice.",
+ type => 'boolean',
+ optional => 1,
+ },
+ agent => {
+ description => "Qemu GuestAgent enabled in config.",
+ type => 'boolean',
+ optional => 1,
+ },
+ },
+ },
code => sub {
my ($param) = @_;
$status->{ha} = PVE::HA::Config::get_service_status("vm:$param->{vmid}");
$status->{spice} = 1 if PVE::QemuServer::vga_conf_has_spice($conf->{vga});
+ $status->{agent} = 1 if (PVE::QemuServer::parse_guest_agent($conf)->{enabled});
return $status;
}});
description => "CIDR of the (sub) network that is used for migration.",
optional => 1,
},
- machine => get_standard_option('pve-qm-machine'),
- targetstorage => {
- description => "Target storage for the migration. (Can be '1' to use the same storage id as on the source node.)",
+ machine => get_standard_option('pve-qemu-machine'),
+ 'force-cpu' => {
+ description => "Override QEMU's -cpu argument with the given string.",
type => 'string',
- optional => 1
- }
+ optional => 1,
+ },
+ targetstorage => get_standard_option('pve-targetstorage'),
+ timeout => {
+ description => "Wait maximal timeout seconds.",
+ type => 'integer',
+ minimum => 0,
+ default => 'max(30, vm memory in GiB)',
+ optional => 1,
+ },
},
},
returns => {
my ($param) = @_;
my $rpcenv = PVE::RPCEnvironment::get();
-
my $authuser = $rpcenv->get_user();
my $node = extract_param($param, 'node');
-
my $vmid = extract_param($param, 'vmid');
+ my $timeout = extract_param($param, 'timeout');
my $machine = extract_param($param, 'machine');
+ my $force_cpu = extract_param($param, 'force-cpu');
- my $stateuri = extract_param($param, 'stateuri');
- raise_param_exc({ stateuri => "Only root may use this option." })
- if $stateuri && $authuser ne 'root@pam';
-
- my $skiplock = extract_param($param, 'skiplock');
- raise_param_exc({ skiplock => "Only root may use this option." })
- if $skiplock && $authuser ne 'root@pam';
-
- my $migratedfrom = extract_param($param, 'migratedfrom');
- raise_param_exc({ migratedfrom => "Only root may use this option." })
- if $migratedfrom && $authuser ne 'root@pam';
-
- my $migration_type = extract_param($param, 'migration_type');
- raise_param_exc({ migration_type => "Only root may use this option." })
- if $migration_type && $authuser ne 'root@pam';
-
- my $migration_network = extract_param($param, 'migration_network');
- raise_param_exc({ migration_network => "Only root may use this option." })
- if $migration_network && $authuser ne 'root@pam';
-
- my $targetstorage = extract_param($param, 'targetstorage');
- raise_param_exc({ targetstorage => "Only root may use this option." })
- if $targetstorage && $authuser ne 'root@pam';
+ my $get_root_param = sub {
+ my $value = extract_param($param, $_[0]);
+ raise_param_exc({ "$_[0]" => "Only root may use this option." })
+ if $value && $authuser ne 'root@pam';
+ return $value;
+ };
- raise_param_exc({ targetstorage => "targetstorage can only by used with migratedfrom." })
- if $targetstorage && !$migratedfrom;
+ my $stateuri = $get_root_param->('stateuri');
+ my $skiplock = $get_root_param->('skiplock');
+ my $migratedfrom = $get_root_param->('migratedfrom');
+ my $migration_type = $get_root_param->('migration_type');
+ my $migration_network = $get_root_param->('migration_network');
+ my $targetstorage = $get_root_param->('targetstorage');
+
+ my $storagemap;
+
+ if ($targetstorage) {
+ raise_param_exc({ targetstorage => "targetstorage can only by used with migratedfrom." })
+ if !$migratedfrom;
+ $storagemap = eval { PVE::JSONSchema::parse_idmap($targetstorage, 'pve-storage-id') };
+ raise_param_exc({ targetstorage => "failed to parse storage map: $@" })
+ if $@;
+ }
# read spice ticket from STDIN
my $spice_ticket;
- if ($stateuri && ($stateuri eq 'tcp') && $migratedfrom && ($rpcenv->{type} eq 'cli')) {
- if (defined(my $line = <>)) {
+ my $nbd_protocol_version = 0;
+ my $replicated_volumes = {};
+ if ($stateuri && ($stateuri eq 'tcp' || $stateuri eq 'unix') && $migratedfrom && ($rpcenv->{type} eq 'cli')) {
+ while (defined(my $line = <STDIN>)) {
chomp $line;
- $spice_ticket = $line;
+ if ($line =~ m/^spice_ticket: (.+)$/) {
+ $spice_ticket = $1;
+ } elsif ($line =~ m/^nbd_protocol_version: (\d+)$/) {
+ $nbd_protocol_version = $1;
+ } elsif ($line =~ m/^replicated_volume: (.*)$/) {
+ $replicated_volumes->{$1} = 1;
+ } else {
+ # fallback for old source node
+ $spice_ticket = $line;
+ }
}
}
my $storecfg = PVE::Storage::config();
- if (PVE::HA::Config::vm_is_ha_managed($vmid) && !$stateuri &&
- $rpcenv->{type} ne 'ha') {
-
+ if (PVE::HA::Config::vm_is_ha_managed($vmid) && !$stateuri && $rpcenv->{type} ne 'ha') {
my $hacmd = sub {
my $upid = shift;
- my $service = "vm:$vmid";
-
- my $cmd = ['ha-manager', 'set', $service, '--state', 'started'];
-
print "Requesting HA start for VM $vmid\n";
+ my $cmd = ['ha-manager', 'set', "vm:$vmid", '--state', 'started'];
PVE::Tools::run_command($cmd);
-
return;
};
syslog('info', "start VM $vmid: $upid\n");
- PVE::QemuServer::vm_start($storecfg, $vmid, $stateuri, $skiplock, $migratedfrom, undef,
- $machine, $spice_ticket, $migration_network, $migration_type, $targetstorage);
+ my $migrate_opts = {
+ migratedfrom => $migratedfrom,
+ spice_ticket => $spice_ticket,
+ network => $migration_network,
+ type => $migration_type,
+ storagemap => $storagemap,
+ nbd_proto_version => $nbd_protocol_version,
+ replicated_volumes => $replicated_volumes,
+ };
+ my $params = {
+ statefile => $stateuri,
+ skiplock => $skiplock,
+ forcemachine => $machine,
+ timeout => $timeout,
+ forcecpu => $force_cpu,
+ };
+
+ PVE::QemuServer::vm_start($storecfg, $vmid, $params, $migrate_opts);
return;
};
my ($param) = @_;
my $rpcenv = PVE::RPCEnvironment::get();
-
my $authuser = $rpcenv->get_user();
my $node = extract_param($param, 'node');
-
my $vmid = extract_param($param, 'vmid');
my $skiplock = extract_param($param, 'skiplock');
my $hacmd = sub {
my $upid = shift;
- my $service = "vm:$vmid";
-
- my $cmd = ['ha-manager', 'set', $service, '--state', 'stopped'];
-
print "Requesting HA stop for VM $vmid\n";
+ my $cmd = ['ha-manager', 'crm-command', 'stop', "vm:$vmid", '0'];
PVE::Tools::run_command($cmd);
-
return;
};
PVE::QemuServer::vm_stop($storecfg, $vmid, $skiplock, 0,
$param->{timeout}, 0, 1, $keepActive, $migratedfrom);
-
return;
};
my ($param) = @_;
my $rpcenv = PVE::RPCEnvironment::get();
-
my $authuser = $rpcenv->get_user();
my $node = extract_param($param, 'node');
-
my $vmid = extract_param($param, 'vmid');
my $skiplock = extract_param($param, 'skiplock');
#
# checking the qmp status here to get feedback to the gui/cli/api
# and the status query should not take too long
- my $qmpstatus;
- eval {
- $qmpstatus = PVE::QemuServer::vm_qmp_command($vmid, { execute => "query-status" }, 0);
+ my $qmpstatus = eval {
+ PVE::QemuConfig::assert_config_exists_on_node($vmid);
+ mon_cmd($vmid, "query-status");
};
my $err = $@ if $@;
}
}
- if (PVE::HA::Config::vm_is_ha_managed($vmid) &&
- ($rpcenv->{type} ne 'ha')) {
+ if (PVE::HA::Config::vm_is_ha_managed($vmid) && $rpcenv->{type} ne 'ha') {
+ my $timeout = $param->{timeout} // 60;
my $hacmd = sub {
my $upid = shift;
- my $service = "vm:$vmid";
-
- my $cmd = ['ha-manager', 'set', $service, '--state', 'stopped'];
-
print "Requesting HA stop for VM $vmid\n";
+ my $cmd = ['ha-manager', 'crm-command', 'stop', "vm:$vmid", "$timeout"];
PVE::Tools::run_command($cmd);
-
return;
};
PVE::QemuServer::vm_stop($storecfg, $vmid, $skiplock, 0, $param->{timeout},
$shutdown, $param->{forceStop}, $keepActive);
-
return;
};
}
}});
+__PACKAGE__->register_method({
+ name => 'vm_reboot',
+ path => '{vmid}/status/reboot',
+ method => 'POST',
+ protected => 1,
+ proxyto => 'node',
+ description => "Reboot the VM by shutting it down, and starting it again. Applies pending changes.",
+ permissions => {
+ check => ['perm', '/vms/{vmid}', [ 'VM.PowerMgmt' ]],
+ },
+ parameters => {
+ additionalProperties => 0,
+ properties => {
+ node => get_standard_option('pve-node'),
+ vmid => get_standard_option('pve-vmid',
+ { completion => \&PVE::QemuServer::complete_vmid_running }),
+ timeout => {
+ description => "Wait maximal timeout seconds for the shutdown.",
+ type => 'integer',
+ minimum => 0,
+ optional => 1,
+ },
+ },
+ },
+ returns => {
+ type => 'string',
+ },
+ code => sub {
+ my ($param) = @_;
+
+ my $rpcenv = PVE::RPCEnvironment::get();
+ my $authuser = $rpcenv->get_user();
+
+ my $node = extract_param($param, 'node');
+ my $vmid = extract_param($param, 'vmid');
+
+ my $qmpstatus = eval {
+ PVE::QemuConfig::assert_config_exists_on_node($vmid);
+ mon_cmd($vmid, "query-status");
+ };
+ my $err = $@ if $@;
+
+ if (!$err && $qmpstatus->{status} eq "paused") {
+ die "VM is paused - cannot shutdown\n";
+ }
+
+ die "VM $vmid not running\n" if !PVE::QemuServer::check_running($vmid);
+
+ my $realcmd = sub {
+ my $upid = shift;
+
+ syslog('info', "requesting reboot of VM $vmid: $upid\n");
+ PVE::QemuServer::vm_reboot($vmid, $param->{timeout});
+ return;
+ };
+
+ return $rpcenv->fork_worker('qmreboot', $vmid, $authuser, $realcmd);
+ }});
+
__PACKAGE__->register_method({
name => 'vm_suspend',
path => '{vmid}/status/suspend',
proxyto => 'node',
description => "Suspend virtual machine.",
permissions => {
+ description => "You need 'VM.PowerMgmt' on /vms/{vmid}, and if you have set 'todisk',".
+ " you need also 'VM.Config.Disk' on /vms/{vmid} and 'Datastore.AllocateSpace'".
+ " on the storage for the vmstate.",
check => ['perm', '/vms/{vmid}', [ 'VM.PowerMgmt' ]],
},
parameters => {
vmid => get_standard_option('pve-vmid',
{ completion => \&PVE::QemuServer::complete_vmid_running }),
skiplock => get_standard_option('skiplock'),
+ todisk => {
+ type => 'boolean',
+ default => 0,
+ optional => 1,
+ description => 'If set, suspends the VM to disk. Will be resumed on next VM start.',
+ },
+ statestorage => get_standard_option('pve-storage-id', {
+ description => "The storage for the VM state",
+ requires => 'todisk',
+ optional => 1,
+ completion => \&PVE::Storage::complete_storage_enabled,
+ }),
},
},
returns => {
my ($param) = @_;
my $rpcenv = PVE::RPCEnvironment::get();
-
my $authuser = $rpcenv->get_user();
my $node = extract_param($param, 'node');
-
my $vmid = extract_param($param, 'vmid');
+ my $todisk = extract_param($param, 'todisk') // 0;
+
+ my $statestorage = extract_param($param, 'statestorage');
+
my $skiplock = extract_param($param, 'skiplock');
raise_param_exc({ skiplock => "Only root may use this option." })
if $skiplock && $authuser ne 'root@pam';
die "VM $vmid not running\n" if !PVE::QemuServer::check_running($vmid);
+ die "Cannot suspend HA managed VM to disk\n"
+ if $todisk && PVE::HA::Config::vm_is_ha_managed($vmid);
+
+ # early check for storage permission, for better user feedback
+ if ($todisk) {
+ $rpcenv->check_vm_perm($authuser, $vmid, undef, ['VM.Config.Disk']);
+
+ if (!$statestorage) {
+ # get statestorage from config if none is given
+ my $conf = PVE::QemuConfig->load_config($vmid);
+ my $storecfg = PVE::Storage::config();
+ $statestorage = PVE::QemuServer::find_vmstate_storage($conf, $storecfg);
+ }
+
+ $rpcenv->check($authuser, "/storage/$statestorage", ['Datastore.AllocateSpace']);
+ }
+
my $realcmd = sub {
my $upid = shift;
syslog('info', "suspend VM $vmid: $upid\n");
- PVE::QemuServer::vm_suspend($vmid, $skiplock);
+ PVE::QemuServer::vm_suspend($vmid, $skiplock, $todisk, $statestorage);
return;
};
- return $rpcenv->fork_worker('qmsuspend', $vmid, $authuser, $realcmd);
+ my $taskname = $todisk ? 'qmsuspend' : 'qmpause';
+ return $rpcenv->fork_worker($taskname, $vmid, $authuser, $realcmd);
}});
__PACKAGE__->register_method({
if $skiplock && $authuser ne 'root@pam';
my $nocheck = extract_param($param, 'nocheck');
+ raise_param_exc({ nocheck => "Only root may use this option." })
+ if $nocheck && $authuser ne 'root@pam';
- die "VM $vmid not running\n" if !PVE::QemuServer::check_running($vmid, $nocheck);
+ my $to_disk_suspended;
+ eval {
+ PVE::QemuConfig->lock_config($vmid, sub {
+ my $conf = PVE::QemuConfig->load_config($vmid);
+ $to_disk_suspended = PVE::QemuConfig->has_lock($conf, 'suspended');
+ });
+ };
+
+ die "VM $vmid not running\n"
+ if !$to_disk_suspended && !PVE::QemuServer::check_running($vmid, $nocheck);
my $realcmd = sub {
my $upid = shift;
syslog('info', "resume VM $vmid: $upid\n");
- PVE::QemuServer::vm_resume($vmid, $skiplock, $nocheck);
+ if (!$to_disk_suspended) {
+ PVE::QemuServer::vm_resume($vmid, $skiplock, $nocheck);
+ } else {
+ my $storecfg = PVE::Storage::config();
+ PVE::QemuServer::vm_start($storecfg, $vmid, { skiplock => $skiplock });
+ }
return;
};
properties => {
node => get_standard_option('pve-node'),
vmid => get_standard_option('pve-vmid', { completion => \&PVE::QemuServer::complete_vmid }),
- newid => get_standard_option('pve-vmid', { description => 'VMID for the clone.' }),
+ newid => get_standard_option('pve-vmid', {
+ completion => \&PVE::Cluster::complete_next_vmid,
+ description => 'VMID for the clone.' }),
name => {
optional => 1,
type => 'string', format => 'dns-name',
}),
storage => get_standard_option('pve-storage-id', {
description => "Target storage for full clone.",
- requires => 'full',
optional => 1,
}),
'format' => {
- description => "Target format for file storage.",
- requires => 'full',
+ description => "Target format for file storage. Only valid for full clone.",
type => 'string',
optional => 1,
enum => [ 'raw', 'qcow2', 'vmdk'],
full => {
optional => 1,
type => 'boolean',
- description => "Create a full copy of all disk. This is always done when " .
+ description => "Create a full copy of all disks. This is always done when " .
"you clone a normal VM. For VM templates, we try to create a linked clone by default.",
- default => 0,
},
target => get_standard_option('pve-node', {
description => "Target node. Only allowed if the original VM is on shared storage.",
optional => 1,
}),
+ bwlimit => {
+ description => "Override I/O bandwidth limit (in KiB/s).",
+ optional => 1,
+ type => 'integer',
+ minimum => '0',
+ default => 'clone limit from datacenter or storage config',
+ },
},
},
returns => {
my ($param) = @_;
my $rpcenv = PVE::RPCEnvironment::get();
-
- my $authuser = $rpcenv->get_user();
+ my $authuser = $rpcenv->get_user();
my $node = extract_param($param, 'node');
-
my $vmid = extract_param($param, 'vmid');
-
my $newid = extract_param($param, 'newid');
-
my $pool = extract_param($param, 'pool');
-
- if (defined($pool)) {
- $rpcenv->check_pool_exist($pool);
- }
+ $rpcenv->check_pool_exist($pool) if defined($pool);
my $snapname = extract_param($param, 'snapname');
-
my $storage = extract_param($param, 'storage');
-
my $format = extract_param($param, 'format');
-
my $target = extract_param($param, 'target');
my $localnode = PVE::INotify::nodename();
- undef $target if $target && ($target eq $localnode || $target eq 'localhost');
+ if ($target && ($target eq $localnode || $target eq 'localhost')) {
+ undef $target;
+ }
PVE::Cluster::check_node_exists($target) if $target;
}
}
- PVE::Cluster::check_cfs_quorum();
+ PVE::Cluster::check_cfs_quorum();
my $running = PVE::QemuServer::check_running($vmid) || 0;
- # exclusive lock if VM is running - else shared lock is enough;
- my $shared_lock = $running ? 0 : 1;
-
my $clonefn = sub {
-
- # do all tests after lock
- # we also try to do all tests before we fork the worker
+ # do all tests after lock but before forking worker - if possible
my $conf = PVE::QemuConfig->load_config($vmid);
-
PVE::QemuConfig->check_lock($conf);
my $verify_running = PVE::QemuServer::check_running($vmid) || 0;
-
die "unexpected state change\n" if $verify_running != $running;
die "snapshot '$snapname' does not exist\n"
if $snapname && !defined( $conf->{snapshots}->{$snapname});
+ my $full = extract_param($param, 'full') // !PVE::QemuConfig->is_template($conf);
+
+ die "parameter 'storage' not allowed for linked clones\n"
+ if defined($storage) && !$full;
+
+ die "parameter 'format' not allowed for linked clones\n"
+ if defined($format) && !$full;
+
my $oldconf = $snapname ? $conf->{snapshots}->{$snapname} : $conf;
my $sharedvm = &$check_storage_access_clone($rpcenv, $authuser, $storecfg, $oldconf, $storage);
- die "can't clone VM to node '$target' (VM uses local storage)\n" if $target && !$sharedvm;
+ die "can't clone VM to node '$target' (VM uses local storage)\n"
+ if $target && !$sharedvm;
my $conffile = PVE::QemuConfig->config_file($newid);
-
die "unable to create VM $newid: config file already exists\n"
if -f $conffile;
} elsif (PVE::QemuServer::is_valid_drivename($opt)) {
my $drive = PVE::QemuServer::parse_drive($opt, $value);
die "unable to parse drive options for '$opt'\n" if !$drive;
- if (PVE::QemuServer::drive_is_cdrom($drive)) {
+ if (PVE::QemuServer::drive_is_cdrom($drive, 1)) {
$newconf->{$opt} = $value; # simply copy configuration
} else {
- if ($param->{full}) {
+ if ($full || PVE::QemuServer::drive_is_cloudinit($drive)) {
die "Full clone feature is not supported for drive '$opt'\n"
if !PVE::Storage::volume_has_feature($storecfg, 'copy', $drive->{file}, $snapname, $running);
$fullclone->{$opt} = 1;
}
# auto generate a new uuid
- my ($uuid, $uuid_str);
- UUID::generate($uuid);
- UUID::unparse($uuid, $uuid_str);
my $smbios1 = PVE::QemuServer::parse_smbios1($newconf->{smbios1} || '');
- $smbios1->{uuid} = $uuid_str;
+ $smbios1->{uuid} = PVE::QemuServer::generate_uuid();
$newconf->{smbios1} = PVE::QemuServer::print_smbios1($smbios1);
+ # auto generate a new vmgenid only if the option was set for template
+ if ($newconf->{vmgenid}) {
+ $newconf->{vmgenid} = PVE::QemuServer::generate_uuid();
+ }
delete $newconf->{template};
if ($param->{name}) {
$newconf->{name} = $param->{name};
} else {
- if ($oldconf->{name}) {
- $newconf->{name} = "Copy-of-$oldconf->{name}";
- } else {
- $newconf->{name} = "Copy-of-VM-$vmid";
- }
+ $newconf->{name} = "Copy-of-VM-" . ($oldconf->{name} // $vmid);
}
if ($param->{description}) {
}
# create empty/temp config - this fails if VM already exists on other node
+ # FIXME use PVE::QemuConfig->create_and_lock_config and adapt code
PVE::Tools::file_set_contents($conffile, "# qmclone temporary file\nlock: clone\n");
my $realcmd = sub {
my $jobs = {};
eval {
- local $SIG{INT} = $SIG{TERM} = $SIG{QUIT} = $SIG{HUP} = sub { die "interrupted by signal\n"; };
+ local $SIG{INT} =
+ local $SIG{TERM} =
+ local $SIG{QUIT} =
+ local $SIG{HUP} = sub { die "interrupted by signal\n"; };
PVE::Storage::activate_volumes($storecfg, $vollist, $snapname);
+ my $bwlimit = extract_param($param, 'bwlimit');
+
my $total_jobs = scalar(keys %{$drives});
my $i = 1;
foreach my $opt (keys %$drives) {
my $drive = $drives->{$opt};
my $skipcomplete = ($total_jobs != $i); # finish after last drive
+ my $completion = $skipcomplete ? 'skip' : 'complete';
+
+ my $src_sid = PVE::Storage::parse_volume_id($drive->{file});
+ my $storage_list = [ $src_sid ];
+ push @$storage_list, $storage if defined($storage);
+ my $clonelimit = PVE::Storage::get_bandwidth_limit('clone', $storage_list, $bwlimit);
my $newdrive = PVE::QemuServer::clone_disk($storecfg, $vmid, $running, $opt, $drive, $snapname,
$newid, $storage, $format, $fullclone->{$opt}, $newvollist,
- $jobs, $skipcomplete, $oldconf->{agent});
+ $jobs, $completion, $oldconf->{agent}, $clonelimit, $oldconf);
- $newconf->{$opt} = PVE::QemuServer::print_drive($vmid, $newdrive);
+ $newconf->{$opt} = PVE::QemuServer::print_drive($newdrive);
PVE::QemuConfig->write_config($newid, $newconf);
$i++;
}
delete $newconf->{lock};
+
+ # do not write pending changes
+ if (my @changes = keys %{$newconf->{pending}}) {
+ my $pending = join(',', @changes);
+ warn "found pending changes for '$pending', discarding for clone\n";
+ delete $newconf->{pending};
+ }
+
PVE::QemuConfig->write_config($newid, $newconf);
if ($target) {
PVE::AccessControl::add_vm_to_pool($newid, $pool) if $pool;
};
if (my $err = $@) {
- unlink $conffile;
-
eval { PVE::QemuServer::qemu_blockjobs_cancel($vmid, $jobs) };
-
sleep 1; # some storage like rbd need to wait before release volume - really?
foreach my $volid (@$newvollist) {
eval { PVE::Storage::vdisk_free($storecfg, $volid); };
warn $@ if $@;
}
+
+ PVE::Firewall::remove_vmfw_conf($newid);
+
+ unlink $conffile; # avoid races -> last thing before die
+
die "clone failed: $err";
}
return $rpcenv->fork_worker('qmclone', $vmid, $authuser, $realcmd);
};
- return PVE::QemuConfig->lock_config_mode($vmid, 1, $shared_lock, sub {
- # Aquire exclusive lock lock for $newid
+ # Acquire an exclusive lock for $newid
+ my $lock_target_vm = sub {
return PVE::QemuConfig->lock_config_full($newid, 1, $clonefn);
- });
+ };
+ # exclusive lock if VM is running - else shared lock is enough;
+ if ($running) {
+ return PVE::QemuConfig->lock_config_full($vmid, 1, $lock_target_vm);
+ } else {
+ return PVE::QemuConfig->lock_config_shared($vmid, 1, $lock_target_vm);
+ }
}});
__PACKAGE__->register_method({
disk => {
type => 'string',
description => "The disk you want to move.",
- enum => [ PVE::QemuServer::valid_drive_names() ],
+ enum => [PVE::QemuServer::Drive::valid_drive_names()],
},
storage => get_standard_option('pve-storage-id', {
description => "Target storage.",
maxLength => 40,
optional => 1,
},
+ bwlimit => {
+ description => "Override I/O bandwidth limit (in KiB/s).",
+ optional => 1,
+ type => 'integer',
+ minimum => '0',
+ default => 'move limit from datacenter or storage config',
+ },
},
},
returns => {
my ($param) = @_;
my $rpcenv = PVE::RPCEnvironment::get();
-
my $authuser = $rpcenv->get_user();
my $node = extract_param($param, 'node');
-
my $vmid = extract_param($param, 'vmid');
-
my $digest = extract_param($param, 'digest');
-
my $disk = extract_param($param, 'disk');
-
my $storeid = extract_param($param, 'storage');
-
my $format = extract_param($param, 'format');
my $storecfg = PVE::Storage::config();
my $updatefn = sub {
-
my $conf = PVE::QemuConfig->load_config($vmid);
-
PVE::QemuConfig->check_lock($conf);
- die "checksum missmatch (file change by other user?)\n"
+ die "VM config checksum missmatch (file change by other user?)\n"
if $digest && $digest ne $conf->{digest};
die "disk '$disk' does not exist\n" if !$conf->{$disk};
my $drive = PVE::QemuServer::parse_drive($disk, $conf->{$disk});
- my $old_volid = $drive->{file} || die "disk '$disk' has no associated volume\n";
-
- die "you can't move a cdrom\n" if PVE::QemuServer::drive_is_cdrom($drive);
+ die "disk '$disk' has no associated volume\n" if !$drive->{file};
+ die "you can't move a cdrom\n" if PVE::QemuServer::drive_is_cdrom($drive, 1);
+ my $old_volid = $drive->{file};
my $oldfmt;
my ($oldstoreid, $oldvolname) = PVE::Storage::parse_volume_id($old_volid);
if ($oldvolname =~ m/\.(raw|qcow2|vmdk)$/){
$oldfmt = $1;
}
- die "you can't move on the same storage with same format\n" if $oldstoreid eq $storeid &&
+ die "you can't move to the same storage with same format\n" if $oldstoreid eq $storeid &&
(!$format || !$oldfmt || $oldfmt eq $format);
# this only checks snapshots because $disk is passed!
- my $snapshotted = PVE::QemuServer::is_volume_in_use($storecfg, $conf, $disk, $old_volid);
+ my $snapshotted = PVE::QemuServer::Drive::is_volume_in_use($storecfg, $conf, $disk, $old_volid);
die "you can't move a disk with snapshots and delete the source\n"
if $snapshotted && $param->{delete};
PVE::Storage::activate_volumes($storecfg, [ $drive->{file} ]);
my $realcmd = sub {
-
my $newvollist = [];
eval {
- local $SIG{INT} = $SIG{TERM} = $SIG{QUIT} = $SIG{HUP} = sub { die "interrupted by signal\n"; };
+ local $SIG{INT} =
+ local $SIG{TERM} =
+ local $SIG{QUIT} =
+ local $SIG{HUP} = sub { die "interrupted by signal\n"; };
warn "moving disk with snapshots, snapshots will not be moved!\n"
if $snapshotted;
+ my $bwlimit = extract_param($param, 'bwlimit');
+ my $movelimit = PVE::Storage::get_bandwidth_limit('move', [$oldstoreid, $storeid], $bwlimit);
+
my $newdrive = PVE::QemuServer::clone_disk($storecfg, $vmid, $running, $disk, $drive, undef,
- $vmid, $storeid, $format, 1, $newvollist);
+ $vmid, $storeid, $format, 1, $newvollist, undef, undef, undef, $movelimit, $conf);
- $conf->{$disk} = PVE::QemuServer::print_drive($vmid, $newdrive);
+ $conf->{$disk} = PVE::QemuServer::print_drive($newdrive);
PVE::QemuConfig->add_unused_volume($conf, $old_volid) if !$param->{delete};
PVE::QemuConfig->write_config($vmid, $conf);
+ my $do_trim = PVE::QemuServer::parse_guest_agent($conf)->{fstrim_cloned_disks};
+ if ($running && $do_trim && PVE::QemuServer::qga_check_running($vmid)) {
+ eval { mon_cmd($vmid, "guest-fstrim") };
+ }
+
eval {
# try to deactivate volumes - avoid lvm LVs to be active on several nodes
PVE::Storage::deactivate_volumes($storecfg, [ $newdrive->{file} ])
warn $@ if $@;
};
if (my $err = $@) {
-
- foreach my $volid (@$newvollist) {
- eval { PVE::Storage::vdisk_free($storecfg, $volid); };
- warn $@ if $@;
- }
+ foreach my $volid (@$newvollist) {
+ eval { PVE::Storage::vdisk_free($storecfg, $volid) };
+ warn $@ if $@;
+ }
die "storage migration failed: $err";
}
return PVE::QemuConfig->lock_config($vmid, $updatefn);
}});
+# Return a hash ref (volid => attribute hash) of all VM volumes that reside
+# on non-shared storage, i.e. disks which would have to be copied on
+# migration. Volumes manually marked as shared and empty CD-ROM drives
+# ("none") are skipped. Attributes come from foreach_volid (cdrom, shared,
+# ... flags); the volid itself is added for convenience of the callers.
+my $check_vm_disks_local = sub {
+    my ($storecfg, $vmconf, $vmid) = @_;
+
+    my $local_disks = {};
+
+    # add some more information to the disks e.g. cdrom
+    PVE::QemuServer::foreach_volid($vmconf, sub {
+        my ($volid, $attr) = @_;
+
+        my ($storeid, $volname) = PVE::Storage::parse_volume_id($volid, 1);
+        if ($storeid) {
+            # volumes on shared storage are reachable from the target node anyway
+            my $scfg = PVE::Storage::storage_config($storecfg, $storeid);
+            return if $scfg->{shared};
+        }
+        # The shared attr here is just a special case where the vdisk
+        # is marked as shared manually
+        return if $attr->{shared};
+        return if $attr->{cdrom} and $volid eq "none";
+
+        if (exists $local_disks->{$volid}) {
+            # same volume referenced more than once - merge the attributes
+            @{$local_disks->{$volid}}{keys %$attr} = values %$attr;
+        } else {
+            $local_disks->{$volid} = $attr;
+            # ensure volid is present in case it's needed
+            $local_disks->{$volid}->{volid} = $volid;
+        }
+    });
+
+    return $local_disks;
+};
+
+__PACKAGE__->register_method({
+    name => 'migrate_vm_precondition',
+    path => '{vmid}/migrate',
+    method => 'GET',
+    protected => 1,
+    proxyto => 'node',
+    description => "Get preconditions for migration.",
+    permissions => {
+        check => ['perm', '/vms/{vmid}', [ 'VM.Migrate' ]],
+    },
+    parameters => {
+        additionalProperties => 0,
+        properties => {
+            node => get_standard_option('pve-node'),
+            vmid => get_standard_option('pve-vmid', { completion => \&PVE::QemuServer::complete_vmid }),
+            target => get_standard_option('pve-node', {
+                description => "Target node.",
+                completion => \&PVE::Cluster::complete_migration_target,
+                optional => 1,
+            }),
+        },
+    },
+    returns => {
+        type => "object",
+        properties => {
+            running => { type => 'boolean' },
+            allowed_nodes => {
+                type => 'array',
+                optional => 1,
+                description => "List nodes allowed for offline migration, only passed if VM is offline"
+            },
+            not_allowed_nodes => {
+                type => 'object',
+                optional => 1,
+                description => "List not allowed nodes with additional information, only passed if VM is offline"
+            },
+            local_disks => {
+                type => 'array',
+                description => "List local disks including CD-ROM, unused and not referenced disks"
+            },
+            local_resources => {
+                type => 'array',
+                description => "List local resources e.g. pci, usb"
+            }
+        },
+    },
+    code => sub {
+        my ($param) = @_;
+
+        my $rpcenv = PVE::RPCEnvironment::get();
+        my $authuser = $rpcenv->get_user();
+
+        PVE::Cluster::check_cfs_quorum();
+
+        my $res = {};
+
+        my $vmid = extract_param($param, 'vmid');
+        my $target = extract_param($param, 'target');
+        my $localnode = PVE::INotify::nodename();
+
+        # test if VM exists
+        my $vmconf = PVE::QemuConfig->load_config($vmid);
+        my $storecfg = PVE::Storage::config();
+
+        # try to detect errors early
+        PVE::QemuConfig->check_lock($vmconf);
+
+        $res->{running} = PVE::QemuServer::check_running($vmid) ? 1 : 0;
+
+        # if vm is not running, return target nodes where local storage is available
+        # for offline migration
+        if (!$res->{running}) {
+            $res->{allowed_nodes} = [];
+            my $checked_nodes = PVE::QemuServer::check_local_storage_availability($vmconf, $storecfg);
+            delete $checked_nodes->{$localnode};
+
+            foreach my $node (keys %$checked_nodes) {
+                # a node is allowed if all required storages are available there
+                push @{$res->{allowed_nodes}}, $node
+                    if !defined($checked_nodes->{$node}->{unavailable_storages});
+            }
+            $res->{not_allowed_nodes} = $checked_nodes;
+        }
+
+        my $local_disks = $check_vm_disks_local->($storecfg, $vmconf, $vmid);
+        $res->{local_disks} = [ values %$local_disks ];
+
+        $res->{local_resources} = PVE::QemuServer::check_local_resources($vmconf, 1);
+
+        return $res;
+    }});
+
__PACKAGE__->register_method({
name => 'migrate_vm',
path => '{vmid}/migrate',
properties => {
node => get_standard_option('pve-node'),
vmid => get_standard_option('pve-vmid', { completion => \&PVE::QemuServer::complete_vmid }),
- target => get_standard_option('pve-node', {
+ target => get_standard_option('pve-node', {
description => "Target node.",
completion => \&PVE::Cluster::complete_migration_target,
}),
online => {
type => 'boolean',
- description => "Use online/live migration.",
+ description => "Use online/live migration if VM is running. Ignored if VM is stopped.",
optional => 1,
},
force => {
description => "Enable live storage migration for local disk",
optional => 1,
},
- targetstorage => get_standard_option('pve-storage-id', {
- description => "Default target storage.",
- optional => 1,
- completion => \&PVE::QemuServer::complete_storage,
+ targetstorage => get_standard_option('pve-targetstorage', {
+ completion => \&PVE::QemuServer::complete_migration_storage,
}),
+ bwlimit => {
+ description => "Override I/O bandwidth limit (in KiB/s).",
+ optional => 1,
+ type => 'integer',
+ minimum => '0',
+ default => 'migrate limit from datacenter or storage config',
+ },
},
},
returns => {
my ($param) = @_;
my $rpcenv = PVE::RPCEnvironment::get();
-
my $authuser = $rpcenv->get_user();
my $target = extract_param($param, 'target');
my $vmid = extract_param($param, 'vmid');
- raise_param_exc({ targetstorage => "Live storage migration can only be done online." })
- if !$param->{online} && $param->{targetstorage};
-
raise_param_exc({ force => "Only root may use this option." })
if $param->{force} && $authuser ne 'root@pam';
PVE::QemuConfig->check_lock($conf);
if (PVE::QemuServer::check_running($vmid)) {
- die "cant migrate running VM without --online\n"
- if !$param->{online};
+ die "can't migrate running VM without --online\n" if !$param->{online};
+ } else {
+ warn "VM isn't running. Doing offline migration instead.\n" if $param->{online};
+ $param->{online} = 0;
}
my $storecfg = PVE::Storage::config();
- if( $param->{targetstorage}) {
- PVE::Storage::storage_check_node($storecfg, $param->{targetstorage}, $target);
+ if (my $targetstorage = $param->{targetstorage}) {
+ my $check_storage = sub {
+ my ($target_sid) = @_;
+ PVE::Storage::storage_check_node($storecfg, $target_sid, $target);
+ $rpcenv->check($authuser, "/storage/$target_sid", ['Datastore.AllocateSpace']);
+ my $scfg = PVE::Storage::storage_config($storecfg, $target_sid);
+ raise_param_exc({ targetstorage => "storage '$target_sid' does not support vm images"})
+ if !$scfg->{content}->{images};
+ };
+
+ my $storagemap = eval { PVE::JSONSchema::parse_idmap($targetstorage, 'pve-storage-id') };
+ raise_param_exc({ targetstorage => "failed to parse storage map: $@" })
+ if $@;
+
+ $rpcenv->check_vm_perm($authuser, $vmid, undef, ['VM.Config.Disk'])
+ if !defined($storagemap->{identity});
+
+ foreach my $source (values %{$storagemap->{entries}}) {
+ $check_storage->($source);
+ }
+
+ $check_storage->($storagemap->{default})
+ if $storagemap->{default};
+
+ PVE::QemuServer::check_storage_availability($storecfg, $conf, $target)
+ if $storagemap->{identity};
+
+ $param->{storagemap} = $storagemap;
} else {
PVE::QemuServer::check_storage_availability($storecfg, $conf, $target);
}
my $hacmd = sub {
my $upid = shift;
- my $service = "vm:$vmid";
-
- my $cmd = ['ha-manager', 'migrate', $service, $target];
-
print "Requesting HA migration for VM $vmid to node $target\n";
+ my $cmd = ['ha-manager', 'migrate', "vm:$vmid", $target];
PVE::Tools::run_command($cmd);
-
return;
};
my $res = '';
eval {
- $res = PVE::QemuServer::vm_human_monitor_command($vmid, $param->{command});
+ $res = PVE::QemuServer::Monitor::hmp_cmd($vmid, $param->{command});
};
$res = "ERROR: $@" if $@;
return $res;
}});
-my $guest_agent_commands = [
- 'ping',
- 'get-time',
- 'info',
- 'fsfreeze-status',
- 'fsfreeze-freeze',
- 'fsfreeze-thaw',
- 'fstrim',
- 'network-get-interfaces',
- 'get-vcpus',
- 'get-fsinfo',
- 'get-memory-blocks',
- 'get-memory-block-info',
- 'suspend-hybrid',
- 'suspend-ram',
- 'suspend-disk',
- 'shutdown',
- ];
-
-__PACKAGE__->register_method({
- name => 'agent',
- path => '{vmid}/agent',
- method => 'POST',
- protected => 1,
- proxyto => 'node',
- description => "Execute Qemu Guest Agent commands.",
- permissions => {
- check => ['perm', '/vms/{vmid}', [ 'VM.Monitor' ]],
- },
- parameters => {
- additionalProperties => 0,
- properties => {
- node => get_standard_option('pve-node'),
- vmid => get_standard_option('pve-vmid', {
- completion => \&PVE::QemuServer::complete_vmid_running }),
- command => {
- type => 'string',
- description => "The QGA command.",
- enum => $guest_agent_commands,
- },
- },
- },
- returns => {
- type => 'object',
- description => "Returns an object with a single `result` property. The type of that
-property depends on the executed command.",
- },
- code => sub {
- my ($param) = @_;
-
- my $vmid = $param->{vmid};
-
- my $conf = PVE::QemuConfig->load_config ($vmid); # check if VM exists
-
- die "No Qemu Guest Agent\n" if !defined($conf->{agent});
- die "VM $vmid is not running\n" if !PVE::QemuServer::check_running($vmid);
-
- my $cmd = $param->{command};
-
- my $res = PVE::QemuServer::vm_mon_cmd($vmid, "guest-$cmd");
-
- return { result => $res };
- }});
-
__PACKAGE__->register_method({
name => 'resize_vm',
path => '{vmid}/resize',
disk => {
type => 'string',
description => "The disk you want to resize.",
- enum => [PVE::QemuServer::valid_drive_names()],
+ enum => [PVE::QemuServer::Drive::valid_drive_names()],
},
size => {
type => 'string',
my (undef, undef, undef, undef, undef, undef, $format) =
PVE::Storage::parse_volname($storecfg, $drive->{file});
- die "can't resize volume: $disk if snapshot exists\n"
+ die "can't resize volume: $disk if snapshot exists\n"
if %{$conf->{snapshots}} && $format eq 'qcow2';
my $volid = $drive->{file};
PVE::Storage::activate_volumes($storecfg, [$volid]);
my $size = PVE::Storage::volume_size_info($storecfg, $volid, 5);
+ die "Could not determine current size of volume '$volid'\n" if !defined($size);
+
die "internal error" if $sizestr !~ m/^(\+)?(\d+(\.\d+)?)([KMGT])?$/;
my ($ext, $newsize, $unit) = ($1, $2, $4);
if ($unit) {
PVE::QemuServer::qemu_block_resize($vmid, "drive-$disk", $storecfg, $volid, $newsize);
$drive->{size} = $newsize;
- $conf->{$disk} = PVE::QemuServer::print_drive($vmid, $drive);
+ $conf->{$disk} = PVE::QemuServer::print_drive($drive);
PVE::QemuConfig->write_config($vmid, $conf);
};
type => 'array',
items => {
type => "object",
- properties => {},
+ properties => {
+ name => {
+ description => "Snapshot identifier. Value 'current' identifies the current VM.",
+ type => 'string',
+ },
+ vmstate => {
+ description => "Snapshot includes RAM.",
+ type => 'boolean',
+ optional => 1,
+ },
+ description => {
+ description => "Snapshot description.",
+ type => 'string',
+ },
+ snaptime => {
+ description => "Snapshot creation time",
+ type => 'integer',
+ renderer => 'timestamp',
+ optional => 1,
+ },
+ parent => {
+ description => "Parent snapshot identifier.",
+ type => 'string',
+ optional => 1,
+ },
+ },
},
links => [ { rel => 'child', href => "{name}" } ],
},
}
my $running = PVE::QemuServer::check_running($vmid, 1) ? 1 : 0;
- my $current = { name => 'current', digest => $conf->{digest}, running => $running };
+ my $current = {
+ name => 'current',
+ digest => $conf->{digest},
+ running => $running,
+ description => "You are here!",
+ };
$current->{parent} = $conf->{parent} if $conf->{parent};
push @$res, $current;
die "unable to use snapshot name 'current' (reserved name)\n"
if $snapname eq 'current';
+ die "unable to use snapshot name 'pending' (reserved name)\n"
+ if lc($snapname) eq 'pending';
+
my $realcmd = sub {
PVE::Cluster::log_msg('info', $authuser, "snapshot VM $vmid: $snapname");
- PVE::QemuConfig->snapshot_create($vmid, $snapname, $param->{vmstate},
+ PVE::QemuConfig->snapshot_create($vmid, $snapname, $param->{vmstate},
$param->{description});
};
proxyto => 'node',
description => "Get snapshot configuration",
permissions => {
- check => ['perm', '/vms/{vmid}', [ 'VM.Snapshot' ]],
+ check => ['perm', '/vms/{vmid}', [ 'VM.Snapshot', 'VM.Snapshot.Rollback', 'VM.Audit' ], any => 1],
},
parameters => {
additionalProperties => 0,
proxyto => 'node',
description => "Rollback VM state to specified snapshot.",
permissions => {
- check => ['perm', '/vms/{vmid}', [ 'VM.Snapshot' ]],
+ check => ['perm', '/vms/{vmid}', [ 'VM.Snapshot', 'VM.Snapshot.Rollback' ], any => 1],
},
parameters => {
additionalProperties => 0,
optional => 1,
type => 'string',
description => "If you want to convert only 1 disk to base image.",
- enum => [PVE::QemuServer::valid_drive_names()],
+ enum => [PVE::QemuServer::Drive::valid_drive_names()],
},
},
return undef;
}});
+__PACKAGE__->register_method({
+    name => 'cloudinit_generated_config_dump',
+    path => '{vmid}/cloudinit/dump',
+    method => 'GET',
+    proxyto => 'node',
+    description => "Get automatically generated cloudinit config.",
+    permissions => {
+        check => ['perm', '/vms/{vmid}', [ 'VM.Audit' ]],
+    },
+    parameters => {
+        additionalProperties => 0,
+        properties => {
+            node => get_standard_option('pve-node'),
+            vmid => get_standard_option('pve-vmid', { completion => \&PVE::QemuServer::complete_vmid }),
+            type => {
+                description => 'Config type.',
+                type => 'string',
+                enum => ['user', 'network', 'meta'],
+            },
+        },
+    },
+    returns => {
+        type => 'string',
+    },
+    code => sub {
+        my ($param) = @_;
+
+        my $vmid = $param->{vmid};
+        # loading the config also serves as an existence check for the VM
+        my $conf = PVE::QemuConfig->load_config($vmid);
+
+        return PVE::QemuServer::Cloudinit::dump_cloudinit_config($conf, $vmid, $param->{type});
+    }});
+
1;