die "no storage ID specified (and no default storage)\n" if !$storeid;
my $scfg = PVE::Storage::storage_config($storecfg, $storeid);
my $name = "vm-$vmid-cloudinit";
+
my $fmt = undef;
if ($scfg->{path}) {
- $name .= ".qcow2";
- $fmt = 'qcow2';
- }else{
- $fmt = 'raw';
+ $fmt = $disk->{format} // "qcow2";
+ $name .= ".$fmt";
+ } else {
+ $fmt = $disk->{format} // "raw";
}
- # FIXME: Reasonable size? qcow2 shouldn't grow if the space isn't used anyway?
- my $cloudinit_iso_size = 5; # in MB
- my $volid = PVE::Storage::vdisk_alloc($storecfg, $storeid, $vmid,
- $fmt, $name, $cloudinit_iso_size*1024);
+
+ # Initial disk created with 4 MB and aligned to 4MB on regeneration
+ my $ci_size = PVE::QemuServer::Cloudinit::CLOUDINIT_DISK_SIZE;
+ my $volid = PVE::Storage::vdisk_alloc($storecfg, $storeid, $vmid, $fmt, $name, $ci_size/1024);
$disk->{file} = $volid;
$disk->{media} = 'cdrom';
push @$vollist, $volid;
'tablet' => 1,
'vga' => 1,
'watchdog' => 1,
+ 'audio0' => 1,
};
my $generaloptions = {
return 1 if $authuser eq 'root@pam';
foreach my $opt (@$key_list) {
- # disk checks need to be done somewhere else
+ # some checks (e.g., disk, serial port, usb) need to be done somewhere
+ # else, as the required permission can be value dependent
next if PVE::QemuServer::is_valid_drivename($opt);
next if $opt eq 'cdrom';
- next if $opt =~ m/^unused\d+$/;
+ next if $opt =~ m/^(?:unused|serial|usb)\d+$/;
+
if ($cpuoptions->{$opt} || $opt =~ m/^numa\d+$/) {
$rpcenv->check_vm_perm($authuser, $vmid, $pool, ['VM.Config.CPU']);
} elsif ($cloudinitoptions->{$opt} || ($opt =~ m/^(?:net|ipconfig)\d+$/)) {
$rpcenv->check_vm_perm($authuser, $vmid, $pool, ['VM.Config.Network']);
} else {
- # catches usb\d+, hostpci\d+, args, lock, etc.
+ # catches hostpci\d+, args, lock, etc.
# new options will be checked here
die "only root can set '$opt' config\n";
}
description => "Add the VM to the specified pool.",
},
bwlimit => {
- description => "Override i/o bandwidth limit (in KiB/s).",
+ description => "Override I/O bandwidth limit (in KiB/s).",
optional => 1,
type => 'integer',
minimum => '0',
+ default => 'restore limit from datacenter or storage config',
},
start => {
optional => 1,
PVE::QemuConfig->check_protection($conf, $emsg);
die "$emsg vm is running\n" if PVE::QemuServer::check_running($vmid);
- die "$emsg vm is a template\n" if PVE::QemuConfig->is_template($conf);
my $realcmd = sub {
PVE::QemuServer::restore_archive($archive, $vmid, $authuser, {
storage => $storage,
pool => $pool,
unique => $unique,
- bwlimit => $bwlimit, });
+ bwlimit => $bwlimit,
+ });
+ my $restored_conf = PVE::QemuConfig->load_config($vmid);
+ # Convert restored VM to template if backup was VM template
+ if (PVE::QemuConfig->is_template($restored_conf)) {
+ warn "Convert to template.\n";
+ eval { PVE::QemuServer::template_create($vmid, $restored_conf) };
+ warn $@ if $@;
+ }
PVE::AccessControl::add_vm_to_pool($vmid, $pool) if $pool;
if defined($conf->{pending}->{$opt});
PVE::QemuServer::vmconfig_delete_pending_option($conf, $opt, $force);
PVE::QemuConfig->write_config($vmid, $conf);
+ } elsif ($opt =~ m/^serial\d+$/) {
+ if ($conf->{$opt} eq 'socket' || (!$conf->{$opt} && $conf->{pending}->{$opt} eq 'socket')) {
+ $rpcenv->check_vm_perm($authuser, $vmid, undef, ['VM.Config.HWType']);
+ } elsif ($authuser ne 'root@pam') {
+ die "only root can delete '$opt' config for real devices\n";
+ }
+ PVE::QemuServer::vmconfig_delete_pending_option($conf, $opt, $force);
+ PVE::QemuConfig->write_config($vmid, $conf);
+ } elsif ($opt =~ m/^usb\d+$/) {
+ if ($conf->{$opt} =~ m/spice/ || (!$conf->{$opt} && $conf->{pending}->{$opt} =~ m/spice/)) {
+ $rpcenv->check_vm_perm($authuser, $vmid, undef, ['VM.Config.HWType']);
+ } elsif ($authuser ne 'root@pam') {
+ die "only root can delete '$opt' config for real devices\n";
+ }
+ PVE::QemuServer::vmconfig_delete_pending_option($conf, $opt, $force);
+ PVE::QemuConfig->write_config($vmid, $conf);
} else {
PVE::QemuServer::vmconfig_delete_pending_option($conf, $opt, $force);
PVE::QemuConfig->write_config($vmid, $conf);
if defined($conf->{pending}->{$opt});
&$create_disks($rpcenv, $authuser, $conf->{pending}, $arch, $storecfg, $vmid, undef, {$opt => $param->{$opt}});
+ } elsif ($opt =~ m/^serial\d+/) {
+ if ((!defined($conf->{$opt}) || $conf->{$opt} eq 'socket') && $param->{$opt} eq 'socket') {
+ $rpcenv->check_vm_perm($authuser, $vmid, undef, ['VM.Config.HWType']);
+ } elsif ($authuser ne 'root@pam') {
+ die "only root can modify '$opt' config for real devices\n";
+ }
+ $conf->{pending}->{$opt} = $param->{$opt};
+ } elsif ($opt =~ m/^usb\d+/) {
+ if ((!defined($conf->{$opt}) || $conf->{$opt} =~ m/spice/) && $param->{$opt} =~ m/spice/) {
+ $rpcenv->check_vm_perm($authuser, $vmid, undef, ['VM.Config.HWType']);
+ } elsif ($authuser ne 'root@pam') {
+ die "only root can modify '$opt' config for real devices\n";
+ }
+ $conf->{pending}->{$opt} = $param->{$opt};
} else {
$conf->{pending}->{$opt} = $param->{$opt};
}
}
});
-
__PACKAGE__->register_method({
name => 'destroy_vm',
path => '{vmid}',
my ($param) = @_;
my $rpcenv = PVE::RPCEnvironment::get();
-
my $authuser = $rpcenv->get_user();
-
my $vmid = $param->{vmid};
my $skiplock = $param->{skiplock};
# test if VM exists
my $conf = PVE::QemuConfig->load_config($vmid);
-
my $storecfg = PVE::Storage::config();
-
PVE::QemuConfig->check_protection($conf, "can't remove VM $vmid");
-
die "unable to remove VM $vmid - used in HA resources\n"
if PVE::HA::Config::vm_is_ha_managed($vmid);
my $upid = shift;
syslog('info', "destroy VM $vmid: $upid\n");
-
PVE::QemuServer::vm_destroy($storecfg, $vmid, $skiplock);
-
PVE::AccessControl::remove_vm_access($vmid);
-
PVE::Firewall::remove_vmfw_conf($vmid);
};
{ subdir => 'current' },
{ subdir => 'start' },
{ subdir => 'stop' },
+ { subdir => 'reset' },
+ { subdir => 'shutdown' },
+ { subdir => 'suspend' },
+ { subdir => 'reboot' },
];
return $res;
my ($param) = @_;
my $rpcenv = PVE::RPCEnvironment::get();
-
my $authuser = $rpcenv->get_user();
my $node = extract_param($param, 'node');
-
my $vmid = extract_param($param, 'vmid');
my $machine = extract_param($param, 'machine');
- my $stateuri = extract_param($param, 'stateuri');
- raise_param_exc({ stateuri => "Only root may use this option." })
- if $stateuri && $authuser ne 'root@pam';
-
- my $skiplock = extract_param($param, 'skiplock');
- raise_param_exc({ skiplock => "Only root may use this option." })
- if $skiplock && $authuser ne 'root@pam';
-
- my $migratedfrom = extract_param($param, 'migratedfrom');
- raise_param_exc({ migratedfrom => "Only root may use this option." })
- if $migratedfrom && $authuser ne 'root@pam';
-
- my $migration_type = extract_param($param, 'migration_type');
- raise_param_exc({ migration_type => "Only root may use this option." })
- if $migration_type && $authuser ne 'root@pam';
-
- my $migration_network = extract_param($param, 'migration_network');
- raise_param_exc({ migration_network => "Only root may use this option." })
- if $migration_network && $authuser ne 'root@pam';
+ my $get_root_param = sub {
+ my $value = extract_param($param, $_[0]);
+ raise_param_exc({ "$_[0]" => "Only root may use this option." })
+ if $value && $authuser ne 'root@pam';
+ return $value;
+ };
- my $targetstorage = extract_param($param, 'targetstorage');
- raise_param_exc({ targetstorage => "Only root may use this option." })
- if $targetstorage && $authuser ne 'root@pam';
+ my $stateuri = $get_root_param->('stateuri');
+ my $skiplock = $get_root_param->('skiplock');
+ my $migratedfrom = $get_root_param->('migratedfrom');
+ my $migration_type = $get_root_param->('migration_type');
+ my $migration_network = $get_root_param->('migration_network');
+ my $targetstorage = $get_root_param->('targetstorage');
raise_param_exc({ targetstorage => "targetstorage can only by used with migratedfrom." })
if $targetstorage && !$migratedfrom;
# read spice ticket from STDIN
my $spice_ticket;
- if ($stateuri && ($stateuri eq 'tcp') && $migratedfrom && ($rpcenv->{type} eq 'cli')) {
+ if ($stateuri && ($stateuri eq 'tcp' || $stateuri eq 'unix') && $migratedfrom && ($rpcenv->{type} eq 'cli')) {
if (defined(my $line = <STDIN>)) {
chomp $line;
$spice_ticket = $line;
my $storecfg = PVE::Storage::config();
- if (PVE::HA::Config::vm_is_ha_managed($vmid) && !$stateuri &&
- $rpcenv->{type} ne 'ha') {
-
+ if (PVE::HA::Config::vm_is_ha_managed($vmid) && !$stateuri && $rpcenv->{type} ne 'ha') {
my $hacmd = sub {
my $upid = shift;
- my $service = "vm:$vmid";
-
- my $cmd = ['ha-manager', 'set', $service, '--state', 'started'];
-
print "Requesting HA start for VM $vmid\n";
+ my $cmd = ['ha-manager', 'set', "vm:$vmid", '--state', 'started'];
PVE::Tools::run_command($cmd);
-
return;
};
PVE::QemuServer::vm_start($storecfg, $vmid, $stateuri, $skiplock, $migratedfrom, undef,
$machine, $spice_ticket, $migration_network, $migration_type, $targetstorage);
-
return;
};
my ($param) = @_;
my $rpcenv = PVE::RPCEnvironment::get();
-
my $authuser = $rpcenv->get_user();
my $node = extract_param($param, 'node');
-
my $vmid = extract_param($param, 'vmid');
my $skiplock = extract_param($param, 'skiplock');
my $hacmd = sub {
my $upid = shift;
- my $service = "vm:$vmid";
-
- my $cmd = ['ha-manager', 'set', $service, '--state', 'stopped'];
-
print "Requesting HA stop for VM $vmid\n";
+ my $cmd = ['ha-manager', 'set', "vm:$vmid", '--state', 'stopped'];
PVE::Tools::run_command($cmd);
-
return;
};
PVE::QemuServer::vm_stop($storecfg, $vmid, $skiplock, 0,
$param->{timeout}, 0, 1, $keepActive, $migratedfrom);
-
return;
};
my ($param) = @_;
my $rpcenv = PVE::RPCEnvironment::get();
-
my $authuser = $rpcenv->get_user();
my $node = extract_param($param, 'node');
-
my $vmid = extract_param($param, 'vmid');
my $skiplock = extract_param($param, 'skiplock');
#
# checking the qmp status here to get feedback to the gui/cli/api
# and the status query should not take too long
- my $qmpstatus;
- eval {
- $qmpstatus = PVE::QemuServer::vm_qmp_command($vmid, { execute => "query-status" }, 0);
+ my $qmpstatus = eval {
+ PVE::QemuServer::vm_qmp_command($vmid, { execute => "query-status" }, 0);
};
my $err = $@ if $@;
}
}
- if (PVE::HA::Config::vm_is_ha_managed($vmid) &&
- ($rpcenv->{type} ne 'ha')) {
+ if (PVE::HA::Config::vm_is_ha_managed($vmid) && $rpcenv->{type} ne 'ha') {
my $hacmd = sub {
my $upid = shift;
- my $service = "vm:$vmid";
-
- my $cmd = ['ha-manager', 'set', $service, '--state', 'stopped'];
-
print "Requesting HA stop for VM $vmid\n";
+ my $cmd = ['ha-manager', 'set', "vm:$vmid", '--state', 'stopped'];
PVE::Tools::run_command($cmd);
-
return;
};
PVE::QemuServer::vm_stop($storecfg, $vmid, $skiplock, 0, $param->{timeout},
$shutdown, $param->{forceStop}, $keepActive);
-
return;
};
}
}});
+# API: POST /nodes/{node}/qemu/{vmid}/status/reboot
+# Gracefully shuts the guest down and starts it again in one task,
+# applying any pending configuration changes in between.
+__PACKAGE__->register_method({
+ name => 'vm_reboot',
+ path => '{vmid}/status/reboot',
+ method => 'POST',
+ protected => 1,
+ proxyto => 'node',
+ description => "Reboot the VM by shutting it down, and starting it again. Applies pending changes.",
+ permissions => {
+ check => ['perm', '/vms/{vmid}', [ 'VM.PowerMgmt' ]],
+ },
+ parameters => {
+ additionalProperties => 0,
+ properties => {
+ node => get_standard_option('pve-node'),
+ vmid => get_standard_option('pve-vmid',
+ { completion => \&PVE::QemuServer::complete_vmid_running }),
+ timeout => {
+ description => "Wait maximal timeout seconds for the shutdown.",
+ type => 'integer',
+ minimum => 0,
+ optional => 1,
+ },
+ },
+ },
+ returns => {
+ type => 'string',
+ },
+ code => sub {
+ my ($param) = @_;
+
+ my $rpcenv = PVE::RPCEnvironment::get();
+ my $authuser = $rpcenv->get_user();
+
+ my $node = extract_param($param, 'node');
+ my $vmid = extract_param($param, 'vmid');
+
+ # best-effort QMP status query; a failure here must not abort the reboot
+ my $qmpstatus = eval {
+ PVE::QemuServer::vm_qmp_command($vmid, { execute => "query-status" }, 0);
+ };
+ # NOTE: 'my $err = $@ if $@;' has undefined behavior (a 'my' declaration
+ # with a statement-modifier conditional, see perlsyn) - assign unconditionally.
+ my $err = $@;
+
+ if (!$err && $qmpstatus->{status} eq "paused") {
+ die "VM is paused - cannot shutdown\n";
+ }
+
+ die "VM $vmid not running\n" if !PVE::QemuServer::check_running($vmid);
+
+ my $realcmd = sub {
+ my $upid = shift;
+
+ syslog('info', "requesting reboot of VM $vmid: $upid\n");
+ PVE::QemuServer::vm_reboot($vmid, $param->{timeout});
+ return;
+ };
+
+ return $rpcenv->fork_worker('qmreboot', $vmid, $authuser, $realcmd);
+ }});
+
__PACKAGE__->register_method({
name => 'vm_suspend',
path => '{vmid}/status/suspend',
my ($param) = @_;
my $rpcenv = PVE::RPCEnvironment::get();
-
my $authuser = $rpcenv->get_user();
my $node = extract_param($param, 'node');
-
my $vmid = extract_param($param, 'vmid');
my $todisk = extract_param($param, 'todisk') // 0;
return;
};
- return $rpcenv->fork_worker('qmsuspend', $vmid, $authuser, $realcmd);
+ my $taskname = $todisk ? 'qmsuspend' : 'qmpause';
+ return $rpcenv->fork_worker($taskname, $vmid, $authuser, $realcmd);
}});
__PACKAGE__->register_method({
my $nocheck = extract_param($param, 'nocheck');
- die "VM $vmid not running\n" if !PVE::QemuServer::check_running($vmid, $nocheck);
+ my $to_disk_suspended;
+ eval {
+ PVE::QemuConfig->lock_config($vmid, sub {
+ my $conf = PVE::QemuConfig->load_config($vmid);
+ $to_disk_suspended = PVE::QemuConfig->has_lock($conf, 'suspended');
+ });
+ };
+
+ die "VM $vmid not running\n"
+ if !$to_disk_suspended && !PVE::QemuServer::check_running($vmid, $nocheck);
my $realcmd = sub {
my $upid = shift;
syslog('info', "resume VM $vmid: $upid\n");
- PVE::QemuServer::vm_resume($vmid, $skiplock, $nocheck);
+ if (!$to_disk_suspended) {
+ PVE::QemuServer::vm_resume($vmid, $skiplock, $nocheck);
+ } else {
+ my $storecfg = PVE::Storage::config();
+ PVE::QemuServer::vm_start($storecfg, $vmid, undef, $skiplock);
+ }
return;
};
description => "Target node. Only allowed if the original VM is on shared storage.",
optional => 1,
}),
+ bwlimit => {
+ description => "Override I/O bandwidth limit (in KiB/s).",
+ optional => 1,
+ type => 'integer',
+ minimum => '0',
+ default => 'clone limit from datacenter or storage config',
+ },
},
},
returns => {
PVE::Storage::activate_volumes($storecfg, $vollist, $snapname);
+ my $bwlimit = extract_param($param, 'bwlimit');
+
my $total_jobs = scalar(keys %{$drives});
my $i = 1;
my $drive = $drives->{$opt};
my $skipcomplete = ($total_jobs != $i); # finish after last drive
+ my $src_sid = PVE::Storage::parse_volume_id($drive->{file});
+ my $storage_list = [ $src_sid ];
+ push @$storage_list, $storage if defined($storage);
+ my $clonelimit = PVE::Storage::get_bandwidth_limit('clone', $storage_list, $bwlimit);
+
my $newdrive = PVE::QemuServer::clone_disk($storecfg, $vmid, $running, $opt, $drive, $snapname,
$newid, $storage, $format, $fullclone->{$opt}, $newvollist,
- $jobs, $skipcomplete, $oldconf->{agent});
+ $jobs, $skipcomplete, $oldconf->{agent}, $clonelimit);
$newconf->{$opt} = PVE::QemuServer::print_drive($vmid, $newdrive);
maxLength => 40,
optional => 1,
},
+ bwlimit => {
+ description => "Override I/O bandwidth limit (in KiB/s).",
+ optional => 1,
+ type => 'integer',
+ minimum => '0',
+ default => 'move limit from datacenter or storage config',
+ },
},
},
returns => {
warn "moving disk with snapshots, snapshots will not be moved!\n"
if $snapshotted;
+ my $bwlimit = extract_param($param, 'bwlimit');
+ my $movelimit = PVE::Storage::get_bandwidth_limit('move', [$oldstoreid, $storeid], $bwlimit);
+
my $newdrive = PVE::QemuServer::clone_disk($storecfg, $vmid, $running, $disk, $drive, undef,
- $vmid, $storeid, $format, 1, $newvollist);
+ $vmid, $storeid, $format, 1, $newvollist, undef, undef, undef, $movelimit);
$conf->{$disk} = PVE::QemuServer::print_drive($vmid, $newdrive);
return PVE::QemuConfig->lock_config($vmid, $updatefn);
}});
+# Return a hashref mapping volid => attribute-hash for every volume of the
+# VM that does NOT live on shared storage, i.e. volumes that would have to
+# be copied on migration. Manually shared-marked volumes and empty cdrom
+# drives ("none") are excluded.
+my $check_vm_disks_local = sub {
+ my ($storecfg, $vmconf, $vmid) = @_;
+
+ my $local_disks = {};
+
+ # add some more information to the disks e.g. cdrom
+ PVE::QemuServer::foreach_volid($vmconf, sub {
+ my ($volid, $attr) = @_;
+
+ # second arg presumably a noerr flag: $storeid stays undef for
+ # unparsable volids (e.g. plain paths) instead of dying - TODO confirm
+ my ($storeid, $volname) = PVE::Storage::parse_volume_id($volid, 1);
+ if ($storeid) {
+ my $scfg = PVE::Storage::storage_config($storecfg, $storeid);
+ return if $scfg->{shared};
+ }
+ # The shared attr here is just a special case where the vdisk
+ # is marked as shared manually
+ return if $attr->{shared};
+ return if $attr->{cdrom} and $volid eq "none";
+
+ if (exists $local_disks->{$volid}) {
+ # volume referenced more than once: merge new attrs via hash slice
+ @{$local_disks->{$volid}}{keys %$attr} = values %$attr
+ } else {
+ $local_disks->{$volid} = $attr;
+ # ensure volid is present in case it's needed
+ $local_disks->{$volid}->{volid} = $volid;
+ }
+ });
+
+ return $local_disks;
+};
+
+# API: GET /nodes/{node}/qemu/{vmid}/migrate
+# Report migration preconditions: run state, (for offline VMs) which nodes
+# have the required local storages available, plus local disks/resources
+# that would need special handling.
+__PACKAGE__->register_method({
+ name => 'migrate_vm_precondition',
+ path => '{vmid}/migrate',
+ method => 'GET',
+ protected => 1,
+ proxyto => 'node',
+ description => "Get preconditions for migration.",
+ permissions => {
+ check => ['perm', '/vms/{vmid}', [ 'VM.Migrate' ]],
+ },
+ parameters => {
+ additionalProperties => 0,
+ properties => {
+ node => get_standard_option('pve-node'),
+ vmid => get_standard_option('pve-vmid', { completion => \&PVE::QemuServer::complete_vmid }),
+ target => get_standard_option('pve-node', {
+ description => "Target node.",
+ completion => \&PVE::Cluster::complete_migration_target,
+ optional => 1,
+ }),
+ },
+ },
+ returns => {
+ type => "object",
+ properties => {
+ running => { type => 'boolean' },
+ allowed_nodes => {
+ type => 'array',
+ optional => 1,
+ description => "List nodes allowed for offline migration, only passed if VM is offline"
+ },
+ not_allowed_nodes => {
+ type => 'object',
+ optional => 1,
+ description => "List not allowed nodes with additional information, only passed if VM is offline"
+ },
+ local_disks => {
+ type => 'array',
+ description => "List local disks including CD-ROM, unused and not referenced disks"
+ },
+ local_resources => {
+ type => 'array',
+ description => "List local resources e.g. pci, usb"
+ }
+ },
+ },
+ code => sub {
+ my ($param) = @_;
+
+ my $rpcenv = PVE::RPCEnvironment::get();
+ my $authuser = $rpcenv->get_user();
+
+ PVE::Cluster::check_cfs_quorum();
+
+ my $res = {};
+
+ my $vmid = extract_param($param, 'vmid');
+ my $target = extract_param($param, 'target');
+ my $localnode = PVE::INotify::nodename();
+
+ # test if VM exists
+ my $vmconf = PVE::QemuConfig->load_config($vmid);
+ my $storecfg = PVE::Storage::config();
+
+ # try to detect errors early
+ PVE::QemuConfig->check_lock($vmconf);
+
+ $res->{running} = PVE::QemuServer::check_running($vmid) ? 1 : 0;
+
+ # if vm is not running, return target nodes where local storage is available
+ # for offline migration
+ if (!$res->{running}) {
+ $res->{allowed_nodes} = [];
+ my $checked_nodes = PVE::QemuServer::check_local_storage_availability($vmconf, $storecfg);
+ delete $checked_nodes->{$localnode};
+
+ foreach my $node (keys %$checked_nodes) {
+ push @{$res->{allowed_nodes}}, $node
+ if !defined($checked_nodes->{$node}->{unavailable_storages});
+ }
+ # nodes with unavailable storages, keyed by node name
+ $res->{not_allowed_nodes} = $checked_nodes;
+ }
+
+ my $local_disks = &$check_vm_disks_local($storecfg, $vmconf, $vmid);
+ $res->{local_disks} = [ values %$local_disks ];
+
+ $res->{local_resources} = PVE::QemuServer::check_local_resources($vmconf, 1);
+
+ return $res;
+ }});
+
__PACKAGE__->register_method({
name => 'migrate_vm',
path => '{vmid}/migrate',
properties => {
node => get_standard_option('pve-node'),
vmid => get_standard_option('pve-vmid', { completion => \&PVE::QemuServer::complete_vmid }),
- target => get_standard_option('pve-node', {
+ target => get_standard_option('pve-node', {
description => "Target node.",
completion => \&PVE::Cluster::complete_migration_target,
}),
optional => 1,
completion => \&PVE::QemuServer::complete_storage,
}),
+ bwlimit => {
+ description => "Override I/O bandwidth limit (in KiB/s).",
+ optional => 1,
+ type => 'integer',
+ minimum => '0',
+ default => 'migrate limit from datacenter or storage config',
+ },
},
},
returns => {
my ($param) = @_;
my $rpcenv = PVE::RPCEnvironment::get();
-
my $authuser = $rpcenv->get_user();
my $target = extract_param($param, 'target');
my $hacmd = sub {
my $upid = shift;
- my $service = "vm:$vmid";
-
- my $cmd = ['ha-manager', 'migrate', $service, $target];
-
print "Requesting HA migration for VM $vmid to node $target\n";
+ my $cmd = ['ha-manager', 'migrate', "vm:$vmid", $target];
PVE::Tools::run_command($cmd);
-
return;
};
my (undef, undef, undef, undef, undef, undef, $format) =
PVE::Storage::parse_volname($storecfg, $drive->{file});
- die "can't resize volume: $disk if snapshot exists\n"
+ die "can't resize volume: $disk if snapshot exists\n"
if %{$conf->{snapshots}} && $format eq 'qcow2';
my $volid = $drive->{file};
my $realcmd = sub {
PVE::Cluster::log_msg('info', $authuser, "snapshot VM $vmid: $snapname");
- PVE::QemuConfig->snapshot_create($vmid, $snapname, $param->{vmstate},
+ PVE::QemuConfig->snapshot_create($vmid, $snapname, $param->{vmstate},
$param->{description});
};
return undef;
}});
+# API: GET /nodes/{node}/qemu/{vmid}/cloudinit/dump
+# Return the auto-generated cloudinit data for one config section
+# ('user', 'network' or 'meta') of the VM as a plain string.
+__PACKAGE__->register_method({
+ name => 'cloudinit_generated_config_dump',
+ path => '{vmid}/cloudinit/dump',
+ method => 'GET',
+ proxyto => 'node',
+ description => "Get automatically generated cloudinit config.",
+ permissions => {
+ check => ['perm', '/vms/{vmid}', [ 'VM.Audit' ]],
+ },
+ parameters => {
+ additionalProperties => 0,
+ properties => {
+ node => get_standard_option('pve-node'),
+ vmid => get_standard_option('pve-vmid', { completion => \&PVE::QemuServer::complete_vmid }),
+ type => {
+ description => 'Config type.',
+ type => 'string',
+ enum => ['user', 'network', 'meta'],
+ },
+ },
+ },
+ returns => {
+ type => 'string',
+ },
+ code => sub {
+ my ($param) = @_;
+
+ my $conf = PVE::QemuConfig->load_config($param->{vmid});
+
+ # generation is delegated entirely to the Cloudinit module
+ return PVE::QemuServer::Cloudinit::dump_cloudinit_config($conf, $param->{vmid}, $param->{type});
+ }});
+
1;