use warnings;
use Cwd 'abs_path';
use Net::SSLeay;
-use UUID;
use POSIX;
use IO::Socket::IP;
use URI::Escape;
use PVE::Cluster qw (cfs_read_file cfs_write_file);;
+use PVE::RRD;
use PVE::SafeSyslog;
use PVE::Tools qw(extract_param);
use PVE::Exception qw(raise raise_param_exc raise_perm_exc);
use PVE::GuestHelpers;
use PVE::QemuConfig;
use PVE::QemuServer;
+use PVE::QemuServer::Monitor qw(mon_cmd);
use PVE::QemuMigrate;
use PVE::RPCEnvironment;
use PVE::AccessControl;
use PVE::Firewall;
use PVE::API2::Firewall::VM;
use PVE::API2::Qemu::Agent;
+use PVE::VZDump::Plugin;
+use PVE::DataCenterConfig;
+use PVE::SSHInfo;
BEGIN {
if (!$ENV{PVE_GENERATING_DOCS}) {
my $isCDROM = PVE::QemuServer::drive_is_cdrom($drive);
my $volid = $drive->{file};
+ my ($storeid, $volname) = PVE::Storage::parse_volume_id($volid, 1);
- if (!$volid || ($volid eq 'none' || $volid eq 'cloudinit')) {
- # nothing to check
- } elsif ($volid =~ m/^(([^:\s]+):)?(cloudinit)$/) {
+ if (!$volid || ($volid eq 'none' || $volid eq 'cloudinit' || (defined($volname) && $volname eq 'cloudinit'))) {
# nothing to check
} elsif ($isCDROM && ($volid eq 'cdrom')) {
$rpcenv->check($authuser, "/", ['Sys.Console']);
my ($ds, $disk) = @_;
my $volid = $disk->{file};
+ my ($storeid, $volname) = PVE::Storage::parse_volume_id($volid, 1);
if (!$volid || $volid eq 'none' || $volid eq 'cdrom') {
delete $disk->{size};
- $res->{$ds} = PVE::QemuServer::print_drive($vmid, $disk);
- } elsif ($volid =~ m!^(?:([^/:\s]+):)?cloudinit$!) {
- my $storeid = $1 || $default_storage;
+ $res->{$ds} = PVE::QemuServer::print_drive($disk);
+ } elsif (defined($volname) && $volname eq 'cloudinit') {
+ $storeid = $storeid // $default_storage;
die "no storage ID specified (and no default storage)\n" if !$storeid;
my $scfg = PVE::Storage::storage_config($storecfg, $storeid);
my $name = "vm-$vmid-cloudinit";
$fmt = $disk->{format} // "raw";
}
- # Initial disk created with 4MB, every time it is regenerated the disk is aligned to 4MB again.
- my $cloudinit_iso_size = 4; # in MB
- my $volid = PVE::Storage::vdisk_alloc($storecfg, $storeid, $vmid,
- $fmt, $name, $cloudinit_iso_size*1024);
+ # Initial disk created with 4 MB and re-aligned to 4 MB on regeneration
+ my $ci_size = PVE::QemuServer::Cloudinit::CLOUDINIT_DISK_SIZE;
+ my $volid = PVE::Storage::vdisk_alloc($storecfg, $storeid, $vmid, $fmt, $name, $ci_size/1024);
$disk->{file} = $volid;
$disk->{media} = 'cdrom';
push @$vollist, $volid;
delete $disk->{format}; # no longer needed
- $res->{$ds} = PVE::QemuServer::print_drive($vmid, $disk);
+ $res->{$ds} = PVE::QemuServer::print_drive($disk);
} elsif ($volid =~ $NEW_DISK_RE) {
my ($storeid, $size) = ($2 || $default_storage, $3);
die "no storage ID specified (and no default storage)\n" if !$storeid;
$disk->{file} = $volid;
$disk->{size} = PVE::Tools::convert_size($size, 'kb' => 'b');
delete $disk->{format}; # no longer needed
- $res->{$ds} = PVE::QemuServer::print_drive($vmid, $disk);
+ $res->{$ds} = PVE::QemuServer::print_drive($disk);
} else {
PVE::Storage::check_volume_access($rpcenv, $authuser, $storecfg, $vmid, $volid);
if ($volid_is_new) {
- my ($storeid, $volname) = PVE::Storage::parse_volume_id($volid, 1);
-
PVE::Storage::activate_volumes($storecfg, [ $volid ]) if $storeid;
my $size = PVE::Storage::volume_size_info($storecfg, $volid);
- die "volume $volid does not exists\n" if !$size;
+ die "volume $volid does not exist\n" if !$size;
$disk->{size} = $size;
}
- $res->{$ds} = PVE::QemuServer::print_drive($vmid, $disk);
+ $res->{$ds} = PVE::QemuServer::print_drive($disk);
}
};
'tablet' => 1,
'vga' => 1,
'watchdog' => 1,
+ 'audio0' => 1,
};
my $generaloptions = {
'startup' => 1,
'tdf' => 1,
'template' => 1,
+ 'tags' => 1,
};
my $vmpoweroptions = {
$rpcenv->check_vm_perm($authuser, $vmid, $pool, ['VM.Config.Disk']);
} elsif ($cloudinitoptions->{$opt} || ($opt =~ m/^(?:net|ipconfig)\d+$/)) {
$rpcenv->check_vm_perm($authuser, $vmid, $pool, ['VM.Config.Network']);
+ } elsif ($opt eq 'vmstate') {
+ # the user needs Disk and PowerMgmt privileges to change the vmstate
+ # also needs privileges on the storage, which will be checked later
+ $rpcenv->check_vm_perm($authuser, $vmid, $pool, ['VM.Config.Disk', 'VM.PowerMgmt' ]);
} else {
# catches hostpci\d+, args, lock, etc.
# new options will be checked here
my ($param) = @_;
my $rpcenv = PVE::RPCEnvironment::get();
-
my $authuser = $rpcenv->get_user();
my $node = extract_param($param, 'node');
-
my $vmid = extract_param($param, 'vmid');
my $archive = extract_param($param, 'archive');
my $is_restore = !!$archive;
- my $storage = extract_param($param, 'storage');
-
+ my $bwlimit = extract_param($param, 'bwlimit');
my $force = extract_param($param, 'force');
-
- my $unique = extract_param($param, 'unique');
-
my $pool = extract_param($param, 'pool');
-
- my $bwlimit = extract_param($param, 'bwlimit');
-
my $start_after_create = extract_param($param, 'start');
-
- my $filename = PVE::QemuConfig->config_file($vmid);
-
- my $storecfg = PVE::Storage::config();
+ my $storage = extract_param($param, 'storage');
+ my $unique = extract_param($param, 'unique');
if (defined(my $ssh_keys = $param->{sshkeys})) {
$ssh_keys = URI::Escape::uri_unescape($ssh_keys);
PVE::Cluster::check_cfs_quorum();
+ my $filename = PVE::QemuConfig->config_file($vmid);
+ my $storecfg = PVE::Storage::config();
+
if (defined($pool)) {
$rpcenv->check_pool_exist($pool);
}
raise_param_exc({ $opt => "unable to parse drive options" }) if !$drive;
PVE::QemuServer::cleanup_drive_path($opt, $storecfg, $drive);
- $param->{$opt} = PVE::QemuServer::print_drive($vmid, $drive);
+ $param->{$opt} = PVE::QemuServer::print_drive($drive);
}
}
PVE::QemuConfig->check_protection($conf, $emsg);
die "$emsg vm is running\n" if PVE::QemuServer::check_running($vmid);
- die "$emsg vm is a template\n" if PVE::QemuConfig->is_template($conf);
my $realcmd = sub {
PVE::QemuServer::restore_archive($archive, $vmid, $authuser, {
storage => $storage,
pool => $pool,
unique => $unique,
- bwlimit => $bwlimit, });
-
- PVE::AccessControl::add_vm_to_pool($vmid, $pool) if $pool;
-
- if ($start_after_create) {
- eval { PVE::API2::Qemu->vm_start({ vmid => $vmid, node => $node }) };
+ bwlimit => $bwlimit,
+ });
+ my $restored_conf = PVE::QemuConfig->load_config($vmid);
+ # Convert restored VM to template if backup was VM template
+ if (PVE::QemuConfig->is_template($restored_conf)) {
+ warn "Convert to template.\n";
+ eval { PVE::QemuServer::template_create($vmid, $restored_conf) };
warn $@ if $@;
}
+
+ PVE::AccessControl::add_vm_to_pool($vmid, $pool) if $pool;
};
# ensure no old replication state are exists
PVE::ReplicationState::delete_guest_states($vmid);
- return PVE::QemuConfig->lock_config_full($vmid, 1, $realcmd);
+ PVE::QemuConfig->lock_config_full($vmid, 1, $realcmd);
+
+ if ($start_after_create) {
+ print "Execute autostart\n";
+ eval { PVE::API2::Qemu->vm_start({ vmid => $vmid, node => $node }) };
+ warn $@ if $@;
+ }
};
my $createfn = sub {
PVE::ReplicationState::delete_guest_states($vmid);
my $realcmd = sub {
-
- my $vollist = [];
-
my $conf = $param;
+ my $arch = PVE::QemuServer::get_vm_arch($conf);
- my ($arch, undef) = PVE::QemuServer::get_basic_machine_info($conf);
-
+ my $vollist = [];
eval {
-
$vollist = &$create_disks($rpcenv, $authuser, $conf, $arch, $storecfg, $vmid, $pool, $param, $storage);
if (!$conf->{bootdisk}) {
code => sub {
my ($param) = @_;
- return PVE::Cluster::create_rrd_graph(
+ return PVE::RRD::create_rrd_graph(
"pve2-vm/$param->{vmid}", $param->{timeframe},
$param->{ds}, $param->{cf});
code => sub {
my ($param) = @_;
- return PVE::Cluster::create_rrd_data(
+ return PVE::RRD::create_rrd_data(
"pve2-vm/$param->{vmid}", $param->{timeframe}, $param->{cf});
}});
code => sub {
my ($param) = @_;
- my $conf = PVE::QemuConfig->load_config($param->{vmid});
-
- if (my $snapname = $param->{snapshot}) {
- my $snapshot = $conf->{snapshots}->{$snapname};
- die "snapshot '$snapname' does not exist\n" if !defined($snapshot);
-
- $snapshot->{digest} = $conf->{digest}; # keep file digest for API
-
- $conf = $snapshot;
- }
-
- delete $conf->{snapshots};
-
- if (!$param->{current}) {
- foreach my $opt (keys %{$conf->{pending}}) {
- next if $opt eq 'delete';
- my $value = $conf->{pending}->{$opt};
- next if ref($value); # just to be sure
- $conf->{$opt} = $value;
- }
- my $pending_delete_hash = PVE::QemuServer::split_flagged_list($conf->{pending}->{delete});
- foreach my $opt (keys %$pending_delete_hash) {
- delete $conf->{$opt} if $conf->{$opt};
- }
- }
-
- delete $conf->{pending};
+ raise_param_exc({ snapshot => "cannot use 'snapshot' parameter with 'current'",
+ current => "cannot use 'snapshot' parameter with 'current'"})
+ if ($param->{snapshot} && $param->{current});
- # hide cloudinit password
- if ($conf->{cipassword}) {
- $conf->{cipassword} = '**********';
+ my $conf;
+ if ($param->{snapshot}) {
+ $conf = PVE::QemuConfig->load_snapshot_config($param->{vmid}, $param->{snapshot});
+ } else {
+ $conf = PVE::QemuConfig->load_current_config($param->{vmid}, $param->{current});
}
-
+ $conf->{cipassword} = '**********' if $conf->{cipassword};
return $conf;
+
}});
__PACKAGE__->register_method({
my $conf = PVE::QemuConfig->load_config($param->{vmid});
- my $pending_delete_hash = PVE::QemuServer::split_flagged_list($conf->{pending}->{delete});
-
- my $res = [];
-
- foreach my $opt (keys %$conf) {
- next if ref($conf->{$opt});
- my $item = { key => $opt };
- $item->{value} = $conf->{$opt} if defined($conf->{$opt});
- $item->{pending} = $conf->{pending}->{$opt} if defined($conf->{pending}->{$opt});
- $item->{delete} = ($pending_delete_hash->{$opt} ? 2 : 1) if exists $pending_delete_hash->{$opt};
-
- # hide cloudinit password
- if ($opt eq 'cipassword') {
- $item->{value} = '**********' if defined($item->{value});
- # the trailing space so that the pending string is different
- $item->{pending} = '********** ' if defined($item->{pending});
- }
- push @$res, $item;
- }
-
- foreach my $opt (keys %{$conf->{pending}}) {
- next if $opt eq 'delete';
- next if ref($conf->{pending}->{$opt}); # just to be sure
- next if defined($conf->{$opt});
- my $item = { key => $opt };
- $item->{pending} = $conf->{pending}->{$opt};
+ my $pending_delete_hash = PVE::QemuConfig->parse_pending_delete($conf->{pending}->{delete});
- # hide cloudinit password
- if ($opt eq 'cipassword') {
- $item->{pending} = '**********' if defined($item->{pending});
- }
- push @$res, $item;
- }
-
- while (my ($opt, $force) = each %$pending_delete_hash) {
- next if $conf->{pending}->{$opt}; # just to be sure
- next if $conf->{$opt};
- my $item = { key => $opt, delete => ($force ? 2 : 1)};
- push @$res, $item;
- }
+ $conf->{cipassword} = '**********' if defined($conf->{cipassword});
+ $conf->{pending}->{cipassword} = '********** ' if defined($conf->{pending}->{cipassword});
- return $res;
- }});
+ return PVE::GuestHelpers::config_with_pending_array($conf, $pending_delete_hash);
+ }});
# POST/PUT {vmid}/config implementation
#
my $volid = $drive->{file};
return if !$volid || !($drive->{replicate}//1);
return if PVE::QemuServer::drive_is_cdrom($drive);
- my ($storeid, $format);
+
+ my ($storeid, $volname) = PVE::Storage::parse_volume_id($volid, 1);
+ return if $volname eq 'cloudinit';
+
+ my $format;
if ($volid =~ $NEW_DISK_RE) {
$storeid = $2;
$format = $drive->{format} || PVE::Storage::storage_default_format($storecfg, $storeid);
} else {
- ($storeid, undef) = PVE::Storage::parse_volume_id($volid, 1);
$format = (PVE::Storage::parse_volname($storecfg, $volid))[6];
}
return if PVE::Storage::storage_can_replicate($storecfg, $storeid, $format);
raise_param_exc({ $opt => "unable to parse drive options" }) if !$drive;
PVE::QemuServer::cleanup_drive_path($opt, $storecfg, $drive);
$check_replication->($drive);
- $param->{$opt} = PVE::QemuServer::print_drive($vmid, $drive);
+ $param->{$opt} = PVE::QemuServer::print_drive($drive);
} elsif ($opt =~ m/^net(\d+)$/) {
# add macaddr
my $net = PVE::QemuServer::parse_net($param->{$opt});
die "checksum missmatch (file change by other user?)\n"
if $digest && $digest ne $conf->{digest};
+ # FIXME: 'suspended' lock should probably be a state or "weak" lock?!
+ if (scalar(@delete) && grep { $_ eq 'vmstate'} @delete) {
+ if (defined($conf->{lock}) && $conf->{lock} eq 'suspended') {
+ delete $conf->{lock}; # only for the lock check below, not written out
+ push @delete, 'lock'; # this is the real deal to write it out
+ }
+ push @delete, 'runningmachine' if $conf->{runningmachine};
+ }
+
PVE::QemuConfig->check_lock($conf) if !$skiplock;
foreach my $opt (keys %$revert) {
foreach my $opt (@delete) {
$modified->{$opt} = 1;
$conf = PVE::QemuConfig->load_config($vmid); # update/reload
- if (!defined($conf->{$opt}) && !defined($conf->{pending}->{$opt})) {
+
+ # value of what we want to delete, whether pending or not
+ my $val = $conf->{$opt} // $conf->{pending}->{$opt};
+ if (!defined($val)) {
warn "cannot delete '$opt' - not set in current configuration!\n";
$modified->{$opt} = 0;
next;
}
+ my $is_pending_val = defined($conf->{pending}->{$opt});
+ delete $conf->{pending}->{$opt};
if ($opt =~ m/^unused/) {
- my $drive = PVE::QemuServer::parse_drive($opt, $conf->{$opt});
+ my $drive = PVE::QemuServer::parse_drive($opt, $val);
PVE::QemuConfig->check_protection($conf, "can't remove unused disk '$drive->{file}'");
$rpcenv->check_vm_perm($authuser, $vmid, undef, ['VM.Config.Disk']);
if (PVE::QemuServer::try_deallocate_drive($storecfg, $vmid, $conf, $opt, $drive, $rpcenv, $authuser)) {
delete $conf->{$opt};
PVE::QemuConfig->write_config($vmid, $conf);
}
+ } elsif ($opt eq 'vmstate') {
+ PVE::QemuConfig->check_protection($conf, "can't remove vmstate '$val'");
+ if (PVE::QemuServer::try_deallocate_drive($storecfg, $vmid, $conf, $opt, { file => $val }, $rpcenv, $authuser, 1)) {
+ delete $conf->{$opt};
+ PVE::QemuConfig->write_config($vmid, $conf);
+ }
} elsif (PVE::QemuServer::is_valid_drivename($opt)) {
PVE::QemuConfig->check_protection($conf, "can't remove drive '$opt'");
$rpcenv->check_vm_perm($authuser, $vmid, undef, ['VM.Config.Disk']);
- PVE::QemuServer::vmconfig_register_unused_drive($storecfg, $vmid, $conf, PVE::QemuServer::parse_drive($opt, $conf->{pending}->{$opt}))
- if defined($conf->{pending}->{$opt});
- PVE::QemuServer::vmconfig_delete_pending_option($conf, $opt, $force);
+ PVE::QemuServer::vmconfig_register_unused_drive($storecfg, $vmid, $conf, PVE::QemuServer::parse_drive($opt, $val))
+ if $is_pending_val;
+ PVE::QemuConfig->add_to_pending_delete($conf, $opt, $force);
PVE::QemuConfig->write_config($vmid, $conf);
} elsif ($opt =~ m/^serial\d+$/) {
- if ($conf->{$opt} eq 'socket') {
+ if ($val eq 'socket') {
$rpcenv->check_vm_perm($authuser, $vmid, undef, ['VM.Config.HWType']);
} elsif ($authuser ne 'root@pam') {
die "only root can delete '$opt' config for real devices\n";
}
- PVE::QemuServer::vmconfig_delete_pending_option($conf, $opt, $force);
+ PVE::QemuConfig->add_to_pending_delete($conf, $opt, $force);
PVE::QemuConfig->write_config($vmid, $conf);
} elsif ($opt =~ m/^usb\d+$/) {
- if ($conf->{$opt} =~ m/spice/) {
+ if ($val =~ m/spice/) {
$rpcenv->check_vm_perm($authuser, $vmid, undef, ['VM.Config.HWType']);
} elsif ($authuser ne 'root@pam') {
die "only root can delete '$opt' config for real devices\n";
}
- PVE::QemuServer::vmconfig_delete_pending_option($conf, $opt, $force);
+ PVE::QemuConfig->add_to_pending_delete($conf, $opt, $force);
PVE::QemuConfig->write_config($vmid, $conf);
} else {
- PVE::QemuServer::vmconfig_delete_pending_option($conf, $opt, $force);
+ PVE::QemuConfig->add_to_pending_delete($conf, $opt, $force);
PVE::QemuConfig->write_config($vmid, $conf);
}
}
$conf = PVE::QemuConfig->load_config($vmid); # update/reload
next if defined($conf->{pending}->{$opt}) && ($param->{$opt} eq $conf->{pending}->{$opt}); # skip if nothing changed
- my ($arch, undef) = PVE::QemuServer::get_basic_machine_info($conf);
+ my $arch = PVE::QemuServer::get_vm_arch($conf);
if (PVE::QemuServer::is_valid_drivename($opt)) {
my $drive = PVE::QemuServer::parse_drive($opt, $param->{$opt});
} else {
$conf->{pending}->{$opt} = $param->{$opt};
}
- PVE::QemuServer::vmconfig_undelete_pending_option($conf, $opt);
+ PVE::QemuConfig->remove_from_pending_delete($conf, $opt);
PVE::QemuConfig->write_config($vmid, $conf);
}
# remove pending changes when nothing changed
$conf = PVE::QemuConfig->load_config($vmid); # update/reload
- my $changes = PVE::QemuServer::vmconfig_cleanup_pending($conf);
+ my $changes = PVE::QemuConfig->cleanup_pending($conf);
PVE::QemuConfig->write_config($vmid, $conf) if $changes;
return if !scalar(keys %{$conf->{pending}});
$conf = PVE::QemuConfig->load_config($vmid); # update/reload
+ my $errors = {};
if ($running) {
- my $errors = {};
PVE::QemuServer::vmconfig_hotplug_pending($vmid, $conf, $storecfg, $modified, $errors);
- raise_param_exc($errors) if scalar(keys %$errors);
} else {
- PVE::QemuServer::vmconfig_apply_pending($vmid, $conf, $storecfg, $running);
+ PVE::QemuServer::vmconfig_apply_pending($vmid, $conf, $storecfg, $running, $errors);
}
+ raise_param_exc($errors) if scalar(keys %$errors);
return;
};
}
});
-
__PACKAGE__->register_method({
name => 'destroy_vm',
path => '{vmid}',
node => get_standard_option('pve-node'),
vmid => get_standard_option('pve-vmid', { completion => \&PVE::QemuServer::complete_vmid_stopped }),
skiplock => get_standard_option('skiplock'),
+ purge => {
+ type => 'boolean',
+ description => "Remove vmid from backup cron jobs.",
+ optional => 1,
+ },
},
},
returns => {
my ($param) = @_;
my $rpcenv = PVE::RPCEnvironment::get();
-
my $authuser = $rpcenv->get_user();
-
my $vmid = $param->{vmid};
my $skiplock = $param->{skiplock};
# test if VM exists
my $conf = PVE::QemuConfig->load_config($vmid);
-
my $storecfg = PVE::Storage::config();
-
PVE::QemuConfig->check_protection($conf, "can't remove VM $vmid");
-
die "unable to remove VM $vmid - used in HA resources\n"
if PVE::HA::Config::vm_is_ha_managed($vmid);
- # do not allow destroy if there are replication jobs
- my $repl_conf = PVE::ReplicationConfig->new();
- $repl_conf->check_for_existing_jobs($vmid);
+ if (!$param->{purge}) {
+ # don't allow destroying a VM with replication jobs unless purge is set
+ my $repl_conf = PVE::ReplicationConfig->new();
+ $repl_conf->check_for_existing_jobs($vmid);
+ }
# early tests (repeat after locking)
die "VM $vmid is running - destroy failed\n"
my $upid = shift;
syslog('info', "destroy VM $vmid: $upid\n");
+ PVE::QemuConfig->lock_config($vmid, sub {
+ die "VM $vmid is running - destroy failed\n"
+ if (PVE::QemuServer::check_running($vmid));
- PVE::QemuServer::vm_destroy($storecfg, $vmid, $skiplock);
+ PVE::QemuServer::destroy_vm($storecfg, $vmid, $skiplock, { lock => 'destroyed' });
- PVE::AccessControl::remove_vm_access($vmid);
+ PVE::AccessControl::remove_vm_access($vmid);
+ PVE::Firewall::remove_vmfw_conf($vmid);
+ if ($param->{purge}) {
+ PVE::ReplicationConfig::remove_vmid_jobs($vmid);
+ PVE::VZDump::Plugin::remove_vmid_from_backup_jobs($vmid);
+ }
- PVE::Firewall::remove_vmfw_conf($vmid);
+ # only now remove the zombie config, else we could have a reuse race
+ PVE::QemuConfig->destroy_config($vmid);
+ });
};
return $rpcenv->fork_worker('qmdestroy', $vmid, $authuser, $realcmd);
if ($node ne 'localhost' && $node ne PVE::INotify::nodename()) {
(undef, $family) = PVE::Cluster::remote_node_ip($node);
- my $sshinfo = PVE::Cluster::get_ssh_info($node);
+ my $sshinfo = PVE::SSHInfo::get_ssh_info($node);
# NOTE: kvm VNC traffic is already TLS encrypted or is known unsecure
- $remcmd = PVE::Cluster::ssh_info_to_command($sshinfo, $use_serial ? '-t' : '-T');
+ $remcmd = PVE::SSHInfo::ssh_info_to_command($sshinfo, $use_serial ? '-t' : '-T');
} else {
$family = PVE::Tools::get_host_address_family($node);
}
if ($node ne 'localhost' && $node ne PVE::INotify::nodename()) {
(undef, $family) = PVE::Cluster::remote_node_ip($node);
- my $sshinfo = PVE::Cluster::get_ssh_info($node);
- $remcmd = PVE::Cluster::ssh_info_to_command($sshinfo, '-t');
+ my $sshinfo = PVE::SSHInfo::get_ssh_info($node);
+ $remcmd = PVE::SSHInfo::ssh_info_to_command($sshinfo, '-t');
push @$remcmd, '--';
} else {
$family = PVE::Tools::get_host_address_family($node);
my ($ticket, undef, $remote_viewer_config) =
PVE::AccessControl::remote_viewer_config($authuser, $vmid, $node, $proxy, $title, $port);
- PVE::QemuServer::vm_mon_cmd($vmid, "set_password", protocol => 'spice', password => $ticket);
- PVE::QemuServer::vm_mon_cmd($vmid, "expire_password", protocol => 'spice', time => "+30");
+ mon_cmd($vmid, "set_password", protocol => 'spice', password => $ticket);
+ mon_cmd($vmid, "expire_password", protocol => 'spice', time => "+30");
return $remote_viewer_config;
}});
{ subdir => 'current' },
{ subdir => 'start' },
{ subdir => 'stop' },
+ { subdir => 'reset' },
+ { subdir => 'shutdown' },
+ { subdir => 'suspend' },
+ { subdir => 'reboot' },
];
return $res;
description => "CIDR of the (sub) network that is used for migration.",
optional => 1,
},
- machine => get_standard_option('pve-qm-machine'),
+ machine => get_standard_option('pve-qemu-machine'),
targetstorage => {
description => "Target storage for the migration. (Can be '1' to use the same storage id as on the source node.)",
type => 'string',
my ($param) = @_;
my $rpcenv = PVE::RPCEnvironment::get();
-
my $authuser = $rpcenv->get_user();
my $node = extract_param($param, 'node');
-
my $vmid = extract_param($param, 'vmid');
my $machine = extract_param($param, 'machine');
- my $stateuri = extract_param($param, 'stateuri');
- raise_param_exc({ stateuri => "Only root may use this option." })
- if $stateuri && $authuser ne 'root@pam';
-
- my $skiplock = extract_param($param, 'skiplock');
- raise_param_exc({ skiplock => "Only root may use this option." })
- if $skiplock && $authuser ne 'root@pam';
-
- my $migratedfrom = extract_param($param, 'migratedfrom');
- raise_param_exc({ migratedfrom => "Only root may use this option." })
- if $migratedfrom && $authuser ne 'root@pam';
-
- my $migration_type = extract_param($param, 'migration_type');
- raise_param_exc({ migration_type => "Only root may use this option." })
- if $migration_type && $authuser ne 'root@pam';
-
- my $migration_network = extract_param($param, 'migration_network');
- raise_param_exc({ migration_network => "Only root may use this option." })
- if $migration_network && $authuser ne 'root@pam';
+ my $get_root_param = sub {
+ my $value = extract_param($param, $_[0]);
+ raise_param_exc({ "$_[0]" => "Only root may use this option." })
+ if $value && $authuser ne 'root@pam';
+ return $value;
+ };
- my $targetstorage = extract_param($param, 'targetstorage');
- raise_param_exc({ targetstorage => "Only root may use this option." })
- if $targetstorage && $authuser ne 'root@pam';
+ my $stateuri = $get_root_param->('stateuri');
+ my $skiplock = $get_root_param->('skiplock');
+ my $migratedfrom = $get_root_param->('migratedfrom');
+ my $migration_type = $get_root_param->('migration_type');
+ my $migration_network = $get_root_param->('migration_network');
+ my $targetstorage = $get_root_param->('targetstorage');
raise_param_exc({ targetstorage => "targetstorage can only by used with migratedfrom." })
if $targetstorage && !$migratedfrom;
# read spice ticket from STDIN
my $spice_ticket;
- if ($stateuri && ($stateuri eq 'tcp') && $migratedfrom && ($rpcenv->{type} eq 'cli')) {
+ if ($stateuri && ($stateuri eq 'tcp' || $stateuri eq 'unix') && $migratedfrom && ($rpcenv->{type} eq 'cli')) {
if (defined(my $line = <STDIN>)) {
chomp $line;
$spice_ticket = $line;
my $storecfg = PVE::Storage::config();
- if (PVE::HA::Config::vm_is_ha_managed($vmid) && !$stateuri &&
- $rpcenv->{type} ne 'ha') {
-
+ if (PVE::HA::Config::vm_is_ha_managed($vmid) && !$stateuri && $rpcenv->{type} ne 'ha') {
my $hacmd = sub {
my $upid = shift;
- my $service = "vm:$vmid";
-
- my $cmd = ['ha-manager', 'set', $service, '--state', 'started'];
-
print "Requesting HA start for VM $vmid\n";
+ my $cmd = ['ha-manager', 'set', "vm:$vmid", '--state', 'started'];
PVE::Tools::run_command($cmd);
-
return;
};
PVE::QemuServer::vm_start($storecfg, $vmid, $stateuri, $skiplock, $migratedfrom, undef,
$machine, $spice_ticket, $migration_network, $migration_type, $targetstorage);
-
return;
};
my ($param) = @_;
my $rpcenv = PVE::RPCEnvironment::get();
-
my $authuser = $rpcenv->get_user();
my $node = extract_param($param, 'node');
-
my $vmid = extract_param($param, 'vmid');
my $skiplock = extract_param($param, 'skiplock');
my $hacmd = sub {
my $upid = shift;
- my $service = "vm:$vmid";
-
- my $cmd = ['ha-manager', 'set', $service, '--state', 'stopped'];
-
print "Requesting HA stop for VM $vmid\n";
+ my $cmd = ['ha-manager', 'crm-command', 'stop', "vm:$vmid", '0'];
PVE::Tools::run_command($cmd);
-
return;
};
PVE::QemuServer::vm_stop($storecfg, $vmid, $skiplock, 0,
$param->{timeout}, 0, 1, $keepActive, $migratedfrom);
-
return;
};
my ($param) = @_;
my $rpcenv = PVE::RPCEnvironment::get();
-
my $authuser = $rpcenv->get_user();
my $node = extract_param($param, 'node');
-
my $vmid = extract_param($param, 'vmid');
my $skiplock = extract_param($param, 'skiplock');
#
# checking the qmp status here to get feedback to the gui/cli/api
# and the status query should not take too long
- my $qmpstatus;
- eval {
- $qmpstatus = PVE::QemuServer::vm_qmp_command($vmid, { execute => "query-status" }, 0);
+ my $qmpstatus = eval {
+ PVE::QemuConfig::assert_config_exists_on_node($vmid);
+ mon_cmd($vmid, "query-status");
};
my $err = $@ if $@;
}
}
- if (PVE::HA::Config::vm_is_ha_managed($vmid) &&
- ($rpcenv->{type} ne 'ha')) {
+ if (PVE::HA::Config::vm_is_ha_managed($vmid) && $rpcenv->{type} ne 'ha') {
+ my $timeout = $param->{timeout} // 60;
my $hacmd = sub {
my $upid = shift;
- my $service = "vm:$vmid";
-
- my $cmd = ['ha-manager', 'set', $service, '--state', 'stopped'];
-
print "Requesting HA stop for VM $vmid\n";
+ my $cmd = ['ha-manager', 'crm-command', 'stop', "vm:$vmid", "$timeout"];
PVE::Tools::run_command($cmd);
-
return;
};
PVE::QemuServer::vm_stop($storecfg, $vmid, $skiplock, 0, $param->{timeout},
$shutdown, $param->{forceStop}, $keepActive);
-
return;
};
}
}});
+__PACKAGE__->register_method({
+ name => 'vm_reboot',
+ path => '{vmid}/status/reboot',
+ method => 'POST',
+ protected => 1,
+ proxyto => 'node',
+ description => "Reboot the VM by shutting it down, and starting it again. Applies pending changes.",
+ permissions => {
+ check => ['perm', '/vms/{vmid}', [ 'VM.PowerMgmt' ]],
+ },
+ parameters => {
+ additionalProperties => 0,
+ properties => {
+ node => get_standard_option('pve-node'),
+ vmid => get_standard_option('pve-vmid',
+ { completion => \&PVE::QemuServer::complete_vmid_running }),
+ timeout => {
+ description => "Wait maximal timeout seconds for the shutdown.",
+ type => 'integer',
+ minimum => 0,
+ optional => 1,
+ },
+ },
+ },
+ returns => {
+ type => 'string',
+ },
+ code => sub {
+ my ($param) = @_;
+
+ my $rpcenv = PVE::RPCEnvironment::get();
+ my $authuser = $rpcenv->get_user();
+
+ my $node = extract_param($param, 'node');
+ my $vmid = extract_param($param, 'vmid');
+
+ my $qmpstatus = eval {
+ PVE::QemuConfig::assert_config_exists_on_node($vmid);
+ mon_cmd($vmid, "query-status");
+ };
+ my $err = $@ if $@;
+
+ if (!$err && $qmpstatus->{status} eq "paused") {
+ die "VM is paused - cannot shutdown\n";
+ }
+
+ die "VM $vmid not running\n" if !PVE::QemuServer::check_running($vmid);
+
+ my $realcmd = sub {
+ my $upid = shift;
+
+ syslog('info', "requesting reboot of VM $vmid: $upid\n");
+ PVE::QemuServer::vm_reboot($vmid, $param->{timeout});
+ return;
+ };
+
+ return $rpcenv->fork_worker('qmreboot', $vmid, $authuser, $realcmd);
+ }});
+
__PACKAGE__->register_method({
name => 'vm_suspend',
path => '{vmid}/status/suspend',
proxyto => 'node',
description => "Suspend virtual machine.",
permissions => {
+ description => "You need 'VM.PowerMgmt' on /vms/{vmid}, and if you have set 'todisk',".
+ " you need also 'VM.Config.Disk' on /vms/{vmid} and 'Datastore.AllocateSpace'".
+ " on the storage for the vmstate.",
check => ['perm', '/vms/{vmid}', [ 'VM.PowerMgmt' ]],
},
parameters => {
my ($param) = @_;
my $rpcenv = PVE::RPCEnvironment::get();
-
my $authuser = $rpcenv->get_user();
my $node = extract_param($param, 'node');
-
my $vmid = extract_param($param, 'vmid');
my $todisk = extract_param($param, 'todisk') // 0;
die "Cannot suspend HA managed VM to disk\n"
if $todisk && PVE::HA::Config::vm_is_ha_managed($vmid);
- my $taskname = $todisk ? 'qmsuspend' : 'qmpause';
+ # early check for storage permission, for better user feedback
+ if ($todisk) {
+ $rpcenv->check_vm_perm($authuser, $vmid, undef, ['VM.Config.Disk']);
+
+ if (!$statestorage) {
+ # get statestorage from config if none is given
+ my $conf = PVE::QemuConfig->load_config($vmid);
+ my $storecfg = PVE::Storage::config();
+ $statestorage = PVE::QemuServer::find_vmstate_storage($conf, $storecfg);
+ }
+
+ $rpcenv->check($authuser, "/storage/$statestorage", ['Datastore.AllocateSpace']);
+ }
my $realcmd = sub {
my $upid = shift;
return;
};
+ my $taskname = $todisk ? 'qmsuspend' : 'qmpause';
return $rpcenv->fork_worker($taskname, $vmid, $authuser, $realcmd);
}});
my ($param) = @_;
my $rpcenv = PVE::RPCEnvironment::get();
-
- my $authuser = $rpcenv->get_user();
+ my $authuser = $rpcenv->get_user();
my $node = extract_param($param, 'node');
-
my $vmid = extract_param($param, 'vmid');
-
my $newid = extract_param($param, 'newid');
-
my $pool = extract_param($param, 'pool');
-
- if (defined($pool)) {
- $rpcenv->check_pool_exist($pool);
- }
+ $rpcenv->check_pool_exist($pool) if defined($pool);
my $snapname = extract_param($param, 'snapname');
-
my $storage = extract_param($param, 'storage');
-
my $format = extract_param($param, 'format');
-
my $target = extract_param($param, 'target');
my $localnode = PVE::INotify::nodename();
- undef $target if $target && ($target eq $localnode || $target eq 'localhost');
-
- PVE::Cluster::check_node_exists($target) if $target;
+ if ($target && ($target eq $localnode || $target eq 'localhost')) {
+ undef $target;
+ } else {
+ PVE::Cluster::check_node_exists($target);
+ }
my $storecfg = PVE::Storage::config();
}
}
- PVE::Cluster::check_cfs_quorum();
+ PVE::Cluster::check_cfs_quorum();
my $running = PVE::QemuServer::check_running($vmid) || 0;
my $shared_lock = $running ? 0 : 1;
my $clonefn = sub {
-
- # do all tests after lock
- # we also try to do all tests before we fork the worker
+ # do all tests after lock but before forking worker - if possible
my $conf = PVE::QemuConfig->load_config($vmid);
-
PVE::QemuConfig->check_lock($conf);
my $verify_running = PVE::QemuServer::check_running($vmid) || 0;
-
die "unexpected state change\n" if $verify_running != $running;
die "snapshot '$snapname' does not exist\n"
if $snapname && !defined( $conf->{snapshots}->{$snapname});
- my $full = extract_param($param, 'full');
- if (!defined($full)) {
- $full = !PVE::QemuConfig->is_template($conf);
- }
+ my $full = extract_param($param, 'full') // !PVE::QemuConfig->is_template($conf);
die "parameter 'storage' not allowed for linked clones\n"
if defined($storage) && !$full;
my $sharedvm = &$check_storage_access_clone($rpcenv, $authuser, $storecfg, $oldconf, $storage);
- die "can't clone VM to node '$target' (VM uses local storage)\n" if $target && !$sharedvm;
+ die "can't clone VM to node '$target' (VM uses local storage)\n"
+ if $target && !$sharedvm;
my $conffile = PVE::QemuConfig->config_file($newid);
-
die "unable to create VM $newid: config file already exists\n"
if -f $conffile;
my $smbios1 = PVE::QemuServer::parse_smbios1($newconf->{smbios1} || '');
$smbios1->{uuid} = PVE::QemuServer::generate_uuid();
$newconf->{smbios1} = PVE::QemuServer::print_smbios1($smbios1);
-
- # auto generate a new vmgenid if the option was set
+ # auto generate a new vmgenid only if the option was set for template
if ($newconf->{vmgenid}) {
$newconf->{vmgenid} = PVE::QemuServer::generate_uuid();
}
if ($param->{name}) {
$newconf->{name} = $param->{name};
} else {
- if ($oldconf->{name}) {
- $newconf->{name} = "Copy-of-$oldconf->{name}";
- } else {
- $newconf->{name} = "Copy-of-VM-$vmid";
- }
+ $newconf->{name} = "Copy-of-VM-" . ($oldconf->{name} // $vmid);
}
if ($param->{description}) {
}
# create empty/temp config - this fails if VM already exists on other node
+ # FIXME use PVE::QemuConfig->create_and_lock_config and adapt code
PVE::Tools::file_set_contents($conffile, "# qmclone temporary file\nlock: clone\n");
my $realcmd = sub {
$newid, $storage, $format, $fullclone->{$opt}, $newvollist,
$jobs, $skipcomplete, $oldconf->{agent}, $clonelimit);
- $newconf->{$opt} = PVE::QemuServer::print_drive($vmid, $newdrive);
+ $newconf->{$opt} = PVE::QemuServer::print_drive($newdrive);
PVE::QemuConfig->write_config($newid, $newconf);
$i++;
unlink $conffile;
eval { PVE::QemuServer::qemu_blockjobs_cancel($vmid, $jobs) };
-
sleep 1; # some storage like rbd need to wait before release volume - really?
foreach my $volid (@$newvollist) {
eval { PVE::Storage::vdisk_free($storecfg, $volid); };
warn $@ if $@;
}
+
+ PVE::Firewall::remove_vmfw_conf($newid);
+
die "clone failed: $err";
}
my ($param) = @_;
my $rpcenv = PVE::RPCEnvironment::get();
-
my $authuser = $rpcenv->get_user();
my $node = extract_param($param, 'node');
-
my $vmid = extract_param($param, 'vmid');
-
my $digest = extract_param($param, 'digest');
-
my $disk = extract_param($param, 'disk');
-
my $storeid = extract_param($param, 'storage');
-
my $format = extract_param($param, 'format');
my $storecfg = PVE::Storage::config();
my $updatefn = sub {
-
my $conf = PVE::QemuConfig->load_config($vmid);
-
PVE::QemuConfig->check_lock($conf);
- die "checksum missmatch (file change by other user?)\n"
+ die "VM config checksum missmatch (file change by other user?)\n"
if $digest && $digest ne $conf->{digest};
die "disk '$disk' does not exist\n" if !$conf->{$disk};
my $drive = PVE::QemuServer::parse_drive($disk, $conf->{$disk});
- my $old_volid = $drive->{file} || die "disk '$disk' has no associated volume\n";
-
+ die "disk '$disk' has no associated volume\n" if !$drive->{file};
die "you can't move a cdrom\n" if PVE::QemuServer::drive_is_cdrom($drive, 1);
+ my $old_volid = $drive->{file};
my $oldfmt;
my ($oldstoreid, $oldvolname) = PVE::Storage::parse_volume_id($old_volid);
if ($oldvolname =~ m/\.(raw|qcow2|vmdk)$/){
$oldfmt = $1;
}
- die "you can't move on the same storage with same format\n" if $oldstoreid eq $storeid &&
+ die "you can't move to the same storage with same format\n" if $oldstoreid eq $storeid &&
(!$format || !$oldfmt || $oldfmt eq $format);
# this only checks snapshots because $disk is passed!
PVE::Storage::activate_volumes($storecfg, [ $drive->{file} ]);
my $realcmd = sub {
-
my $newvollist = [];
eval {
my $newdrive = PVE::QemuServer::clone_disk($storecfg, $vmid, $running, $disk, $drive, undef,
$vmid, $storeid, $format, 1, $newvollist, undef, undef, undef, $movelimit);
- $conf->{$disk} = PVE::QemuServer::print_drive($vmid, $newdrive);
+ $conf->{$disk} = PVE::QemuServer::print_drive($newdrive);
PVE::QemuConfig->add_unused_volume($conf, $old_volid) if !$param->{delete};
PVE::QemuConfig->write_config($vmid, $conf);
- if ($running && PVE::QemuServer::parse_guest_agent($conf)->{fstrim_cloned_disks} && PVE::QemuServer::qga_check_running($vmid)) {
- eval { PVE::QemuServer::vm_mon_cmd($vmid, "guest-fstrim"); };
+ my $do_trim = PVE::QemuServer::parse_guest_agent($conf)->{fstrim_cloned_disks};
+ if ($running && $do_trim && PVE::QemuServer::qga_check_running($vmid)) {
+ eval { mon_cmd($vmid, "guest-fstrim") };
}
eval {
warn $@ if $@;
};
if (my $err = $@) {
-
- foreach my $volid (@$newvollist) {
- eval { PVE::Storage::vdisk_free($storecfg, $volid); };
- warn $@ if $@;
- }
+ foreach my $volid (@$newvollist) {
+ eval { PVE::Storage::vdisk_free($storecfg, $volid) };
+ warn $@ if $@;
+ }
die "storage migration failed: $err";
}
return PVE::QemuConfig->lock_config($vmid, $updatefn);
}});
+# Collect every volume of the VM that lives on NON-shared storage.
+# Returns a hash ref mapping volid => attribute hash (the $attr passed by
+# foreach_volid, e.g. cdrom flags), with the volid itself folded into each
+# entry so the result is self-describing when returned over the API.
+# NOTE(review): exact attr keys depend on PVE::QemuServer::foreach_volid —
+# confirm against that implementation.
+my $check_vm_disks_local = sub {
+ my ($storecfg, $vmconf, $vmid) = @_;
+
+ my $local_disks = {};
+
+ # add some more information to the disks e.g. cdrom
+ PVE::QemuServer::foreach_volid($vmconf, sub {
+ my ($volid, $attr) = @_;
+
+ my ($storeid, $volname) = PVE::Storage::parse_volume_id($volid, 1);
+ if ($storeid) {
+ # storage-backed volume: nothing to report if the storage is shared
+ my $scfg = PVE::Storage::storage_config($storecfg, $storeid);
+ return if $scfg->{shared};
+ }
+ # The shared attr here is just a special case where the vdisk
+ # is marked as shared manually
+ return if $attr->{shared};
+ # an empty CD-ROM drive has no local data to migrate
+ return if $attr->{cdrom} and $volid eq "none";
+
+ if (exists $local_disks->{$volid}) {
+ # volid seen before (callback can visit a volume more than once,
+ # e.g. via snapshots) - merge the new attributes into the entry
+ @{$local_disks->{$volid}}{keys %$attr} = values %$attr
+ } else {
+ $local_disks->{$volid} = $attr;
+ # ensure volid is present in case it's needed
+ $local_disks->{$volid}->{volid} = $volid;
+ }
+ });
+
+ return $local_disks;
+};
+
+# Read-only helper for the GUI: reports everything that would constrain a
+# migration of this VM (running state, nodes with the required storages,
+# local disks and local resources) without actually migrating anything.
+__PACKAGE__->register_method({
+    name => 'migrate_vm_precondition',
+    path => '{vmid}/migrate',
+    method => 'GET',
+    protected => 1,
+    proxyto => 'node',
+    description => "Get preconditions for migration.",
+    permissions => {
+	check => ['perm', '/vms/{vmid}', [ 'VM.Migrate' ]],
+    },
+    parameters => {
+	additionalProperties => 0,
+	properties => {
+	    node => get_standard_option('pve-node'),
+	    vmid => get_standard_option('pve-vmid', { completion => \&PVE::QemuServer::complete_vmid }),
+	    target => get_standard_option('pve-node', {
+		description => "Target node.",
+		completion => \&PVE::Cluster::complete_migration_target,
+		optional => 1,
+	    }),
+	},
+    },
+    returns => {
+	type => "object",
+	properties => {
+	    running => { type => 'boolean' },
+	    allowed_nodes => {
+		type => 'array',
+		optional => 1,
+		description => "List of nodes allowed for offline migration, only passed if VM is offline",
+	    },
+	    not_allowed_nodes => {
+		type => 'object',
+		optional => 1,
+		description => "List of not allowed nodes with additional information, only passed if VM is offline",
+	    },
+	    local_disks => {
+		type => 'array',
+		description => "List of local disks including CD-ROM, unused and not referenced disks",
+	    },
+	    local_resources => {
+		type => 'array',
+		description => "List of local resources, e.g. pci, usb",
+	    },
+	},
+    },
+    code => sub {
+	my ($param) = @_;
+
+	my $rpcenv = PVE::RPCEnvironment::get();
+	my $authuser = $rpcenv->get_user();
+
+	PVE::Cluster::check_cfs_quorum();
+
+	my $res = {};
+
+	my $vmid = extract_param($param, 'vmid');
+	my $target = extract_param($param, 'target');
+	my $localnode = PVE::INotify::nodename();
+
+	# test if VM exists
+	my $vmconf = PVE::QemuConfig->load_config($vmid);
+	my $storecfg = PVE::Storage::config();
+
+	# try to detect errors early
+	PVE::QemuConfig->check_lock($vmconf);
+
+	$res->{running} = PVE::QemuServer::check_running($vmid) ? 1 : 0;
+
+	# if vm is not running, return target nodes where local storage is available
+	# for offline migration
+	if (!$res->{running}) {
+	    $res->{allowed_nodes} = [];
+	    my $checked_nodes = PVE::QemuServer::check_local_storage_availability($vmconf, $storecfg);
+	    delete $checked_nodes->{$localnode};
+
+	    # a node is allowed iff it has no unavailable storages
+	    foreach my $node (keys %$checked_nodes) {
+		push @{$res->{allowed_nodes}}, $node
+		    if !defined($checked_nodes->{$node}->{unavailable_storages});
+	    }
+	    $res->{not_allowed_nodes} = $checked_nodes;
+	}
+
+	my $local_disks = $check_vm_disks_local->($storecfg, $vmconf, $vmid);
+	$res->{local_disks} = [ values %$local_disks ];
+
+	$res->{local_resources} = PVE::QemuServer::check_local_resources($vmconf, 1);
+
+	return $res;
+    }});
+
__PACKAGE__->register_method({
name => 'migrate_vm',
path => '{vmid}/migrate',
properties => {
node => get_standard_option('pve-node'),
vmid => get_standard_option('pve-vmid', { completion => \&PVE::QemuServer::complete_vmid }),
- target => get_standard_option('pve-node', {
+ target => get_standard_option('pve-node', {
description => "Target node.",
completion => \&PVE::Cluster::complete_migration_target,
}),
online => {
type => 'boolean',
- description => "Use online/live migration.",
+ description => "Use online/live migration if VM is running. Ignored if VM is stopped.",
optional => 1,
},
force => {
targetstorage => get_standard_option('pve-storage-id', {
description => "Default target storage.",
optional => 1,
- completion => \&PVE::QemuServer::complete_storage,
+ completion => \&PVE::QemuServer::complete_migration_storage,
}),
bwlimit => {
description => "Override I/O bandwidth limit (in KiB/s).",
my ($param) = @_;
my $rpcenv = PVE::RPCEnvironment::get();
-
my $authuser = $rpcenv->get_user();
my $target = extract_param($param, 'target');
my $vmid = extract_param($param, 'vmid');
- raise_param_exc({ targetstorage => "Live storage migration can only be done online." })
- if !$param->{online} && $param->{targetstorage};
-
raise_param_exc({ force => "Only root may use this option." })
if $param->{force} && $authuser ne 'root@pam';
PVE::QemuConfig->check_lock($conf);
if (PVE::QemuServer::check_running($vmid)) {
- die "cant migrate running VM without --online\n"
- if !$param->{online};
+ die "can't migrate running VM without --online\n" if !$param->{online};
+ } else {
+ warn "VM isn't running. Doing offline migration instead.\n" if $param->{online};
+ $param->{online} = 0;
}
+ raise_param_exc({ targetstorage => "Live storage migration can only be done online." })
+ if !$param->{online} && $param->{targetstorage};
+
my $storecfg = PVE::Storage::config();
if( $param->{targetstorage}) {
my $hacmd = sub {
my $upid = shift;
- my $service = "vm:$vmid";
-
- my $cmd = ['ha-manager', 'migrate', $service, $target];
-
print "Requesting HA migration for VM $vmid to node $target\n";
+ my $cmd = ['ha-manager', 'migrate', "vm:$vmid", $target];
PVE::Tools::run_command($cmd);
-
return;
};
my $res = '';
eval {
- $res = PVE::QemuServer::vm_human_monitor_command($vmid, $param->{command});
+ $res = PVE::QemuServer::Monitor::hmp_cmd($vmid, $param->{command});
};
$res = "ERROR: $@" if $@;
my (undef, undef, undef, undef, undef, undef, $format) =
PVE::Storage::parse_volname($storecfg, $drive->{file});
- die "can't resize volume: $disk if snapshot exists\n"
+ die "can't resize volume: $disk if snapshot exists\n"
if %{$conf->{snapshots}} && $format eq 'qcow2';
my $volid = $drive->{file};
PVE::Storage::activate_volumes($storecfg, [$volid]);
my $size = PVE::Storage::volume_size_info($storecfg, $volid, 5);
+ die "Could not determine current size of volume '$volid'\n" if !defined($size);
+
die "internal error" if $sizestr !~ m/^(\+)?(\d+(\.\d+)?)([KMGT])?$/;
my ($ext, $newsize, $unit) = ($1, $2, $4);
if ($unit) {
PVE::QemuServer::qemu_block_resize($vmid, "drive-$disk", $storecfg, $volid, $newsize);
$drive->{size} = $newsize;
- $conf->{$disk} = PVE::QemuServer::print_drive($vmid, $drive);
+ $conf->{$disk} = PVE::QemuServer::print_drive($drive);
PVE::QemuConfig->write_config($vmid, $conf);
};
die "unable to use snapshot name 'current' (reserved name)\n"
if $snapname eq 'current';
+ die "unable to use snapshot name 'pending' (reserved name)\n"
+ if lc($snapname) eq 'pending';
+
my $realcmd = sub {
PVE::Cluster::log_msg('info', $authuser, "snapshot VM $vmid: $snapname");
- PVE::QemuConfig->snapshot_create($vmid, $snapname, $param->{vmstate},
+ PVE::QemuConfig->snapshot_create($vmid, $snapname, $param->{vmstate},
$param->{description});
};
return undef;
}});
+# Dump one part (user/network/meta) of the cloudinit configuration that
+# would be generated for this VM from its current config.
+__PACKAGE__->register_method({
+    name => 'cloudinit_generated_config_dump',
+    path => '{vmid}/cloudinit/dump',
+    method => 'GET',
+    proxyto => 'node',
+    description => "Get automatically generated cloudinit config.",
+    permissions => {
+	check => ['perm', '/vms/{vmid}', [ 'VM.Audit' ]],
+    },
+    parameters => {
+	additionalProperties => 0,
+	properties => {
+	    node => get_standard_option('pve-node'),
+	    vmid => get_standard_option('pve-vmid', { completion => \&PVE::QemuServer::complete_vmid }),
+	    type => {
+		description => 'Config type.',
+		type => 'string',
+		enum => ['user', 'network', 'meta'],
+	    },
+	},
+    },
+    returns => {
+	type => 'string',
+    },
+    code => sub {
+	my ($param) = @_;
+
+	my $vmid = $param->{vmid};
+	# load the current VM config and render the requested cloudinit part
+	my $vmconf = PVE::QemuConfig->load_config($vmid);
+
+	return PVE::QemuServer::Cloudinit::dump_cloudinit_config($vmconf, $vmid, $param->{type});
+    }});
+
1;