use warnings;
use Cwd 'abs_path';
use Net::SSLeay;
-use UUID;
use POSIX;
use IO::Socket::IP;
use URI::Escape;
use PVE::Cluster qw (cfs_read_file cfs_write_file);;
+use PVE::RRD;
use PVE::SafeSyslog;
use PVE::Tools qw(extract_param);
use PVE::Exception qw(raise raise_param_exc raise_perm_exc);
use PVE::GuestHelpers;
use PVE::QemuConfig;
use PVE::QemuServer;
+use PVE::QemuServer::Monitor qw(mon_cmd);
use PVE::QemuMigrate;
use PVE::RPCEnvironment;
use PVE::AccessControl;
use PVE::Firewall;
use PVE::API2::Firewall::VM;
use PVE::API2::Qemu::Agent;
+use PVE::VZDump::Plugin;
+use PVE::DataCenterConfig;
+use PVE::SSHInfo;
BEGIN {
if (!$ENV{PVE_GENERATING_DOCS}) {
my $isCDROM = PVE::QemuServer::drive_is_cdrom($drive);
my $volid = $drive->{file};
+ my ($storeid, $volname) = PVE::Storage::parse_volume_id($volid, 1);
- if (!$volid || ($volid eq 'none' || $volid eq 'cloudinit')) {
- # nothing to check
- } elsif ($volid =~ m/^(([^:\s]+):)?(cloudinit)$/) {
+ if (!$volid || ($volid eq 'none' || $volid eq 'cloudinit' || (defined($volname) && $volname eq 'cloudinit'))) {
# nothing to check
} elsif ($isCDROM && ($volid eq 'cdrom')) {
$rpcenv->check($authuser, "/", ['Sys.Console']);
my ($ds, $disk) = @_;
my $volid = $disk->{file};
+ my ($storeid, $volname) = PVE::Storage::parse_volume_id($volid, 1);
if (!$volid || $volid eq 'none' || $volid eq 'cdrom') {
delete $disk->{size};
- $res->{$ds} = PVE::QemuServer::print_drive($vmid, $disk);
- } elsif ($volid =~ m!^(?:([^/:\s]+):)?cloudinit$!) {
- my $storeid = $1 || $default_storage;
+ $res->{$ds} = PVE::QemuServer::print_drive($disk);
+ } elsif (defined($volname) && $volname eq 'cloudinit') {
+ $storeid = $storeid // $default_storage;
die "no storage ID specified (and no default storage)\n" if !$storeid;
my $scfg = PVE::Storage::storage_config($storecfg, $storeid);
my $name = "vm-$vmid-cloudinit";
$disk->{media} = 'cdrom';
push @$vollist, $volid;
delete $disk->{format}; # no longer needed
- $res->{$ds} = PVE::QemuServer::print_drive($vmid, $disk);
+ $res->{$ds} = PVE::QemuServer::print_drive($disk);
} elsif ($volid =~ $NEW_DISK_RE) {
my ($storeid, $size) = ($2 || $default_storage, $3);
die "no storage ID specified (and no default storage)\n" if !$storeid;
$disk->{file} = $volid;
$disk->{size} = PVE::Tools::convert_size($size, 'kb' => 'b');
delete $disk->{format}; # no longer needed
- $res->{$ds} = PVE::QemuServer::print_drive($vmid, $disk);
+ $res->{$ds} = PVE::QemuServer::print_drive($disk);
} else {
PVE::Storage::check_volume_access($rpcenv, $authuser, $storecfg, $vmid, $volid);
if ($volid_is_new) {
- my ($storeid, $volname) = PVE::Storage::parse_volume_id($volid, 1);
-
PVE::Storage::activate_volumes($storecfg, [ $volid ]) if $storeid;
my $size = PVE::Storage::volume_size_info($storecfg, $volid);
- die "volume $volid does not exists\n" if !$size;
+ die "volume $volid does not exist\n" if !$size;
$disk->{size} = $size;
}
- $res->{$ds} = PVE::QemuServer::print_drive($vmid, $disk);
+ $res->{$ds} = PVE::QemuServer::print_drive($disk);
}
};
'startup' => 1,
'tdf' => 1,
'template' => 1,
+ 'tags' => 1,
};
my $vmpoweroptions = {
$rpcenv->check_vm_perm($authuser, $vmid, $pool, ['VM.Config.Disk']);
} elsif ($cloudinitoptions->{$opt} || ($opt =~ m/^(?:net|ipconfig)\d+$/)) {
$rpcenv->check_vm_perm($authuser, $vmid, $pool, ['VM.Config.Network']);
+ } elsif ($opt eq 'vmstate') {
+ # the user needs Disk and PowerMgmt privileges to change the vmstate
+ # also needs privileges on the storage, that will be checked later
+ $rpcenv->check_vm_perm($authuser, $vmid, $pool, ['VM.Config.Disk', 'VM.PowerMgmt' ]);
} else {
# catches hostpci\d+, args, lock, etc.
# new options will be checked here
my ($param) = @_;
my $rpcenv = PVE::RPCEnvironment::get();
-
my $authuser = $rpcenv->get_user();
my $node = extract_param($param, 'node');
-
my $vmid = extract_param($param, 'vmid');
my $archive = extract_param($param, 'archive');
my $is_restore = !!$archive;
- my $storage = extract_param($param, 'storage');
-
+ my $bwlimit = extract_param($param, 'bwlimit');
my $force = extract_param($param, 'force');
-
- my $unique = extract_param($param, 'unique');
-
my $pool = extract_param($param, 'pool');
-
- my $bwlimit = extract_param($param, 'bwlimit');
-
my $start_after_create = extract_param($param, 'start');
-
- my $filename = PVE::QemuConfig->config_file($vmid);
-
- my $storecfg = PVE::Storage::config();
+ my $storage = extract_param($param, 'storage');
+ my $unique = extract_param($param, 'unique');
if (defined(my $ssh_keys = $param->{sshkeys})) {
$ssh_keys = URI::Escape::uri_unescape($ssh_keys);
PVE::Cluster::check_cfs_quorum();
+ my $filename = PVE::QemuConfig->config_file($vmid);
+ my $storecfg = PVE::Storage::config();
+
if (defined($pool)) {
$rpcenv->check_pool_exist($pool);
}
raise_param_exc({ $opt => "unable to parse drive options" }) if !$drive;
PVE::QemuServer::cleanup_drive_path($opt, $storecfg, $drive);
- $param->{$opt} = PVE::QemuServer::print_drive($vmid, $drive);
+ $param->{$opt} = PVE::QemuServer::print_drive($drive);
}
}
}
PVE::AccessControl::add_vm_to_pool($vmid, $pool) if $pool;
-
- if ($start_after_create) {
- eval { PVE::API2::Qemu->vm_start({ vmid => $vmid, node => $node }) };
- warn $@ if $@;
- }
};
# ensure no old replication state exists
PVE::ReplicationState::delete_guest_states($vmid);
- return PVE::QemuConfig->lock_config_full($vmid, 1, $realcmd);
+ PVE::QemuConfig->lock_config_full($vmid, 1, $realcmd);
+
+ if ($start_after_create) {
+ print "Execute autostart\n";
+ eval { PVE::API2::Qemu->vm_start({ vmid => $vmid, node => $node }) };
+ warn $@ if $@;
+ }
};
my $createfn = sub {
my $conf = $param;
- my ($arch, undef) = PVE::QemuServer::get_basic_machine_info($conf);
+ my $arch = PVE::QemuServer::get_vm_arch($conf);
eval {
code => sub {
my ($param) = @_;
- return PVE::Cluster::create_rrd_graph(
+ return PVE::RRD::create_rrd_graph(
"pve2-vm/$param->{vmid}", $param->{timeframe},
$param->{ds}, $param->{cf});
code => sub {
my ($param) = @_;
- return PVE::Cluster::create_rrd_data(
+ return PVE::RRD::create_rrd_data(
"pve2-vm/$param->{vmid}", $param->{timeframe}, $param->{cf});
}});
my $volid = $drive->{file};
return if !$volid || !($drive->{replicate}//1);
return if PVE::QemuServer::drive_is_cdrom($drive);
- my ($storeid, $format);
+
+ my ($storeid, $volname) = PVE::Storage::parse_volume_id($volid, 1);
+ return if $volname eq 'cloudinit';
+
+ my $format;
if ($volid =~ $NEW_DISK_RE) {
$storeid = $2;
$format = $drive->{format} || PVE::Storage::storage_default_format($storecfg, $storeid);
} else {
- ($storeid, undef) = PVE::Storage::parse_volume_id($volid, 1);
$format = (PVE::Storage::parse_volname($storecfg, $volid))[6];
}
return if PVE::Storage::storage_can_replicate($storecfg, $storeid, $format);
raise_param_exc({ $opt => "unable to parse drive options" }) if !$drive;
PVE::QemuServer::cleanup_drive_path($opt, $storecfg, $drive);
$check_replication->($drive);
- $param->{$opt} = PVE::QemuServer::print_drive($vmid, $drive);
+ $param->{$opt} = PVE::QemuServer::print_drive($drive);
} elsif ($opt =~ m/^net(\d+)$/) {
# add macaddr
my $net = PVE::QemuServer::parse_net($param->{$opt});
die "checksum missmatch (file change by other user?)\n"
if $digest && $digest ne $conf->{digest};
+ # FIXME: 'suspended' lock should probably be a state or "weak" lock?!
+ if (scalar(@delete) && grep { $_ eq 'vmstate'} @delete) {
+ if (defined($conf->{lock}) && $conf->{lock} eq 'suspended') {
+ delete $conf->{lock}; # for check lock check, not written out
+ push @delete, 'lock'; # this is the real deal to write it out
+ }
+ push @delete, 'runningmachine' if $conf->{runningmachine};
+ }
+
PVE::QemuConfig->check_lock($conf) if !$skiplock;
foreach my $opt (keys %$revert) {
next;
}
my $is_pending_val = defined($conf->{pending}->{$opt});
+ delete $conf->{pending}->{$opt};
if ($opt =~ m/^unused/) {
my $drive = PVE::QemuServer::parse_drive($opt, $val);
delete $conf->{$opt};
PVE::QemuConfig->write_config($vmid, $conf);
}
+ } elsif ($opt eq 'vmstate') {
+ PVE::QemuConfig->check_protection($conf, "can't remove vmstate '$val'");
+ if (PVE::QemuServer::try_deallocate_drive($storecfg, $vmid, $conf, $opt, { file => $val }, $rpcenv, $authuser, 1)) {
+ delete $conf->{$opt};
+ PVE::QemuConfig->write_config($vmid, $conf);
+ }
} elsif (PVE::QemuServer::is_valid_drivename($opt)) {
PVE::QemuConfig->check_protection($conf, "can't remove drive '$opt'");
$rpcenv->check_vm_perm($authuser, $vmid, undef, ['VM.Config.Disk']);
$conf = PVE::QemuConfig->load_config($vmid); # update/reload
next if defined($conf->{pending}->{$opt}) && ($param->{$opt} eq $conf->{pending}->{$opt}); # skip if nothing changed
- my ($arch, undef) = PVE::QemuServer::get_basic_machine_info($conf);
+ my $arch = PVE::QemuServer::get_vm_arch($conf);
if (PVE::QemuServer::is_valid_drivename($opt)) {
my $drive = PVE::QemuServer::parse_drive($opt, $param->{$opt});
$conf = PVE::QemuConfig->load_config($vmid); # update/reload
+ my $errors = {};
if ($running) {
- my $errors = {};
PVE::QemuServer::vmconfig_hotplug_pending($vmid, $conf, $storecfg, $modified, $errors);
- raise_param_exc($errors) if scalar(keys %$errors);
} else {
- PVE::QemuServer::vmconfig_apply_pending($vmid, $conf, $storecfg, $running);
+ PVE::QemuServer::vmconfig_apply_pending($vmid, $conf, $storecfg, $running, $errors);
}
+ raise_param_exc($errors) if scalar(keys %$errors);
return;
};
node => get_standard_option('pve-node'),
vmid => get_standard_option('pve-vmid', { completion => \&PVE::QemuServer::complete_vmid_stopped }),
skiplock => get_standard_option('skiplock'),
+ purge => {
+ type => 'boolean',
+ description => "Remove vmid from backup cron jobs.",
+ optional => 1,
+ },
},
},
returns => {
die "unable to remove VM $vmid - used in HA resources\n"
if PVE::HA::Config::vm_is_ha_managed($vmid);
- # do not allow destroy if there are replication jobs
- my $repl_conf = PVE::ReplicationConfig->new();
- $repl_conf->check_for_existing_jobs($vmid);
+ if (!$param->{purge}) {
+ # don't allow destroy if with replication jobs but no purge param
+ my $repl_conf = PVE::ReplicationConfig->new();
+ $repl_conf->check_for_existing_jobs($vmid);
+ }
# early tests (repeat after locking)
die "VM $vmid is running - destroy failed\n"
PVE::QemuConfig->lock_config($vmid, sub {
die "VM $vmid is running - destroy failed\n"
if (PVE::QemuServer::check_running($vmid));
- PVE::QemuServer::destroy_vm($storecfg, $vmid, 1, $skiplock);
+
+ PVE::QemuServer::destroy_vm($storecfg, $vmid, $skiplock, { lock => 'destroyed' });
+
PVE::AccessControl::remove_vm_access($vmid);
PVE::Firewall::remove_vmfw_conf($vmid);
+ if ($param->{purge}) {
+ PVE::ReplicationConfig::remove_vmid_jobs($vmid);
+ PVE::VZDump::Plugin::remove_vmid_from_backup_jobs($vmid);
+ }
# only now remove the zombie config, else we could have a reuse race
PVE::QemuConfig->destroy_config($vmid);
if ($node ne 'localhost' && $node ne PVE::INotify::nodename()) {
(undef, $family) = PVE::Cluster::remote_node_ip($node);
- my $sshinfo = PVE::Cluster::get_ssh_info($node);
+ my $sshinfo = PVE::SSHInfo::get_ssh_info($node);
# NOTE: kvm VNC traffic is already TLS encrypted or is known unsecure
- $remcmd = PVE::Cluster::ssh_info_to_command($sshinfo, $use_serial ? '-t' : '-T');
+ $remcmd = PVE::SSHInfo::ssh_info_to_command($sshinfo, $use_serial ? '-t' : '-T');
} else {
$family = PVE::Tools::get_host_address_family($node);
}
if ($node ne 'localhost' && $node ne PVE::INotify::nodename()) {
(undef, $family) = PVE::Cluster::remote_node_ip($node);
- my $sshinfo = PVE::Cluster::get_ssh_info($node);
- $remcmd = PVE::Cluster::ssh_info_to_command($sshinfo, '-t');
+ my $sshinfo = PVE::SSHInfo::get_ssh_info($node);
+ $remcmd = PVE::SSHInfo::ssh_info_to_command($sshinfo, '-t');
push @$remcmd, '--';
} else {
$family = PVE::Tools::get_host_address_family($node);
my ($ticket, undef, $remote_viewer_config) =
PVE::AccessControl::remote_viewer_config($authuser, $vmid, $node, $proxy, $title, $port);
- PVE::QemuServer::vm_mon_cmd($vmid, "set_password", protocol => 'spice', password => $ticket);
- PVE::QemuServer::vm_mon_cmd($vmid, "expire_password", protocol => 'spice', time => "+30");
+ mon_cmd($vmid, "set_password", protocol => 'spice', password => $ticket);
+ mon_cmd($vmid, "expire_password", protocol => 'spice', time => "+30");
return $remote_viewer_config;
}});
description => "CIDR of the (sub) network that is used for migration.",
optional => 1,
},
- machine => get_standard_option('pve-qm-machine'),
+ machine => get_standard_option('pve-qemu-machine'),
targetstorage => {
description => "Target storage for the migration. (Can be '1' to use the same storage id as on the source node.)",
type => 'string',
print "Requesting HA stop for VM $vmid\n";
- my $cmd = ['ha-manager', 'set', "vm:$vmid", '--state', 'stopped'];
+ my $cmd = ['ha-manager', 'crm-command', 'stop', "vm:$vmid", '0'];
PVE::Tools::run_command($cmd);
return;
};
# checking the qmp status here to get feedback to the gui/cli/api
# and the status query should not take too long
my $qmpstatus = eval {
- PVE::QemuServer::vm_qmp_command($vmid, { execute => "query-status" }, 0);
+ PVE::QemuConfig::assert_config_exists_on_node($vmid);
+ mon_cmd($vmid, "query-status");
};
my $err = $@ if $@;
if (PVE::HA::Config::vm_is_ha_managed($vmid) && $rpcenv->{type} ne 'ha') {
+ my $timeout = $param->{timeout} // 60;
my $hacmd = sub {
my $upid = shift;
print "Requesting HA stop for VM $vmid\n";
- my $cmd = ['ha-manager', 'set', "vm:$vmid", '--state', 'stopped'];
+ my $cmd = ['ha-manager', 'crm-command', 'stop', "vm:$vmid", "$timeout"];
PVE::Tools::run_command($cmd);
return;
};
my $vmid = extract_param($param, 'vmid');
my $qmpstatus = eval {
- PVE::QemuServer::vm_qmp_command($vmid, { execute => "query-status" }, 0);
+ PVE::QemuConfig::assert_config_exists_on_node($vmid);
+ mon_cmd($vmid, "query-status");
};
my $err = $@ if $@;
proxyto => 'node',
description => "Suspend virtual machine.",
permissions => {
+ description => "You need 'VM.PowerMgmt' on /vms/{vmid}, and if you have set 'todisk',".
+ " you need also 'VM.Config.Disk' on /vms/{vmid} and 'Datastore.AllocateSpace'".
+ " on the storage for the vmstate.",
check => ['perm', '/vms/{vmid}', [ 'VM.PowerMgmt' ]],
},
parameters => {
die "Cannot suspend HA managed VM to disk\n"
if $todisk && PVE::HA::Config::vm_is_ha_managed($vmid);
+ # early check for storage permission, for better user feedback
+ if ($todisk) {
+ $rpcenv->check_vm_perm($authuser, $vmid, undef, ['VM.Config.Disk']);
+
+ if (!$statestorage) {
+ # get statestorage from config if none is given
+ my $conf = PVE::QemuConfig->load_config($vmid);
+ my $storecfg = PVE::Storage::config();
+ $statestorage = PVE::QemuServer::find_vmstate_storage($conf, $storecfg);
+ }
+
+ $rpcenv->check($authuser, "/storage/$statestorage", ['Datastore.AllocateSpace']);
+ }
+
my $realcmd = sub {
my $upid = shift;
} elsif (PVE::QemuServer::is_valid_drivename($opt)) {
my $drive = PVE::QemuServer::parse_drive($opt, $value);
die "unable to parse drive options for '$opt'\n" if !$drive;
- if (PVE::QemuServer::drive_is_cdrom($drive)) {
+ if (PVE::QemuServer::drive_is_cdrom($drive, 1)) {
$newconf->{$opt} = $value; # simply copy configuration
} else {
- if ($full) {
+ if ($full || PVE::QemuServer::drive_is_cloudinit($drive)) {
die "Full clone feature is not supported for drive '$opt'\n"
if !PVE::Storage::volume_has_feature($storecfg, 'copy', $drive->{file}, $snapname, $running);
$fullclone->{$opt} = 1;
$newid, $storage, $format, $fullclone->{$opt}, $newvollist,
$jobs, $skipcomplete, $oldconf->{agent}, $clonelimit);
- $newconf->{$opt} = PVE::QemuServer::print_drive($vmid, $newdrive);
+ $newconf->{$opt} = PVE::QemuServer::print_drive($newdrive);
PVE::QemuConfig->write_config($newid, $newconf);
$i++;
my $newdrive = PVE::QemuServer::clone_disk($storecfg, $vmid, $running, $disk, $drive, undef,
$vmid, $storeid, $format, 1, $newvollist, undef, undef, undef, $movelimit);
- $conf->{$disk} = PVE::QemuServer::print_drive($vmid, $newdrive);
+ $conf->{$disk} = PVE::QemuServer::print_drive($newdrive);
PVE::QemuConfig->add_unused_volume($conf, $old_volid) if !$param->{delete};
PVE::QemuConfig->write_config($vmid, $conf);
if ($running && PVE::QemuServer::parse_guest_agent($conf)->{fstrim_cloned_disks} && PVE::QemuServer::qga_check_running($vmid)) {
- eval { PVE::QemuServer::vm_mon_cmd($vmid, "guest-fstrim"); };
+ eval { mon_cmd($vmid, "guest-fstrim"); };
}
eval {
targetstorage => get_standard_option('pve-storage-id', {
description => "Default target storage.",
optional => 1,
- completion => \&PVE::QemuServer::complete_storage,
+ completion => \&PVE::QemuServer::complete_migration_storage,
}),
bwlimit => {
description => "Override I/O bandwidth limit (in KiB/s).",
if (PVE::QemuServer::check_running($vmid)) {
die "can't migrate running VM without --online\n" if !$param->{online};
} else {
- warn "VM isn't running. Doing offline migration instead\n." if $param->{online};
+ warn "VM isn't running. Doing offline migration instead.\n" if $param->{online};
$param->{online} = 0;
}
my $res = '';
eval {
- $res = PVE::QemuServer::vm_human_monitor_command($vmid, $param->{command});
+ $res = PVE::QemuServer::Monitor::hmp_cmd($vmid, $param->{command});
};
$res = "ERROR: $@" if $@;
PVE::QemuServer::qemu_block_resize($vmid, "drive-$disk", $storecfg, $volid, $newsize);
$drive->{size} = $newsize;
- $conf->{$disk} = PVE::QemuServer::print_drive($vmid, $drive);
+ $conf->{$disk} = PVE::QemuServer::print_drive($drive);
PVE::QemuConfig->write_config($vmid, $conf);
};
die "unable to use snapshot name 'current' (reserved name)\n"
if $snapname eq 'current';
+ die "unable to use snapshot name 'pending' (reserved name)\n"
+ if lc($snapname) eq 'pending';
+
my $realcmd = sub {
PVE::Cluster::log_msg('info', $authuser, "snapshot VM $vmid: $snapname");
PVE::QemuConfig->snapshot_create($vmid, $snapname, $param->{vmstate},