use warnings;
use Cwd 'abs_path';
use Net::SSLeay;
-use UUID;
use POSIX;
use IO::Socket::IP;
use URI::Escape;
use PVE::Cluster qw (cfs_read_file cfs_write_file);;
+use PVE::RRD;
use PVE::SafeSyslog;
use PVE::Tools qw(extract_param);
use PVE::Exception qw(raise raise_param_exc raise_perm_exc);
use PVE::GuestHelpers;
use PVE::QemuConfig;
use PVE::QemuServer;
+use PVE::QemuServer::Monitor qw(mon_cmd);
use PVE::QemuMigrate;
use PVE::RPCEnvironment;
use PVE::AccessControl;
use PVE::Firewall;
use PVE::API2::Firewall::VM;
use PVE::API2::Qemu::Agent;
+use PVE::VZDump::Plugin;
+use PVE::DataCenterConfig;
+use PVE::SSHInfo;
BEGIN {
if (!$ENV{PVE_GENERATING_DOCS}) {
my $isCDROM = PVE::QemuServer::drive_is_cdrom($drive);
my $volid = $drive->{file};
+ my ($storeid, $volname) = PVE::Storage::parse_volume_id($volid, 1);
- if (!$volid || ($volid eq 'none' || $volid eq 'cloudinit')) {
- # nothing to check
- } elsif ($volid =~ m/^(([^:\s]+):)?(cloudinit)$/) {
+ if (!$volid || ($volid eq 'none' || $volid eq 'cloudinit' || (defined($volname) && $volname eq 'cloudinit'))) {
# nothing to check
} elsif ($isCDROM && ($volid eq 'cdrom')) {
$rpcenv->check($authuser, "/", ['Sys.Console']);
my ($ds, $disk) = @_;
my $volid = $disk->{file};
+ my ($storeid, $volname) = PVE::Storage::parse_volume_id($volid, 1);
if (!$volid || $volid eq 'none' || $volid eq 'cdrom') {
delete $disk->{size};
$res->{$ds} = PVE::QemuServer::print_drive($vmid, $disk);
- } elsif ($volid =~ m!^(?:([^/:\s]+):)?cloudinit$!) {
- my $storeid = $1 || $default_storage;
+ } elsif (defined($volname) && $volname eq 'cloudinit') {
+ $storeid = $storeid // $default_storage;
die "no storage ID specified (and no default storage)\n" if !$storeid;
my $scfg = PVE::Storage::storage_config($storecfg, $storeid);
my $name = "vm-$vmid-cloudinit";
+
my $fmt = undef;
if ($scfg->{path}) {
- $name .= ".qcow2";
- $fmt = 'qcow2';
- }else{
- $fmt = 'raw';
+ $fmt = $disk->{format} // "qcow2";
+ $name .= ".$fmt";
+ } else {
+ $fmt = $disk->{format} // "raw";
}
- # FIXME: Reasonable size? qcow2 shouldn't grow if the space isn't used anyway?
- my $cloudinit_iso_size = 5; # in MB
- my $volid = PVE::Storage::vdisk_alloc($storecfg, $storeid, $vmid,
- $fmt, $name, $cloudinit_iso_size*1024);
+
+ # Initial disk created with 4 MB and aligned to 4MB on regeneration
+ my $ci_size = PVE::QemuServer::Cloudinit::CLOUDINIT_DISK_SIZE;
+ my $volid = PVE::Storage::vdisk_alloc($storecfg, $storeid, $vmid, $fmt, $name, $ci_size/1024);
$disk->{file} = $volid;
$disk->{media} = 'cdrom';
push @$vollist, $volid;
if ($volid_is_new) {
- my ($storeid, $volname) = PVE::Storage::parse_volume_id($volid, 1);
-
PVE::Storage::activate_volumes($storecfg, [ $volid ]) if $storeid;
my $size = PVE::Storage::volume_size_info($storecfg, $volid);
'tablet' => 1,
'vga' => 1,
'watchdog' => 1,
+ 'audio0' => 1,
};
my $generaloptions = {
'startup' => 1,
'tdf' => 1,
'template' => 1,
+ 'tags' => 1,
};
my $vmpoweroptions = {
};
my $cloudinitoptions = {
+ cicustom => 1,
cipassword => 1,
citype => 1,
ciuser => 1,
return 1 if $authuser eq 'root@pam';
foreach my $opt (@$key_list) {
- # disk checks need to be done somewhere else
+ # some checks (e.g., disk, serial port, usb) need to be done somewhere
+ # else, as there the permission can be value dependend
next if PVE::QemuServer::is_valid_drivename($opt);
next if $opt eq 'cdrom';
- next if $opt =~ m/^unused\d+$/;
+ next if $opt =~ m/^(?:unused|serial|usb)\d+$/;
+
if ($cpuoptions->{$opt} || $opt =~ m/^numa\d+$/) {
$rpcenv->check_vm_perm($authuser, $vmid, $pool, ['VM.Config.CPU']);
} elsif ($cloudinitoptions->{$opt} || ($opt =~ m/^(?:net|ipconfig)\d+$/)) {
$rpcenv->check_vm_perm($authuser, $vmid, $pool, ['VM.Config.Network']);
} else {
- # catches usb\d+, hostpci\d+, args, lock, etc.
+ # catches hostpci\d+, args, lock, etc.
# new options will be checked here
die "only root can set '$opt' config\n";
}
description => "Add the VM to the specified pool.",
},
bwlimit => {
- description => "Override i/o bandwidth limit (in KiB/s).",
+ description => "Override I/O bandwidth limit (in KiB/s).",
optional => 1,
type => 'integer',
minimum => '0',
+ default => 'restore limit from datacenter or storage config',
},
start => {
optional => 1,
my ($param) = @_;
my $rpcenv = PVE::RPCEnvironment::get();
-
my $authuser = $rpcenv->get_user();
my $node = extract_param($param, 'node');
-
my $vmid = extract_param($param, 'vmid');
my $archive = extract_param($param, 'archive');
my $is_restore = !!$archive;
- my $storage = extract_param($param, 'storage');
-
+ my $bwlimit = extract_param($param, 'bwlimit');
my $force = extract_param($param, 'force');
-
- my $unique = extract_param($param, 'unique');
-
my $pool = extract_param($param, 'pool');
-
- my $bwlimit = extract_param($param, 'bwlimit');
-
my $start_after_create = extract_param($param, 'start');
-
- my $filename = PVE::QemuConfig->config_file($vmid);
-
- my $storecfg = PVE::Storage::config();
+ my $storage = extract_param($param, 'storage');
+ my $unique = extract_param($param, 'unique');
if (defined(my $ssh_keys = $param->{sshkeys})) {
$ssh_keys = URI::Escape::uri_unescape($ssh_keys);
PVE::Cluster::check_cfs_quorum();
+ my $filename = PVE::QemuConfig->config_file($vmid);
+ my $storecfg = PVE::Storage::config();
+
if (defined($pool)) {
$rpcenv->check_pool_exist($pool);
}
PVE::QemuConfig->check_protection($conf, $emsg);
die "$emsg vm is running\n" if PVE::QemuServer::check_running($vmid);
- die "$emsg vm is a template\n" if PVE::QemuConfig->is_template($conf);
my $realcmd = sub {
PVE::QemuServer::restore_archive($archive, $vmid, $authuser, {
storage => $storage,
pool => $pool,
unique => $unique,
- bwlimit => $bwlimit, });
+ bwlimit => $bwlimit,
+ });
+ my $restored_conf = PVE::QemuConfig->load_config($vmid);
+ # Convert restored VM to template if backup was VM template
+ if (PVE::QemuConfig->is_template($restored_conf)) {
+ warn "Convert to template.\n";
+ eval { PVE::QemuServer::template_create($vmid, $restored_conf) };
+ warn $@ if $@;
+ }
PVE::AccessControl::add_vm_to_pool($vmid, $pool) if $pool;
code => sub {
my ($param) = @_;
- return PVE::Cluster::create_rrd_graph(
+ return PVE::RRD::create_rrd_graph(
"pve2-vm/$param->{vmid}", $param->{timeframe},
$param->{ds}, $param->{cf});
code => sub {
my ($param) = @_;
- return PVE::Cluster::create_rrd_data(
+ return PVE::RRD::create_rrd_data(
"pve2-vm/$param->{vmid}", $param->{timeframe}, $param->{cf});
}});
code => sub {
my ($param) = @_;
- my $conf = PVE::QemuConfig->load_config($param->{vmid});
-
- my $snapname = $param->{snapshot};
- if ($snapname) {
- my $snapshot = $conf->{snapshots}->{$snapname};
- die "snapshot '$snapname' does not exist\n"
- if !defined($snapshot);
-
- # we need the digest of the file
- $snapshot->{digest} = $conf->{digest};
- $conf = $snapshot;
- }
-
- delete $conf->{snapshots};
-
- if (!$param->{current}) {
- foreach my $opt (keys %{$conf->{pending}}) {
- next if $opt eq 'delete';
- my $value = $conf->{pending}->{$opt};
- next if ref($value); # just to be sure
- $conf->{$opt} = $value;
- }
- my $pending_delete_hash = PVE::QemuServer::split_flagged_list($conf->{pending}->{delete});
- foreach my $opt (keys %$pending_delete_hash) {
- delete $conf->{$opt} if $conf->{$opt};
- }
- }
+ raise_param_exc({ snapshot => "cannot use 'snapshot' parameter with 'current'",
+ current => "cannot use 'snapshot' parameter with 'current'"})
+ if ($param->{snapshot} && $param->{current});
- delete $conf->{pending};
-
- # hide cloudinit password
- if ($conf->{cipassword}) {
- $conf->{cipassword} = '**********';
+ my $conf;
+ if ($param->{snapshot}) {
+ $conf = PVE::QemuConfig->load_snapshot_config($param->{vmid}, $param->{snapshot});
+ } else {
+ $conf = PVE::QemuConfig->load_current_config($param->{vmid}, $param->{current});
}
-
+ $conf->{cipassword} = '**********' if $conf->{cipassword};
return $conf;
+
}});
__PACKAGE__->register_method({
my $conf = PVE::QemuConfig->load_config($param->{vmid});
- my $pending_delete_hash = PVE::QemuServer::split_flagged_list($conf->{pending}->{delete});
-
- my $res = [];
-
- foreach my $opt (keys %$conf) {
- next if ref($conf->{$opt});
- my $item = { key => $opt };
- $item->{value} = $conf->{$opt} if defined($conf->{$opt});
- $item->{pending} = $conf->{pending}->{$opt} if defined($conf->{pending}->{$opt});
- $item->{delete} = ($pending_delete_hash->{$opt} ? 2 : 1) if exists $pending_delete_hash->{$opt};
-
- # hide cloudinit password
- if ($opt eq 'cipassword') {
- $item->{value} = '**********' if defined($item->{value});
- # the trailing space so that the pending string is different
- $item->{pending} = '********** ' if defined($item->{pending});
- }
- push @$res, $item;
- }
-
- foreach my $opt (keys %{$conf->{pending}}) {
- next if $opt eq 'delete';
- next if ref($conf->{pending}->{$opt}); # just to be sure
- next if defined($conf->{$opt});
- my $item = { key => $opt };
- $item->{pending} = $conf->{pending}->{$opt};
-
- # hide cloudinit password
- if ($opt eq 'cipassword') {
- $item->{pending} = '**********' if defined($item->{pending});
- }
- push @$res, $item;
- }
+ my $pending_delete_hash = PVE::QemuConfig->parse_pending_delete($conf->{pending}->{delete});
- while (my ($opt, $force) = each %$pending_delete_hash) {
- next if $conf->{pending}->{$opt}; # just to be sure
- next if $conf->{$opt};
- my $item = { key => $opt, delete => ($force ? 2 : 1)};
- push @$res, $item;
- }
+ $conf->{cipassword} = '**********' if defined($conf->{cipassword});
+ $conf->{pending}->{cipassword} = '********** ' if defined($conf->{pending}->{cipassword});
- return $res;
- }});
+ return PVE::GuestHelpers::config_with_pending_array($conf, $pending_delete_hash);
+ }});
# POST/PUT {vmid}/config implementation
#
my $volid = $drive->{file};
return if !$volid || !($drive->{replicate}//1);
return if PVE::QemuServer::drive_is_cdrom($drive);
- my ($storeid, $format);
+
+ my ($storeid, $volname) = PVE::Storage::parse_volume_id($volid, 1);
+ return if defined($volname) && $volname eq 'cloudinit';
+
+ my $format;
if ($volid =~ $NEW_DISK_RE) {
$storeid = $2;
$format = $drive->{format} || PVE::Storage::storage_default_format($storecfg, $storeid);
} else {
- ($storeid, undef) = PVE::Storage::parse_volume_id($volid, 1);
$format = (PVE::Storage::parse_volname($storecfg, $volid))[6];
}
return if PVE::Storage::storage_can_replicate($storecfg, $storeid, $format);
if ($param->{$opt} eq '1') {
$param->{$opt} = PVE::QemuServer::generate_uuid();
}
+ } elsif ($opt eq 'hookscript') {
+ eval { PVE::GuestHelpers::check_hookscript($param->{$opt}, $storecfg); };
+ raise_param_exc({ $opt => $@ }) if $@;
}
}
foreach my $opt (@delete) {
$modified->{$opt} = 1;
$conf = PVE::QemuConfig->load_config($vmid); # update/reload
- if (!defined($conf->{$opt}) && !defined($conf->{pending}->{$opt})) {
+
+ # value of what we want to delete, independent if pending or not
+ my $val = $conf->{$opt} // $conf->{pending}->{$opt};
+ if (!defined($val)) {
warn "cannot delete '$opt' - not set in current configuration!\n";
$modified->{$opt} = 0;
next;
}
+ my $is_pending_val = defined($conf->{pending}->{$opt});
+ delete $conf->{pending}->{$opt};
if ($opt =~ m/^unused/) {
- my $drive = PVE::QemuServer::parse_drive($opt, $conf->{$opt});
+ my $drive = PVE::QemuServer::parse_drive($opt, $val);
PVE::QemuConfig->check_protection($conf, "can't remove unused disk '$drive->{file}'");
$rpcenv->check_vm_perm($authuser, $vmid, undef, ['VM.Config.Disk']);
if (PVE::QemuServer::try_deallocate_drive($storecfg, $vmid, $conf, $opt, $drive, $rpcenv, $authuser)) {
} elsif (PVE::QemuServer::is_valid_drivename($opt)) {
PVE::QemuConfig->check_protection($conf, "can't remove drive '$opt'");
$rpcenv->check_vm_perm($authuser, $vmid, undef, ['VM.Config.Disk']);
- PVE::QemuServer::vmconfig_register_unused_drive($storecfg, $vmid, $conf, PVE::QemuServer::parse_drive($opt, $conf->{pending}->{$opt}))
- if defined($conf->{pending}->{$opt});
- PVE::QemuServer::vmconfig_delete_pending_option($conf, $opt, $force);
+ PVE::QemuServer::vmconfig_register_unused_drive($storecfg, $vmid, $conf, PVE::QemuServer::parse_drive($opt, $val))
+ if $is_pending_val;
+ PVE::QemuConfig->add_to_pending_delete($conf, $opt, $force);
+ PVE::QemuConfig->write_config($vmid, $conf);
+ } elsif ($opt =~ m/^serial\d+$/) {
+ if ($val eq 'socket') {
+ $rpcenv->check_vm_perm($authuser, $vmid, undef, ['VM.Config.HWType']);
+ } elsif ($authuser ne 'root@pam') {
+ die "only root can delete '$opt' config for real devices\n";
+ }
+ PVE::QemuConfig->add_to_pending_delete($conf, $opt, $force);
+ PVE::QemuConfig->write_config($vmid, $conf);
+ } elsif ($opt =~ m/^usb\d+$/) {
+ if ($val =~ m/spice/) {
+ $rpcenv->check_vm_perm($authuser, $vmid, undef, ['VM.Config.HWType']);
+ } elsif ($authuser ne 'root@pam') {
+ die "only root can delete '$opt' config for real devices\n";
+ }
+ PVE::QemuConfig->add_to_pending_delete($conf, $opt, $force);
PVE::QemuConfig->write_config($vmid, $conf);
} else {
- PVE::QemuServer::vmconfig_delete_pending_option($conf, $opt, $force);
+ PVE::QemuConfig->add_to_pending_delete($conf, $opt, $force);
PVE::QemuConfig->write_config($vmid, $conf);
}
}
if defined($conf->{pending}->{$opt});
&$create_disks($rpcenv, $authuser, $conf->{pending}, $arch, $storecfg, $vmid, undef, {$opt => $param->{$opt}});
+ } elsif ($opt =~ m/^serial\d+/) {
+ if ((!defined($conf->{$opt}) || $conf->{$opt} eq 'socket') && $param->{$opt} eq 'socket') {
+ $rpcenv->check_vm_perm($authuser, $vmid, undef, ['VM.Config.HWType']);
+ } elsif ($authuser ne 'root@pam') {
+ die "only root can modify '$opt' config for real devices\n";
+ }
+ $conf->{pending}->{$opt} = $param->{$opt};
+ } elsif ($opt =~ m/^usb\d+/) {
+ if ((!defined($conf->{$opt}) || $conf->{$opt} =~ m/spice/) && $param->{$opt} =~ m/spice/) {
+ $rpcenv->check_vm_perm($authuser, $vmid, undef, ['VM.Config.HWType']);
+ } elsif ($authuser ne 'root@pam') {
+ die "only root can modify '$opt' config for real devices\n";
+ }
+ $conf->{pending}->{$opt} = $param->{$opt};
} else {
$conf->{pending}->{$opt} = $param->{$opt};
}
- PVE::QemuServer::vmconfig_undelete_pending_option($conf, $opt);
+ PVE::QemuConfig->remove_from_pending_delete($conf, $opt);
PVE::QemuConfig->write_config($vmid, $conf);
}
# remove pending changes when nothing changed
$conf = PVE::QemuConfig->load_config($vmid); # update/reload
- my $changes = PVE::QemuServer::vmconfig_cleanup_pending($conf);
+ my $changes = PVE::QemuConfig->cleanup_pending($conf);
PVE::QemuConfig->write_config($vmid, $conf) if $changes;
return if !scalar(keys %{$conf->{pending}});
}
});
-
__PACKAGE__->register_method({
name => 'destroy_vm',
path => '{vmid}',
node => get_standard_option('pve-node'),
vmid => get_standard_option('pve-vmid', { completion => \&PVE::QemuServer::complete_vmid_stopped }),
skiplock => get_standard_option('skiplock'),
+ purge => {
+ type => 'boolean',
+ description => "Remove vmid from backup cron jobs.",
+ optional => 1,
+ },
},
},
returns => {
my ($param) = @_;
my $rpcenv = PVE::RPCEnvironment::get();
-
my $authuser = $rpcenv->get_user();
-
my $vmid = $param->{vmid};
my $skiplock = $param->{skiplock};
# test if VM exists
my $conf = PVE::QemuConfig->load_config($vmid);
-
my $storecfg = PVE::Storage::config();
-
PVE::QemuConfig->check_protection($conf, "can't remove VM $vmid");
-
die "unable to remove VM $vmid - used in HA resources\n"
if PVE::HA::Config::vm_is_ha_managed($vmid);
- # do not allow destroy if there are replication jobs
- my $repl_conf = PVE::ReplicationConfig->new();
- $repl_conf->check_for_existing_jobs($vmid);
+ if (!$param->{purge}) {
+ # don't allow destroy if with replication jobs but no purge param
+ my $repl_conf = PVE::ReplicationConfig->new();
+ $repl_conf->check_for_existing_jobs($vmid);
+ }
# early tests (repeat after locking)
die "VM $vmid is running - destroy failed\n"
my $upid = shift;
syslog('info', "destroy VM $vmid: $upid\n");
+ PVE::QemuConfig->lock_config($vmid, sub {
+ die "VM $vmid is running - destroy failed\n"
+ if (PVE::QemuServer::check_running($vmid));
- PVE::QemuServer::vm_destroy($storecfg, $vmid, $skiplock);
+ PVE::QemuServer::destroy_vm($storecfg, $vmid, $skiplock, { lock => 'destroyed' });
- PVE::AccessControl::remove_vm_access($vmid);
+ PVE::AccessControl::remove_vm_access($vmid);
+ PVE::Firewall::remove_vmfw_conf($vmid);
+ if ($param->{purge}) {
+ PVE::ReplicationConfig::remove_vmid_jobs($vmid);
+ PVE::VZDump::Plugin::remove_vmid_from_backup_jobs($vmid);
+ }
- PVE::Firewall::remove_vmfw_conf($vmid);
+ # only now remove the zombie config, else we can have reuse race
+ PVE::QemuConfig->destroy_config($vmid);
+ });
};
return $rpcenv->fork_worker('qmdestroy', $vmid, $authuser, $realcmd);
if ($node ne 'localhost' && $node ne PVE::INotify::nodename()) {
(undef, $family) = PVE::Cluster::remote_node_ip($node);
- my $sshinfo = PVE::Cluster::get_ssh_info($node);
+ my $sshinfo = PVE::SSHInfo::get_ssh_info($node);
# NOTE: kvm VNC traffic is already TLS encrypted or is known unsecure
- $remcmd = PVE::Cluster::ssh_info_to_command($sshinfo, $use_serial ? '-t' : '-T');
+ $remcmd = PVE::SSHInfo::ssh_info_to_command($sshinfo, $use_serial ? '-t' : '-T');
} else {
$family = PVE::Tools::get_host_address_family($node);
}
if ($node ne 'localhost' && $node ne PVE::INotify::nodename()) {
(undef, $family) = PVE::Cluster::remote_node_ip($node);
- my $sshinfo = PVE::Cluster::get_ssh_info($node);
- $remcmd = PVE::Cluster::ssh_info_to_command($sshinfo, '-t');
+ my $sshinfo = PVE::SSHInfo::get_ssh_info($node);
+ $remcmd = PVE::SSHInfo::ssh_info_to_command($sshinfo, '-t');
push @$remcmd, '--';
} else {
$family = PVE::Tools::get_host_address_family($node);
my ($ticket, undef, $remote_viewer_config) =
PVE::AccessControl::remote_viewer_config($authuser, $vmid, $node, $proxy, $title, $port);
- PVE::QemuServer::vm_mon_cmd($vmid, "set_password", protocol => 'spice', password => $ticket);
- PVE::QemuServer::vm_mon_cmd($vmid, "expire_password", protocol => 'spice', time => "+30");
+ mon_cmd($vmid, "set_password", protocol => 'spice', password => $ticket);
+ mon_cmd($vmid, "expire_password", protocol => 'spice', time => "+30");
return $remote_viewer_config;
}});
{ subdir => 'current' },
{ subdir => 'start' },
{ subdir => 'stop' },
+ { subdir => 'reset' },
+ { subdir => 'shutdown' },
+ { subdir => 'suspend' },
+ { subdir => 'reboot' },
];
return $res;
description => "CIDR of the (sub) network that is used for migration.",
optional => 1,
},
- machine => get_standard_option('pve-qm-machine'),
+ machine => get_standard_option('pve-qemu-machine'),
targetstorage => {
description => "Target storage for the migration. (Can be '1' to use the same storage id as on the source node.)",
type => 'string',
my ($param) = @_;
my $rpcenv = PVE::RPCEnvironment::get();
-
my $authuser = $rpcenv->get_user();
my $node = extract_param($param, 'node');
-
my $vmid = extract_param($param, 'vmid');
my $machine = extract_param($param, 'machine');
- my $stateuri = extract_param($param, 'stateuri');
- raise_param_exc({ stateuri => "Only root may use this option." })
- if $stateuri && $authuser ne 'root@pam';
-
- my $skiplock = extract_param($param, 'skiplock');
- raise_param_exc({ skiplock => "Only root may use this option." })
- if $skiplock && $authuser ne 'root@pam';
-
- my $migratedfrom = extract_param($param, 'migratedfrom');
- raise_param_exc({ migratedfrom => "Only root may use this option." })
- if $migratedfrom && $authuser ne 'root@pam';
-
- my $migration_type = extract_param($param, 'migration_type');
- raise_param_exc({ migration_type => "Only root may use this option." })
- if $migration_type && $authuser ne 'root@pam';
-
- my $migration_network = extract_param($param, 'migration_network');
- raise_param_exc({ migration_network => "Only root may use this option." })
- if $migration_network && $authuser ne 'root@pam';
+ my $get_root_param = sub {
+ my $value = extract_param($param, $_[0]);
+ raise_param_exc({ "$_[0]" => "Only root may use this option." })
+ if $value && $authuser ne 'root@pam';
+ return $value;
+ };
- my $targetstorage = extract_param($param, 'targetstorage');
- raise_param_exc({ targetstorage => "Only root may use this option." })
- if $targetstorage && $authuser ne 'root@pam';
+ my $stateuri = $get_root_param->('stateuri');
+ my $skiplock = $get_root_param->('skiplock');
+ my $migratedfrom = $get_root_param->('migratedfrom');
+ my $migration_type = $get_root_param->('migration_type');
+ my $migration_network = $get_root_param->('migration_network');
+ my $targetstorage = $get_root_param->('targetstorage');
raise_param_exc({ targetstorage => "targetstorage can only by used with migratedfrom." })
if $targetstorage && !$migratedfrom;
# read spice ticket from STDIN
my $spice_ticket;
- if ($stateuri && ($stateuri eq 'tcp') && $migratedfrom && ($rpcenv->{type} eq 'cli')) {
+ if ($stateuri && ($stateuri eq 'tcp' || $stateuri eq 'unix') && $migratedfrom && ($rpcenv->{type} eq 'cli')) {
if (defined(my $line = <STDIN>)) {
chomp $line;
$spice_ticket = $line;
my $storecfg = PVE::Storage::config();
- if (PVE::HA::Config::vm_is_ha_managed($vmid) && !$stateuri &&
- $rpcenv->{type} ne 'ha') {
-
+ if (PVE::HA::Config::vm_is_ha_managed($vmid) && !$stateuri && $rpcenv->{type} ne 'ha') {
my $hacmd = sub {
my $upid = shift;
- my $service = "vm:$vmid";
-
- my $cmd = ['ha-manager', 'set', $service, '--state', 'started'];
-
print "Requesting HA start for VM $vmid\n";
+ my $cmd = ['ha-manager', 'set', "vm:$vmid", '--state', 'started'];
PVE::Tools::run_command($cmd);
-
return;
};
PVE::QemuServer::vm_start($storecfg, $vmid, $stateuri, $skiplock, $migratedfrom, undef,
$machine, $spice_ticket, $migration_network, $migration_type, $targetstorage);
-
return;
};
my ($param) = @_;
my $rpcenv = PVE::RPCEnvironment::get();
-
my $authuser = $rpcenv->get_user();
my $node = extract_param($param, 'node');
-
my $vmid = extract_param($param, 'vmid');
my $skiplock = extract_param($param, 'skiplock');
my $hacmd = sub {
my $upid = shift;
- my $service = "vm:$vmid";
-
- my $cmd = ['ha-manager', 'set', $service, '--state', 'stopped'];
-
print "Requesting HA stop for VM $vmid\n";
+ my $cmd = ['ha-manager', 'crm-command', 'stop', "vm:$vmid", '0'];
PVE::Tools::run_command($cmd);
-
return;
};
PVE::QemuServer::vm_stop($storecfg, $vmid, $skiplock, 0,
$param->{timeout}, 0, 1, $keepActive, $migratedfrom);
-
return;
};
my ($param) = @_;
my $rpcenv = PVE::RPCEnvironment::get();
-
my $authuser = $rpcenv->get_user();
my $node = extract_param($param, 'node');
-
my $vmid = extract_param($param, 'vmid');
my $skiplock = extract_param($param, 'skiplock');
#
# checking the qmp status here to get feedback to the gui/cli/api
# and the status query should not take too long
- my $qmpstatus;
- eval {
- $qmpstatus = PVE::QemuServer::vm_qmp_command($vmid, { execute => "query-status" }, 0);
+ my $qmpstatus = eval {
+ PVE::QemuConfig::assert_config_exists_on_node($vmid);
+ mon_cmd($vmid, "query-status");
};
my $err = $@ if $@;
}
}
- if (PVE::HA::Config::vm_is_ha_managed($vmid) &&
- ($rpcenv->{type} ne 'ha')) {
+ if (PVE::HA::Config::vm_is_ha_managed($vmid) && $rpcenv->{type} ne 'ha') {
+ my $timeout = $param->{timeout} // 60;
my $hacmd = sub {
my $upid = shift;
- my $service = "vm:$vmid";
-
- my $cmd = ['ha-manager', 'set', $service, '--state', 'stopped'];
-
print "Requesting HA stop for VM $vmid\n";
+ my $cmd = ['ha-manager', 'crm-command', 'stop', "vm:$vmid", "$timeout"];
PVE::Tools::run_command($cmd);
-
return;
};
PVE::QemuServer::vm_stop($storecfg, $vmid, $skiplock, 0, $param->{timeout},
$shutdown, $param->{forceStop}, $keepActive);
-
return;
};
}
}});
+# API: POST /nodes/{node}/qemu/{vmid}/status/reboot
+# Shuts the guest down cleanly and starts it again, applying any pending
+# config changes in between. Requires VM.PowerMgmt on the VM path.
+__PACKAGE__->register_method({
+ name => 'vm_reboot',
+ path => '{vmid}/status/reboot',
+ method => 'POST',
+ protected => 1,
+ proxyto => 'node',
+ description => "Reboot the VM by shutting it down, and starting it again. Applies pending changes.",
+ permissions => {
+ check => ['perm', '/vms/{vmid}', [ 'VM.PowerMgmt' ]],
+ },
+ parameters => {
+ additionalProperties => 0,
+ properties => {
+ node => get_standard_option('pve-node'),
+ vmid => get_standard_option('pve-vmid',
+ { completion => \&PVE::QemuServer::complete_vmid_running }),
+ timeout => {
+ description => "Wait maximal timeout seconds for the shutdown.",
+ type => 'integer',
+ minimum => 0,
+ optional => 1,
+ },
+ },
+ },
+ returns => {
+ type => 'string', # worker task UPID
+ },
+ code => sub {
+ my ($param) = @_;
+
+ my $rpcenv = PVE::RPCEnvironment::get();
+ my $authuser = $rpcenv->get_user();
+
+ my $node = extract_param($param, 'node');
+ my $vmid = extract_param($param, 'vmid');
+
+ # query QMP status up front for early feedback; a paused guest cannot
+ # react to the ACPI shutdown request the reboot relies on
+ my $qmpstatus = eval {
+ PVE::QemuConfig::assert_config_exists_on_node($vmid);
+ mon_cmd($vmid, "query-status");
+ };
+ # FIX: 'my $err = $@ if $@;' is the documented-as-undefined
+ # "my with statement modifier" construct; $@ is '' (falsy) on success,
+ # so a plain assignment is equivalent and well-defined.
+ my $err = $@;
+
+ if (!$err && $qmpstatus->{status} eq "paused") {
+ # FIX: message said "cannot shutdown" in the reboot code path
+ die "VM is paused - cannot reboot\n";
+ }
+
+ die "VM $vmid not running\n" if !PVE::QemuServer::check_running($vmid);
+
+ my $realcmd = sub {
+ my $upid = shift;
+
+ syslog('info', "requesting reboot of VM $vmid: $upid\n");
+ PVE::QemuServer::vm_reboot($vmid, $param->{timeout});
+ return;
+ };
+
+ return $rpcenv->fork_worker('qmreboot', $vmid, $authuser, $realcmd);
+ }});
+
__PACKAGE__->register_method({
name => 'vm_suspend',
path => '{vmid}/status/suspend',
vmid => get_standard_option('pve-vmid',
{ completion => \&PVE::QemuServer::complete_vmid_running }),
skiplock => get_standard_option('skiplock'),
+ todisk => {
+ type => 'boolean',
+ default => 0,
+ optional => 1,
+ description => 'If set, suspends the VM to disk. Will be resumed on next VM start.',
+ },
+ statestorage => get_standard_option('pve-storage-id', {
+ description => "The storage for the VM state",
+ requires => 'todisk',
+ optional => 1,
+ completion => \&PVE::Storage::complete_storage_enabled,
+ }),
},
},
returns => {
my ($param) = @_;
my $rpcenv = PVE::RPCEnvironment::get();
-
my $authuser = $rpcenv->get_user();
my $node = extract_param($param, 'node');
-
my $vmid = extract_param($param, 'vmid');
+ my $todisk = extract_param($param, 'todisk') // 0;
+
+ my $statestorage = extract_param($param, 'statestorage');
+
my $skiplock = extract_param($param, 'skiplock');
raise_param_exc({ skiplock => "Only root may use this option." })
if $skiplock && $authuser ne 'root@pam';
die "VM $vmid not running\n" if !PVE::QemuServer::check_running($vmid);
+ die "Cannot suspend HA managed VM to disk\n"
+ if $todisk && PVE::HA::Config::vm_is_ha_managed($vmid);
+
my $realcmd = sub {
my $upid = shift;
syslog('info', "suspend VM $vmid: $upid\n");
- PVE::QemuServer::vm_suspend($vmid, $skiplock);
+ PVE::QemuServer::vm_suspend($vmid, $skiplock, $todisk, $statestorage);
return;
};
- return $rpcenv->fork_worker('qmsuspend', $vmid, $authuser, $realcmd);
+ my $taskname = $todisk ? 'qmsuspend' : 'qmpause';
+ return $rpcenv->fork_worker($taskname, $vmid, $authuser, $realcmd);
}});
__PACKAGE__->register_method({
my $nocheck = extract_param($param, 'nocheck');
- die "VM $vmid not running\n" if !PVE::QemuServer::check_running($vmid, $nocheck);
+ my $to_disk_suspended;
+ eval {
+ PVE::QemuConfig->lock_config($vmid, sub {
+ my $conf = PVE::QemuConfig->load_config($vmid);
+ $to_disk_suspended = PVE::QemuConfig->has_lock($conf, 'suspended');
+ });
+ };
+
+ die "VM $vmid not running\n"
+ if !$to_disk_suspended && !PVE::QemuServer::check_running($vmid, $nocheck);
my $realcmd = sub {
my $upid = shift;
syslog('info', "resume VM $vmid: $upid\n");
- PVE::QemuServer::vm_resume($vmid, $skiplock, $nocheck);
+ if (!$to_disk_suspended) {
+ PVE::QemuServer::vm_resume($vmid, $skiplock, $nocheck);
+ } else {
+ my $storecfg = PVE::Storage::config();
+ PVE::QemuServer::vm_start($storecfg, $vmid, undef, $skiplock);
+ }
return;
};
description => "Target node. Only allowed if the original VM is on shared storage.",
optional => 1,
}),
+ bwlimit => {
+ description => "Override I/O bandwidth limit (in KiB/s).",
+ optional => 1,
+ type => 'integer',
+ minimum => '0',
+ default => 'clone limit from datacenter or storage config',
+ },
},
},
returns => {
PVE::Storage::activate_volumes($storecfg, $vollist, $snapname);
+ my $bwlimit = extract_param($param, 'bwlimit');
+
my $total_jobs = scalar(keys %{$drives});
my $i = 1;
my $drive = $drives->{$opt};
my $skipcomplete = ($total_jobs != $i); # finish after last drive
+ my $src_sid = PVE::Storage::parse_volume_id($drive->{file});
+ my $storage_list = [ $src_sid ];
+ push @$storage_list, $storage if defined($storage);
+ my $clonelimit = PVE::Storage::get_bandwidth_limit('clone', $storage_list, $bwlimit);
+
my $newdrive = PVE::QemuServer::clone_disk($storecfg, $vmid, $running, $opt, $drive, $snapname,
$newid, $storage, $format, $fullclone->{$opt}, $newvollist,
- $jobs, $skipcomplete, $oldconf->{agent});
+ $jobs, $skipcomplete, $oldconf->{agent}, $clonelimit);
$newconf->{$opt} = PVE::QemuServer::print_drive($vmid, $newdrive);
maxLength => 40,
optional => 1,
},
+ bwlimit => {
+ description => "Override I/O bandwidth limit (in KiB/s).",
+ optional => 1,
+ type => 'integer',
+ minimum => '0',
+ default => 'move limit from datacenter or storage config',
+ },
},
},
returns => {
warn "moving disk with snapshots, snapshots will not be moved!\n"
if $snapshotted;
+ my $bwlimit = extract_param($param, 'bwlimit');
+ my $movelimit = PVE::Storage::get_bandwidth_limit('move', [$oldstoreid, $storeid], $bwlimit);
+
my $newdrive = PVE::QemuServer::clone_disk($storecfg, $vmid, $running, $disk, $drive, undef,
- $vmid, $storeid, $format, 1, $newvollist);
+ $vmid, $storeid, $format, 1, $newvollist, undef, undef, undef, $movelimit);
$conf->{$disk} = PVE::QemuServer::print_drive($vmid, $newdrive);
PVE::QemuConfig->write_config($vmid, $conf);
if ($running && PVE::QemuServer::parse_guest_agent($conf)->{fstrim_cloned_disks} && PVE::QemuServer::qga_check_running($vmid)) {
- eval { PVE::QemuServer::vm_mon_cmd($vmid, "guest-fstrim"); };
+ eval { mon_cmd($vmid, "guest-fstrim"); };
}
eval {
return PVE::QemuConfig->lock_config($vmid, $updatefn);
}});
+# Collect every volume of the given VM config that resides on local
+# (non-shared) storage. Returns a hashref mapping $volid => attribute
+# hashref (augmented with ->{volid}); consumed by the migration
+# precondition API to report disks that need to be copied or block
+# migration. NOTE(review): relies on foreach_volid visiting unused and
+# snapshot volumes too — confirm against PVE::QemuServer.
+my $check_vm_disks_local = sub {
+ my ($storecfg, $vmconf, $vmid) = @_;
+
+ my $local_disks = {};
+
+ # add some more information to the disks e.g. cdrom
+ PVE::QemuServer::foreach_volid($vmconf, sub {
+ my ($volid, $attr) = @_;
+
+ # noerr parse: $storeid stays undef for non-storage volids (e.g. raw paths)
+ my ($storeid, $volname) = PVE::Storage::parse_volume_id($volid, 1);
+ if ($storeid) {
+ my $scfg = PVE::Storage::storage_config($storecfg, $storeid);
+ return if $scfg->{shared};
+ }
+ # The shared attr here is just a special case where the vdisk
+ # is marked as shared manually
+ return if $attr->{shared};
+ return if $attr->{cdrom} and $volid eq "none";
+
+ if (exists $local_disks->{$volid}) {
+ # same volid referenced more than once: merge attrs via hash slice
+ @{$local_disks->{$volid}}{keys %$attr} = values %$attr
+ } else {
+ $local_disks->{$volid} = $attr;
+ # ensure volid is present in case it's needed
+ $local_disks->{$volid}->{volid} = $volid;
+ }
+ });
+
+ return $local_disks;
+};
+
+# API: GET /nodes/{node}/qemu/{vmid}/migrate
+# Reports migration preconditions: whether the VM runs, which target nodes
+# can take it offline (local storage availability), its local disks and
+# local resources (pci/usb/...). Read-only; performs no migration.
+__PACKAGE__->register_method({
+ name => 'migrate_vm_precondition',
+ path => '{vmid}/migrate',
+ method => 'GET',
+ protected => 1,
+ proxyto => 'node',
+ description => "Get preconditions for migration.",
+ permissions => {
+ check => ['perm', '/vms/{vmid}', [ 'VM.Migrate' ]],
+ },
+ parameters => {
+ additionalProperties => 0,
+ properties => {
+ node => get_standard_option('pve-node'),
+ vmid => get_standard_option('pve-vmid', { completion => \&PVE::QemuServer::complete_vmid }),
+ target => get_standard_option('pve-node', {
+ description => "Target node.",
+ completion => \&PVE::Cluster::complete_migration_target,
+ optional => 1,
+ }),
+ },
+ },
+ returns => {
+ type => "object",
+ properties => {
+ running => { type => 'boolean' },
+ allowed_nodes => {
+ type => 'array',
+ optional => 1,
+ description => "List nodes allowed for offline migration, only passed if VM is offline"
+ },
+ not_allowed_nodes => {
+ type => 'object',
+ optional => 1,
+ description => "List not allowed nodes with additional information, only passed if VM is offline"
+ },
+ local_disks => {
+ type => 'array',
+ description => "List local disks including CD-Rom, unused and not referenced disks"
+ },
+ local_resources => {
+ type => 'array',
+ description => "List local resources e.g. pci, usb"
+ }
+ },
+ },
+ code => sub {
+ my ($param) = @_;
+
+ my $rpcenv = PVE::RPCEnvironment::get();
+ my $authuser = $rpcenv->get_user();
+
+ PVE::Cluster::check_cfs_quorum();
+
+ my $res = {};
+
+ my $vmid = extract_param($param, 'vmid');
+ my $target = extract_param($param, 'target');
+ my $localnode = PVE::INotify::nodename();
+
+ # test if VM exists
+ my $vmconf = PVE::QemuConfig->load_config($vmid);
+ my $storecfg = PVE::Storage::config();
+
+ # try to detect errors early
+ PVE::QemuConfig->check_lock($vmconf);
+
+ $res->{running} = PVE::QemuServer::check_running($vmid) ? 1 : 0;
+
+ # if vm is not running, return target nodes where local storage is available
+ # for offline migration
+ if (!$res->{running}) {
+ $res->{allowed_nodes} = [];
+ my $checked_nodes = PVE::QemuServer::check_local_storage_availability($vmconf, $storecfg);
+ delete $checked_nodes->{$localnode};
+
+ foreach my $node (keys %$checked_nodes) {
+ push @{$res->{allowed_nodes}}, $node
+ if !defined($checked_nodes->{$node}->{unavailable_storages});
+ }
+ $res->{not_allowed_nodes} = $checked_nodes;
+ }
+
+ my $local_disks = $check_vm_disks_local->($storecfg, $vmconf, $vmid);
+ $res->{local_disks} = [ values %$local_disks ];
+
+ $res->{local_resources} = PVE::QemuServer::check_local_resources($vmconf, 1);
+
+ return $res;
+ }});
+
__PACKAGE__->register_method({
name => 'migrate_vm',
path => '{vmid}/migrate',
properties => {
node => get_standard_option('pve-node'),
vmid => get_standard_option('pve-vmid', { completion => \&PVE::QemuServer::complete_vmid }),
- target => get_standard_option('pve-node', {
+ target => get_standard_option('pve-node', {
description => "Target node.",
completion => \&PVE::Cluster::complete_migration_target,
}),
online => {
type => 'boolean',
- description => "Use online/live migration.",
+ description => "Use online/live migration if VM is running. Ignored if VM is stopped.",
optional => 1,
},
force => {
targetstorage => get_standard_option('pve-storage-id', {
description => "Default target storage.",
optional => 1,
- completion => \&PVE::QemuServer::complete_storage,
+ completion => \&PVE::QemuServer::complete_migration_storage,
}),
+ bwlimit => {
+ description => "Override I/O bandwidth limit (in KiB/s).",
+ optional => 1,
+ type => 'integer',
+ minimum => '0',
+ default => 'migrate limit from datacenter or storage config',
+ },
},
},
returns => {
my ($param) = @_;
my $rpcenv = PVE::RPCEnvironment::get();
-
my $authuser = $rpcenv->get_user();
my $target = extract_param($param, 'target');
my $vmid = extract_param($param, 'vmid');
- raise_param_exc({ targetstorage => "Live storage migration can only be done online." })
- if !$param->{online} && $param->{targetstorage};
-
raise_param_exc({ force => "Only root may use this option." })
if $param->{force} && $authuser ne 'root@pam';
PVE::QemuConfig->check_lock($conf);
if (PVE::QemuServer::check_running($vmid)) {
- die "cant migrate running VM without --online\n"
- if !$param->{online};
+ die "can't migrate running VM without --online\n" if !$param->{online};
+ } else {
+ warn "VM isn't running. Doing offline migration instead.\n" if $param->{online};
+ $param->{online} = 0;
}
+ raise_param_exc({ targetstorage => "Live storage migration can only be done online." })
+ if !$param->{online} && $param->{targetstorage};
+
my $storecfg = PVE::Storage::config();
if( $param->{targetstorage}) {
my $hacmd = sub {
my $upid = shift;
- my $service = "vm:$vmid";
-
- my $cmd = ['ha-manager', 'migrate', $service, $target];
-
print "Requesting HA migration for VM $vmid to node $target\n";
+ my $cmd = ['ha-manager', 'migrate', "vm:$vmid", $target];
PVE::Tools::run_command($cmd);
-
return;
};
my $res = '';
eval {
- $res = PVE::QemuServer::vm_human_monitor_command($vmid, $param->{command});
+ $res = PVE::QemuServer::Monitor::hmp_cmd($vmid, $param->{command});
};
$res = "ERROR: $@" if $@;
my (undef, undef, undef, undef, undef, undef, $format) =
PVE::Storage::parse_volname($storecfg, $drive->{file});
- die "can't resize volume: $disk if snapshot exists\n"
+ die "can't resize volume: $disk if snapshot exists\n"
if %{$conf->{snapshots}} && $format eq 'qcow2';
my $volid = $drive->{file};
PVE::Storage::activate_volumes($storecfg, [$volid]);
my $size = PVE::Storage::volume_size_info($storecfg, $volid, 5);
+ die "Could not determine current size of volume '$volid'\n" if !defined($size);
+
die "internal error" if $sizestr !~ m/^(\+)?(\d+(\.\d+)?)([KMGT])?$/;
my ($ext, $newsize, $unit) = ($1, $2, $4);
if ($unit) {
my $realcmd = sub {
PVE::Cluster::log_msg('info', $authuser, "snapshot VM $vmid: $snapname");
- PVE::QemuConfig->snapshot_create($vmid, $snapname, $param->{vmstate},
+ PVE::QemuConfig->snapshot_create($vmid, $snapname, $param->{vmstate},
$param->{description});
};
return undef;
}});
+__PACKAGE__->register_method({
+ name => 'cloudinit_generated_config_dump',
+ path => '{vmid}/cloudinit/dump',
+ method => 'GET',
+ proxyto => 'node', # execute on the node that hosts the VM
+ description => "Get automatically generated cloudinit config.",
+ permissions => {
+ check => ['perm', '/vms/{vmid}', [ 'VM.Audit' ]],
+ },
+ parameters => {
+ additionalProperties => 0,
+ properties => {
+ node => get_standard_option('pve-node'),
+ vmid => get_standard_option('pve-vmid', { completion => \&PVE::QemuServer::complete_vmid }),
+ type => {
+ description => 'Config type.',
+ type => 'string',
+ enum => ['user', 'network', 'meta'], # the three cloudinit config sections
+ },
+ },
+ },
+ returns => {
+ type => 'string',
+ },
+ code => sub {
+ my ($param) = @_;
+
+ my $conf = PVE::QemuConfig->load_config($param->{vmid}); # load the VM configuration
+ # render the requested cloudinit section as plain text
+ return PVE::QemuServer::Cloudinit::dump_cloudinit_config($conf, $param->{vmid}, $param->{type});
+ }});
+
1;