use PVE::GuestHelpers;
use PVE::QemuConfig;
use PVE::QemuServer;
+use PVE::QemuServer::Drive;
+use PVE::QemuServer::CPUConfig;
use PVE::QemuServer::Monitor qw(mon_cmd);
use PVE::QemuMigrate;
use PVE::RPCEnvironment;
my $check_storage_access = sub {
my ($rpcenv, $authuser, $storecfg, $vmid, $settings, $default_storage) = @_;
- PVE::QemuServer::foreach_drive($settings, sub {
+ PVE::QemuConfig->foreach_volume($settings, sub {
my ($ds, $drive) = @_;
my $isCDROM = PVE::QemuServer::drive_is_cdrom($drive);
my $sharedvm = 1;
- PVE::QemuServer::foreach_drive($conf, sub {
+ PVE::QemuConfig->foreach_volume($conf, sub {
my ($ds, $drive) = @_;
my $isCDROM = PVE::QemuServer::drive_is_cdrom($drive);
}
};
- eval { PVE::QemuServer::foreach_drive($settings, $code); };
+ eval { PVE::QemuConfig->foreach_volume($settings, $code); };
# free allocated images on error
if (my $err = $@) {
return $vollist;
};
+# Verify that $authuser may use the CPU model requested in $new->{cpu}.
+# $existing (optional) is the VM's current configuration; if the requested
+# cputype equals the one already configured, no extra permission is needed.
+# Only custom CPU models (per is_custom_model) require a privilege check;
+# built-in models and the default pass through unconditionally.
+my $check_cpu_model_access = sub {
+ my ($rpcenv, $authuser, $new, $existing) = @_;
+
+ return if !defined($new->{cpu});
+
+ # parse the property string; an unparsable or type-less value is treated
+ # as the default and always allowed
+ my $cpu = PVE::JSONSchema::check_format('pve-vm-cpu-conf', $new->{cpu});
+ return if !$cpu || !$cpu->{cputype}; # always allow default
+ my $cputype = $cpu->{cputype};
+
+ if ($existing && $existing->{cpu}) {
+ # changing only other settings doesn't require permissions for CPU model
+ my $existingCpu = PVE::JSONSchema::check_format('pve-vm-cpu-conf', $existing->{cpu});
+ return if $existingCpu->{cputype} eq $cputype;
+ }
+
+ # custom models require Sys.Audit on /nodes; $rpcenv->check dies on failure
+ if (PVE::QemuServer::CPUConfig::is_custom_model($cputype)) {
+ $rpcenv->check($authuser, "/nodes", ['Sys.Audit']);
+ }
+};
+
my $cpuoptions = {
'cores' => 1,
'cpu' => 1,
return $res;
}});
+# Classify a restore archive reference and return a descriptor hash for it.
+# $archive is either a storage volume identifier or a plain file path.
+# Returns { type => 'pbs', volid => ... } for volumes on a Proxmox Backup
+# Server storage, otherwise { type => 'file', path => ... } with the
+# absolute filesystem path resolved via the storage layer.
+my $parse_restore_archive = sub {
+ my ($storecfg, $archive) = @_;
+
+ # second arg 1 = don't die if $archive is not a volume id (plain path case)
+ my ($archive_storeid, $archive_volname) = PVE::Storage::parse_volume_id($archive, 1);
+
+ if (defined($archive_storeid)) {
+ my $scfg = PVE::Storage::storage_config($storecfg, $archive_storeid);
+ if ($scfg->{type} eq 'pbs') {
+ return {
+ type => 'pbs',
+ volid => $archive,
+ };
+ }
+ }
+ # non-PBS volume ids and raw paths are both resolved to a local file path
+ my $path = PVE::Storage::abs_filesystem_path($storecfg, $archive);
+ return {
+ type => 'file',
+ path => $path,
+ };
+};
__PACKAGE__->register_method({
node => get_standard_option('pve-node'),
vmid => get_standard_option('pve-vmid', { completion => \&PVE::Cluster::complete_next_vmid }),
archive => {
- description => "The backup file.",
+ description => "The backup archive. Either the file system path to a .tar or .vma file (use '-' to pipe data from stdin) or a proxmox storage backup volume identifier.",
type => 'string',
optional => 1,
maxLength => 255,
&$check_vm_modify_config_perm($rpcenv, $authuser, $vmid, $pool, [ keys %$param]);
+ &$check_cpu_model_access($rpcenv, $authuser, $param);
+
foreach my $opt (keys %$param) {
if (PVE::QemuServer::is_valid_drivename($opt)) {
my $drive = PVE::QemuServer::parse_drive($opt, $param->{$opt});
if ($archive eq '-') {
die "pipe requires cli environment\n"
if $rpcenv->{type} ne 'cli';
+ $archive = { type => 'pipe' };
} else {
PVE::Storage::check_volume_access($rpcenv, $authuser, $storecfg, $vmid, $archive);
- $archive = PVE::Storage::abs_filesystem_path($storecfg, $archive);
+
+ $archive = $parse_restore_archive->($storecfg, $archive);
}
}
die "$emsg vm is running\n" if PVE::QemuServer::check_running($vmid);
my $realcmd = sub {
- PVE::QemuServer::restore_archive($archive, $vmid, $authuser, {
+ my $restore_options = {
storage => $storage,
pool => $pool,
unique => $unique,
bwlimit => $bwlimit,
- });
+ };
+ if ($archive->{type} eq 'file' || $archive->{type} eq 'pipe') {
+ PVE::QemuServer::restore_file_archive($archive->{path} // '-', $vmid, $authuser, $restore_options);
+ } elsif ($archive->{type} eq 'pbs') {
+ PVE::QemuServer::restore_proxmox_backup_archive($archive->{volid}, $vmid, $authuser, $restore_options);
+ } else {
+ die "unknown backup archive type\n";
+ }
my $restored_conf = PVE::QemuConfig->load_config($vmid);
# Convert restored VM to template if backup was VM template
if (PVE::QemuConfig->is_template($restored_conf)) {
$vollist = &$create_disks($rpcenv, $authuser, $conf, $arch, $storecfg, $vmid, $pool, $param, $storage);
if (!$conf->{bootdisk}) {
- my $firstdisk = PVE::QemuServer::resolve_first_disk($conf);
+ my $firstdisk = PVE::QemuServer::Drive::resolve_first_disk($conf);
$conf->{bootdisk} = $firstdisk if $firstdisk;
}
path => '{vmid}/config',
method => 'GET',
proxyto => 'node',
- description => "Get current virtual machine configuration. This does not include pending configuration changes (see 'pending' API).",
+ description => "Get the virtual machine configuration with pending configuration " .
+ "changes applied. Set the 'current' parameter to get the current configuration instead.",
permissions => {
check => ['perm', '/vms/{vmid}', [ 'VM.Audit' ]],
},
},
},
returns => {
- description => "The current VM configuration.",
+ description => "The VM configuration.",
type => "object",
properties => PVE::QemuServer::json_config_properties({
digest => {
path => '{vmid}/pending',
method => 'GET',
proxyto => 'node',
- description => "Get virtual machine configuration, including pending changes.",
+ description => "Get the virtual machine configuration with both current and pending values.",
permissions => {
check => ['perm', '/vms/{vmid}', [ 'VM.Audit' ]],
},
return if PVE::QemuServer::drive_is_cdrom($drive);
my ($storeid, $volname) = PVE::Storage::parse_volume_id($volid, 1);
- return if $volname eq 'cloudinit';
+ die "cannot add non-managed/pass-through volume to a replicated VM\n"
+ if !defined($storeid);
+
+ return if defined($volname) && $volname eq 'cloudinit';
my $format;
if ($volid =~ $NEW_DISK_RE) {
die "checksum missmatch (file change by other user?)\n"
if $digest && $digest ne $conf->{digest};
+ &$check_cpu_model_access($rpcenv, $authuser, $param, $conf);
+
# FIXME: 'suspended' lock should probably be a state or "weak" lock?!
if (scalar(@delete) && grep { $_ eq 'vmstate'} @delete) {
if (defined($conf->{lock}) && $conf->{lock} eq 'suspended') {
push @delete, 'lock'; # this is the real deal to write it out
}
push @delete, 'runningmachine' if $conf->{runningmachine};
+ push @delete, 'runningcpu' if $conf->{runningcpu};
}
PVE::QemuConfig->check_lock($conf) if !$skiplock;
raise_param_exc({ skiplock => "Only root may use this option." })
if $skiplock && $authuser ne 'root@pam';
- # test if VM exists
- my $conf = PVE::QemuConfig->load_config($vmid);
- my $storecfg = PVE::Storage::config();
- PVE::QemuConfig->check_protection($conf, "can't remove VM $vmid");
- die "unable to remove VM $vmid - used in HA resources\n"
- if PVE::HA::Config::vm_is_ha_managed($vmid);
-
- if (!$param->{purge}) {
- # don't allow destroy if with replication jobs but no purge param
- my $repl_conf = PVE::ReplicationConfig->new();
- $repl_conf->check_for_existing_jobs($vmid);
- }
+ # Pre-destroy sanity checks; captures $vmid and $param from the enclosing
+ # handler. Dies if the VM is protected, is running, or (without the
+ # 'purge' parameter) is referenced by HA resources or replication jobs.
+ # Returns whether the VM is HA-managed, so the purge path can also drop
+ # the HA service entry. Called once up front and repeated under the
+ # config lock, since the state may change in between.
+ my $early_checks = sub {
+ # test if VM exists
+ my $conf = PVE::QemuConfig->load_config($vmid);
+ PVE::QemuConfig->check_protection($conf, "can't remove VM $vmid");
+
+ my $ha_managed = PVE::HA::Config::service_is_configured("vm:$vmid");
- # early tests (repeat after locking)
- die "VM $vmid is running - destroy failed\n"
- if PVE::QemuServer::check_running($vmid);
+ if (!$param->{purge}) {
+ die "unable to remove VM $vmid - used in HA resources and purge parameter not set.\n"
+ if $ha_managed;
+ # don't allow destroy if with replication jobs but no purge param
+ my $repl_conf = PVE::ReplicationConfig->new();
+ $repl_conf->check_for_existing_jobs($vmid);
+ }
+
+ die "VM $vmid is running - destroy failed\n"
+ if PVE::QemuServer::check_running($vmid);
+
+ return $ha_managed;
+ };
+
+ $early_checks->();
my $realcmd = sub {
my $upid = shift;
+ my $storecfg = PVE::Storage::config();
+
syslog('info', "destroy VM $vmid: $upid\n");
PVE::QemuConfig->lock_config($vmid, sub {
- die "VM $vmid is running - destroy failed\n"
- if (PVE::QemuServer::check_running($vmid));
+ # repeat, config might have changed
+ my $ha_managed = $early_checks->();
PVE::QemuServer::destroy_vm($storecfg, $vmid, $skiplock, { lock => 'destroyed' });
PVE::AccessControl::remove_vm_access($vmid);
PVE::Firewall::remove_vmfw_conf($vmid);
if ($param->{purge}) {
+ print "purging VM $vmid from related configurations..\n";
PVE::ReplicationConfig::remove_vmid_jobs($vmid);
PVE::VZDump::Plugin::remove_vmid_from_backup_jobs($vmid);
+
+ if ($ha_managed) {
+ PVE::HA::Config::delete_service_from_config("vm:$vmid");
+ print "NOTE: removed VM $vmid from HA resource configuration.\n";
+ }
}
# only now remove the zombie config, else we can have reuse race
my $websocket = $param->{websocket};
my $conf = PVE::QemuConfig->load_config($vmid, $node); # check if VM exists
- my $use_serial = ($conf->{vga} && ($conf->{vga} =~ m/^serial\d+$/));
+
+ my $serial;
+ if ($conf->{vga}) {
+ my $vga = PVE::QemuServer::parse_vga($conf->{vga});
+ $serial = $vga->{type} if $vga->{type} =~ m/^serial\d+$/;
+ }
my $authpath = "/vms/$vmid";
(undef, $family) = PVE::Cluster::remote_node_ip($node);
my $sshinfo = PVE::SSHInfo::get_ssh_info($node);
# NOTE: kvm VNC traffic is already TLS encrypted or is known unsecure
- $remcmd = PVE::SSHInfo::ssh_info_to_command($sshinfo, $use_serial ? '-t' : '-T');
+ $remcmd = PVE::SSHInfo::ssh_info_to_command($sshinfo, defined($serial) ? '-t' : '-T');
} else {
$family = PVE::Tools::get_host_address_family($node);
}
my $cmd;
- if ($use_serial) {
+ if (defined($serial)) {
- my $termcmd = [ '/usr/sbin/qm', 'terminal', $vmid, '-iface', $conf->{vga}, '-escape', '0' ];
+ my $termcmd = [ '/usr/sbin/qm', 'terminal', $vmid, '-iface', $serial, '-escape', '0' ];
$cmd = ['/usr/bin/vncterm', '-rfbport', $port,
'-timeout', $timeout, '-authpath', $authpath,
optional => 1,
},
machine => get_standard_option('pve-qemu-machine'),
- targetstorage => {
- description => "Target storage for the migration. (Can be '1' to use the same storage id as on the source node.)",
+ 'force-cpu' => {
+ description => "Override QEMU's -cpu argument with the given string.",
type => 'string',
- optional => 1
+ optional => 1,
},
+ targetstorage => get_standard_option('pve-targetstorage'),
timeout => {
description => "Wait maximal timeout seconds.",
type => 'integer',
my $timeout = extract_param($param, 'timeout');
my $machine = extract_param($param, 'machine');
+ my $force_cpu = extract_param($param, 'force-cpu');
my $get_root_param = sub {
my $value = extract_param($param, $_[0]);
my $migration_network = $get_root_param->('migration_network');
my $targetstorage = $get_root_param->('targetstorage');
- raise_param_exc({ targetstorage => "targetstorage can only by used with migratedfrom." })
- if $targetstorage && !$migratedfrom;
+ my $storagemap;
+
+ if ($targetstorage) {
+ raise_param_exc({ targetstorage => "targetstorage can only by used with migratedfrom." })
+ if !$migratedfrom;
+ $storagemap = eval { PVE::JSONSchema::parse_idmap($targetstorage, 'pve-storage-id') };
+ raise_param_exc({ targetstorage => "failed to parse storage map: $@" })
+ if $@;
+ }
# read spice ticket from STDIN
my $spice_ticket;
+ my $nbd_protocol_version = 0;
+ my $replicated_volumes = {};
if ($stateuri && ($stateuri eq 'tcp' || $stateuri eq 'unix') && $migratedfrom && ($rpcenv->{type} eq 'cli')) {
- if (defined(my $line = <STDIN>)) {
+ while (defined(my $line = <STDIN>)) {
chomp $line;
- $spice_ticket = $line;
+ if ($line =~ m/^spice_ticket: (.+)$/) {
+ $spice_ticket = $1;
+ } elsif ($line =~ m/^nbd_protocol_version: (\d+)$/) {
+ $nbd_protocol_version = $1;
+ } elsif ($line =~ m/^replicated_volume: (.*)$/) {
+ $replicated_volumes->{$1} = 1;
+ } else {
+ # fallback for old source node
+ $spice_ticket = $line;
+ }
}
}
syslog('info', "start VM $vmid: $upid\n");
- PVE::QemuServer::vm_start($storecfg, $vmid, $stateuri, $skiplock, $migratedfrom, undef, $machine,
- $spice_ticket, $migration_network, $migration_type, $targetstorage, $timeout);
+ my $migrate_opts = {
+ migratedfrom => $migratedfrom,
+ spice_ticket => $spice_ticket,
+ network => $migration_network,
+ type => $migration_type,
+ storagemap => $storagemap,
+ nbd_proto_version => $nbd_protocol_version,
+ replicated_volumes => $replicated_volumes,
+ };
+
+ my $params = {
+ statefile => $stateuri,
+ skiplock => $skiplock,
+ forcemachine => $machine,
+ timeout => $timeout,
+ forcecpu => $force_cpu,
+ };
+
+ PVE::QemuServer::vm_start($storecfg, $vmid, $params, $migrate_opts);
return;
};
if $skiplock && $authuser ne 'root@pam';
my $nocheck = extract_param($param, 'nocheck');
+ raise_param_exc({ nocheck => "Only root may use this option." })
+ if $nocheck && $authuser ne 'root@pam';
my $to_disk_suspended;
eval {
PVE::QemuServer::vm_resume($vmid, $skiplock, $nocheck);
} else {
my $storecfg = PVE::Storage::config();
- PVE::QemuServer::vm_start($storecfg, $vmid, undef, $skiplock);
+ PVE::QemuServer::vm_start($storecfg, $vmid, { skiplock => $skiplock });
}
return;
my $localnode = PVE::INotify::nodename();
- if ($target eq $localnode || $target eq 'localhost') {
+ if ($target && ($target eq $localnode || $target eq 'localhost')) {
undef $target;
}
my $running = PVE::QemuServer::check_running($vmid) || 0;
- # exclusive lock if VM is running - else shared lock is enough;
- my $shared_lock = $running ? 0 : 1;
-
my $clonefn = sub {
# do all tests after lock but before forking worker - if possible
foreach my $opt (keys %$drives) {
my $drive = $drives->{$opt};
my $skipcomplete = ($total_jobs != $i); # finish after last drive
+ my $completion = $skipcomplete ? 'skip' : 'complete';
my $src_sid = PVE::Storage::parse_volume_id($drive->{file});
my $storage_list = [ $src_sid ];
my $newdrive = PVE::QemuServer::clone_disk($storecfg, $vmid, $running, $opt, $drive, $snapname,
$newid, $storage, $format, $fullclone->{$opt}, $newvollist,
- $jobs, $skipcomplete, $oldconf->{agent}, $clonelimit);
+ $jobs, $completion, $oldconf->{agent}, $clonelimit, $oldconf);
$newconf->{$opt} = PVE::QemuServer::print_drive($newdrive);
return $rpcenv->fork_worker('qmclone', $vmid, $authuser, $realcmd);
};
- return PVE::QemuConfig->lock_config_mode($vmid, 1, $shared_lock, sub {
- # Aquire exclusive lock lock for $newid
+ # Acquire exclusive lock for $newid
+ my $lock_target_vm = sub {
return PVE::QemuConfig->lock_config_full($newid, 1, $clonefn);
- });
+ };
+ # exclusive lock if VM is running - else shared lock is enough;
+ if ($running) {
+ return PVE::QemuConfig->lock_config_full($vmid, 1, $lock_target_vm);
+ } else {
+ return PVE::QemuConfig->lock_config_shared($vmid, 1, $lock_target_vm);
+ }
}});
__PACKAGE__->register_method({
disk => {
type => 'string',
description => "The disk you want to move.",
- enum => [ PVE::QemuServer::valid_drive_names() ],
+ enum => [PVE::QemuServer::Drive::valid_drive_names()],
},
storage => get_standard_option('pve-storage-id', {
description => "Target storage.",
(!$format || !$oldfmt || $oldfmt eq $format);
# this only checks snapshots because $disk is passed!
- my $snapshotted = PVE::QemuServer::is_volume_in_use($storecfg, $conf, $disk, $old_volid);
+ my $snapshotted = PVE::QemuServer::Drive::is_volume_in_use($storecfg, $conf, $disk, $old_volid);
die "you can't move a disk with snapshots and delete the source\n"
if $snapshotted && $param->{delete};
my $movelimit = PVE::Storage::get_bandwidth_limit('move', [$oldstoreid, $storeid], $bwlimit);
my $newdrive = PVE::QemuServer::clone_disk($storecfg, $vmid, $running, $disk, $drive, undef,
- $vmid, $storeid, $format, 1, $newvollist, undef, undef, undef, $movelimit);
+ $vmid, $storeid, $format, 1, $newvollist, undef, undef, undef, $movelimit, $conf);
$conf->{$disk} = PVE::QemuServer::print_drive($newdrive);
description => "Enable live storage migration for local disk",
optional => 1,
},
- targetstorage => get_standard_option('pve-storage-id', {
- description => "Default target storage.",
- optional => 1,
+ targetstorage => get_standard_option('pve-targetstorage', {
completion => \&PVE::QemuServer::complete_migration_storage,
}),
bwlimit => {
$param->{online} = 0;
}
- raise_param_exc({ targetstorage => "Live storage migration can only be done online." })
- if !$param->{online} && $param->{targetstorage};
-
my $storecfg = PVE::Storage::config();
- if( $param->{targetstorage}) {
- PVE::Storage::storage_check_node($storecfg, $param->{targetstorage}, $target);
+ if (my $targetstorage = $param->{targetstorage}) {
+ my $check_storage = sub {
+ my ($target_sid) = @_;
+ PVE::Storage::storage_check_node($storecfg, $target_sid, $target);
+ $rpcenv->check($authuser, "/storage/$target_sid", ['Datastore.AllocateSpace']);
+ my $scfg = PVE::Storage::storage_config($storecfg, $target_sid);
+ raise_param_exc({ targetstorage => "storage '$target_sid' does not support vm images"})
+ if !$scfg->{content}->{images};
+ };
+
+ my $storagemap = eval { PVE::JSONSchema::parse_idmap($targetstorage, 'pve-storage-id') };
+ raise_param_exc({ targetstorage => "failed to parse storage map: $@" })
+ if $@;
+
+ $rpcenv->check_vm_perm($authuser, $vmid, undef, ['VM.Config.Disk'])
+ if !defined($storagemap->{identity});
+
+ foreach my $source (values %{$storagemap->{entries}}) {
+ $check_storage->($source);
+ }
+
+ $check_storage->($storagemap->{default})
+ if $storagemap->{default};
+
+ PVE::QemuServer::check_storage_availability($storecfg, $conf, $target)
+ if $storagemap->{identity};
+
+ $param->{storagemap} = $storagemap;
} else {
PVE::QemuServer::check_storage_availability($storecfg, $conf, $target);
}
disk => {
type => 'string',
description => "The disk you want to resize.",
- enum => [PVE::QemuServer::valid_drive_names()],
+ enum => [PVE::QemuServer::Drive::valid_drive_names()],
},
size => {
type => 'string',
optional => 1,
type => 'string',
description => "If you want to convert only 1 disk to base image.",
- enum => [PVE::QemuServer::valid_drive_names()],
+ enum => [PVE::QemuServer::Drive::valid_drive_names()],
},
},