}
-my $nodename = PVE::INotify::nodename();
+my $nodename_cache;
+# Cached accessor for the local node's name. The hostname does not change
+# while the daemon is running, so call PVE::INotify::nodename() at most once
+# (lazily, via defined-or assignment) and reuse the result everywhere.
+# Returns: the node name string.
+sub nodename {
+ $nodename_cache //= PVE::INotify::nodename();
+ return $nodename_cache;
+}
my $cpu_vendor_list = {
# Intel CPUs
}
sub print_drive {
- my ($vmid, $drive) = @_;
+ my ($drive) = @_;
my $data = { %$drive };
delete $data->{$_} for qw(index interface);
return PVE::JSONSchema::print_property_string($data, $alldrive_fmt);
delete $res->{host};
foreach my $id (@idlist) {
my $devs = PVE::SysFSTools::lspci($id);
- if (!scalar(@$devs)) {
- die "no pci device found for '$id'\n";
- }
+ die "no PCI device found for '$id'\n" if !scalar(@$devs);
push @{$res->{pciid}}, @$devs;
}
return $res;
my $v = parse_drive($key, $value);
if (my $volid = filename_to_volume_id($vmid, $v->{file}, $v->{media})) {
$v->{file} = $volid;
- $value = print_drive($vmid, $v);
+ $value = print_drive($v);
} else {
warn "vm $vmid - unable to parse value of '$key'\n";
next;
my $res = {};
return $res if !$vmlist || !$vmlist->{ids};
my $ids = $vmlist->{ids};
+ my $nodename = nodename();
foreach my $vmid (keys %$ids) {
my $d = $ids->{$vmid};
my $nodelist = PVE::Cluster::get_nodelist();
my $nodehash = { map { $_ => 1 } @$nodelist };
- my $nodename = PVE::INotify::nodename();
+ my $nodename = nodename();
foreach_drive($conf, sub {
my ($ds, $drive) = @_;
my $ostype = $conf->{ostype};
my $winversion = windows_version($ostype);
my $kvm = $conf->{kvm};
+ my $nodename = nodename();
my $arch = get_vm_arch($conf);
my $kvm_binary = get_command_for_arch($arch);
my $machine_version = PVE::QemuServer::Machine::extract_version($machine_type, $kvmver);
$kvm //= 1 if is_native($arch);
+ $machine_version =~ m/(\d+)\.(\d+)/;
+ die "Installed QEMU version '$kvmver' is too old to run machine type '$machine_type', please upgrade node '$nodename'\n"
+ if !PVE::QemuServer::min_version($kvmver, $1, $2);
+
if ($kvm) {
die "KVM virtualisation configured, but not available. Either disable in VM configuration or enable in BIOS.\n"
if !defined kvm_version();
my $pciaddr = print_pci_addr("spice", $bridges, $arch, $machine_type);
- my $nodename = PVE::INotify::nodename();
my $pfamily = PVE::Tools::get_host_address_family($nodename);
my @nodeaddrs = PVE::Tools::getaddrinfo_all('localhost', family => $pfamily);
die "failed to get an ip address of type $pfamily for 'localhost'\n" if !@nodeaddrs;
if ($changes) {
PVE::QemuConfig->write_config($vmid, $conf);
- $conf = PVE::QemuConfig->load_config($vmid); # update/reload
}
my $hotplug_features = parse_hotplug_features(defined($conf->{hotplug}) ? $conf->{hotplug} : '1');
if (my $err = $@) {
&$add_error($opt, $err) if $err ne "skip\n";
} else {
- # save new config if hotplug was successful
delete $conf->{$opt};
PVE::QemuConfig->remove_from_pending_delete($conf, $opt);
- PVE::QemuConfig->write_config($vmid, $conf);
- $conf = PVE::QemuConfig->load_config($vmid); # update/reload
}
}
if (my $err = $@) {
&$add_error($opt, $err) if $err ne "skip\n";
} else {
- # save new config if hotplug was successful
$conf->{$opt} = $value;
delete $conf->{pending}->{$opt};
- PVE::QemuConfig->write_config($vmid, $conf);
- $conf = PVE::QemuConfig->load_config($vmid); # update/reload
}
}
+
+ PVE::QemuConfig->write_config($vmid, $conf);
}
sub try_deallocate_drive {
sub vmconfig_apply_pending {
- my ($vmid, $conf, $storecfg) = @_;
+ my ($vmid, $conf, $storecfg, $errors) = @_;
+
+ my $add_apply_error = sub {
+ my ($opt, $msg) = @_;
+ my $err_msg = "unable to apply pending change $opt : $msg";
+ $errors->{$opt} = $err_msg;
+ warn $err_msg;
+ };
# cold plug
my $pending_delete_hash = PVE::QemuConfig->parse_pending_delete($conf->{pending}->{delete});
foreach my $opt (sort keys %$pending_delete_hash) {
- die "internal error" if $opt =~ m/^unused/;
my $force = $pending_delete_hash->{$opt}->{force};
- $conf = PVE::QemuConfig->load_config($vmid); # update/reload
- if (!defined($conf->{$opt})) {
- PVE::QemuConfig->remove_from_pending_delete($conf, $opt);
- PVE::QemuConfig->write_config($vmid, $conf);
- } elsif (is_valid_drivename($opt)) {
- vmconfig_delete_or_detach_drive($vmid, $storecfg, $conf, $opt, $force);
- PVE::QemuConfig->remove_from_pending_delete($conf, $opt);
- delete $conf->{$opt};
- PVE::QemuConfig->write_config($vmid, $conf);
+ eval {
+ die "internal error" if $opt =~ m/^unused/;
+ $conf = PVE::QemuConfig->load_config($vmid); # update/reload
+ if (!defined($conf->{$opt})) {
+ PVE::QemuConfig->remove_from_pending_delete($conf, $opt);
+ PVE::QemuConfig->write_config($vmid, $conf);
+ } elsif (is_valid_drivename($opt)) {
+ vmconfig_delete_or_detach_drive($vmid, $storecfg, $conf, $opt, $force);
+ PVE::QemuConfig->remove_from_pending_delete($conf, $opt);
+ delete $conf->{$opt};
+ PVE::QemuConfig->write_config($vmid, $conf);
+ } else {
+ PVE::QemuConfig->remove_from_pending_delete($conf, $opt);
+ delete $conf->{$opt};
+ PVE::QemuConfig->write_config($vmid, $conf);
+ }
+ };
+ if (my $err = $@) {
+ $add_apply_error->($opt, $err);
} else {
PVE::QemuConfig->remove_from_pending_delete($conf, $opt);
delete $conf->{$opt};
foreach my $opt (keys %{$conf->{pending}}) { # add/change
$conf = PVE::QemuConfig->load_config($vmid); # update/reload
- if (defined($conf->{$opt}) && ($conf->{$opt} eq $conf->{pending}->{$opt})) {
- # skip if nothing changed
- } elsif (is_valid_drivename($opt)) {
- vmconfig_register_unused_drive($storecfg, $vmid, $conf, parse_drive($opt, $conf->{$opt}))
- if defined($conf->{$opt});
- $conf->{$opt} = $conf->{pending}->{$opt};
+ eval {
+ if (defined($conf->{$opt}) && ($conf->{$opt} eq $conf->{pending}->{$opt})) {
+ # skip if nothing changed
+ } elsif (is_valid_drivename($opt)) {
+ vmconfig_register_unused_drive($storecfg, $vmid, $conf, parse_drive($opt, $conf->{$opt}))
+ if defined($conf->{$opt});
+ $conf->{$opt} = $conf->{pending}->{$opt};
+ } else {
+ $conf->{$opt} = $conf->{pending}->{$opt};
+ }
+ };
+ if (my $err = $@) {
+ $add_apply_error->($opt, $err);
} else {
- $conf->{$opt} = $conf->{pending}->{$opt};
+ $conf->{$opt} = delete $conf->{pending}->{$opt};
+ PVE::QemuConfig->cleanup_pending($conf);
}
- delete $conf->{pending}->{$opt};
PVE::QemuConfig->write_config($vmid, $conf);
}
}
my $newdrive = $drive;
$newdrive->{format} = $format;
$newdrive->{file} = $newvolid;
- my $drivestr = PVE::QemuServer::print_drive($vmid, $newdrive);
+ my $drivestr = print_drive($newdrive);
$local_volumes->{$opt} = $drivestr;
#pass drive to conf for command line
$conf->{$opt} = $drivestr;
if ($statefile eq 'tcp') {
my $localip = "localhost";
my $datacenterconf = PVE::Cluster::cfs_read_file('datacenter.cfg');
- my $nodename = PVE::INotify::nodename();
+ my $nodename = nodename();
if (!defined($migration_type)) {
if (defined($datacenterconf->{migration}->{type})) {
: $defaults->{cpuunits};
my $start_timeout = ($conf->{hugepages} || $is_suspended) ? 300 : 30;
- my %run_params = (timeout => $statefile ? undef : $start_timeout, umask => 0077);
+ my %run_params = (
+ timeout => $statefile ? undef : $start_timeout,
+ umask => 0077,
+ noerr => 1,
+ );
+
+ # when migrating, prefix QEMU output so other side can pick up any
+ # errors that might occur and show the user
+ if ($migratedfrom) {
+ $run_params{quiet} = 1;
+ $run_params{logfunc} = sub { print "QEMU: $_[0]\n" };
+ }
my %properties = (
Slice => 'qemu.slice',
my $run_qemu = sub {
PVE::Tools::run_fork sub {
PVE::Systemd::enter_systemd_scope($vmid, "Proxmox VE VM $vmid", %properties);
- run_command($cmd, %run_params);
+
+ my $exitcode = run_command($cmd, %run_params);
+ die "QEMU exited with code $exitcode\n" if $exitcode;
};
};
#start nbd server for storage migration
if ($targetstorage) {
- my $nodename = PVE::INotify::nodename();
+ my $nodename = nodename();
my $localip = $get_migration_ip->($migration_network, $nodename);
my $pfamily = PVE::Tools::get_host_address_family($nodename);
my $storage_migrate_port = PVE::Tools::next_migrate_port($pfamily);
});
}
+# note: if using the statestorage parameter, the caller has to check privileges
sub vm_suspend {
my ($vmid, $skiplock, $includestate, $statestorage) = @_;
$conf->{lock} = 'suspending';
my $date = strftime("%Y-%m-%d", localtime(time()));
$storecfg = PVE::Storage::config();
+ if (!$statestorage) {
+ $statestorage = find_vmstate_storage($conf, $storecfg);
+ # check permissions for the storage
+ my $rpcenv = PVE::RPCEnvironment::get();
+ if ($rpcenv->{type} ne 'cli') {
+ my $authuser = $rpcenv->get_user();
+ $rpcenv->check($authuser, "/storage/$statestorage", ['Datastore.AllocateSpace']);
+ }
+ }
+
+
$vmstate = PVE::QemuConfig->__snapshot_save_vmstate($vmid, $conf, "suspend-$date", $storecfg, $statestorage, 1);
$path = PVE::Storage::path($storecfg, $vmstate);
PVE::QemuConfig->write_config($vmid, $conf);
} elsif ($map->{$virtdev}) {
delete $di->{format}; # format can change on restore
$di->{file} = $map->{$virtdev};
- $value = print_drive($vmid, $di);
+ $value = print_drive($di);
print $outfd "$virtdev: $value\n";
} else {
print $outfd $line;
}
sub update_disksize {
+ # Sync a single drive's cached 'size' property with the size reported by
+ # the storage layer.
+ # Parameters:
+ #   $drive      - parsed drive hash (mutated in place when size changed)
+ #   $volid_hash - map of volume ID => { size => ..., ... } from storage scan
+ # Returns undef when there is nothing to update; otherwise the updated
+ # $drive in scalar context, or ($drive, $old_fmt, $new_fmt) in list context
+ # with human-readable old/new sizes for logging.
+ my ($drive, $volid_hash) = @_;
+
+ my $volid = $drive->{file};
+ # nothing to do for drives without a backing volume ID
+ return undef if !defined($volid);
+
+ my $oldsize = $drive->{size};
+ my $newsize = $volid_hash->{$volid}->{size};
+
+ # only touch the config when both sizes are known and actually differ
+ if (defined($newsize) && defined($oldsize) && $newsize != $oldsize) {
+ $drive->{size} = $newsize;
+
+ my $old_fmt = PVE::JSONSchema::format_size($oldsize);
+ my $new_fmt = PVE::JSONSchema::format_size($newsize);
+
+ return wantarray ? ($drive, $old_fmt, $new_fmt) : $drive;
+ }
+
+ return undef;
+}
+
+sub update_disk_config {
my ($vmid, $conf, $volid_hash) = @_;
my $changes;
my $volid = $drive->{file};
next if !$volid;
+ # mark volid as "in-use" for next step
$referenced->{$volid} = 1;
if ($volid_hash->{$volid} &&
(my $path = $volid_hash->{$volid}->{path})) {
next if drive_is_cdrom($drive);
next if !$volid_hash->{$volid};
- $drive->{size} = $volid_hash->{$volid}->{size};
- my $new = print_drive($vmid, $drive);
- if ($new ne $conf->{$opt}) {
+ my ($updated, $old_size, $new_size) = update_disksize($drive, $volid_hash);
+ if (defined($updated)) {
$changes = 1;
- $conf->{$opt} = $new;
- print "$prefix update disk '$opt' information.\n";
+ $conf->{$opt} = print_drive($updated);
+ print "$prefix size of disk '$volid' ($opt) updated from $old_size to $new_size\n";
}
}
}
my $volid = $conf->{$opt};
my $path = $volid_hash->{$volid}->{path} if $volid_hash->{$volid};
if ($referenced->{$volid} || ($path && $referencedpath->{$path})) {
- print "$prefix remove entry '$opt', its volume '$volid' is in use.\n";
+ print "$prefix remove entry '$opt', its volume '$volid' is in use\n";
$changes = 1;
delete $conf->{$opt};
}
next if $referencedpath->{$path};
$changes = 1;
my $key = PVE::QemuConfig->add_unused_volume($conf, $volid);
- print "$prefix add unreferenced volume '$volid' as '$key' to config.\n";
+ print "$prefix add unreferenced volume '$volid' as '$key' to config\n";
$referencedpath->{$path} = 1; # avoid to add more than once (aliases)
}
$vm_volids->{$volid} = $info if $info->{vmid} && $info->{vmid} == $vmid;
}
- my $changes = update_disksize($vmid, $conf, $vm_volids);
+ my $changes = update_disk_config($vmid, $conf, $vm_volids);
PVE::QemuConfig->write_config($vmid, $conf) if $changes && !$dryrun;
};
my $conffile = PVE::QemuConfig->config_file($vmid);
my $tmpfn = "$conffile.$$.tmp";
- # Note: $oldconf is undef if VM does not exists
+ # Note: $oldconf is undef if VM does not exist
my $cfs_path = PVE::QemuConfig->cfs_config_path($vmid);
my $oldconf = PVE::Cluster::cfs_read_file($cfs_path);
my $voliddst = PVE::Storage::vdisk_create_base($storecfg, $volid);
$drive->{file} = $voliddst;
- $conf->{$ds} = print_drive($vmid, $drive);
+ $conf->{$ds} = print_drive($drive);
PVE::QemuConfig->write_config($vmid, $conf);
});
}
return $firstdisk;
}
+# NOTE: if this logic changes, please update docs & possibly gui logic
+# Pick the storage to save a VM's state (vmstate) on.
+# Parameters:
+#   $conf     - VM configuration hash
+#   $storecfg - storage configuration
+# Returns a storage ID, chosen in this order:
+#   1. explicit 'vmstatestorage' from the VM config
+#   2. a shared storage already used by the VM (file-based preferred)
+#   3. a local storage already used by the VM (file-based preferred)
+#   4. the 'local' storage as last resort
+sub find_vmstate_storage {
+ my ($conf, $storecfg) = @_;
+
+ # first, return storage from conf if set
+ return $conf->{vmstatestorage} if $conf->{vmstatestorage};
+
+ my ($target, $shared, $local);
+
+ # classify each storage the VM uses as shared or local, remembering one
+ # candidate of each kind
+ foreach_storage_used_by_vm($conf, sub {
+ my ($sid) = @_;
+ my $scfg = PVE::Storage::storage_config($storecfg, $sid);
+ my $dst = $scfg->{shared} ? \$shared : \$local;
+ $$dst = $sid if !$$dst || $scfg->{path}; # prefer file based storage
+ });
+
+ # second, use shared storage where VM has at least one disk
+ # third, use local storage where VM has at least one disk
+ # fall back to local storage
+ $target = $shared // $local // 'local';
+
+ return $target;
+}
+
sub generate_uuid {
my ($uuid, $uuid_str);
UUID::generate($uuid);