use PVE::JSONSchema qw(get_standard_option parse_property_string);
use PVE::ProcFSTools;
use PVE::PBSClient;
+use PVE::RESTEnvironment qw(log_warn);
use PVE::RPCEnvironment;
use PVE::Storage;
use PVE::SysFSTools;
optional => 1,
});
-PVE::JSONSchema::register_standard_option('pve-targetstorage', {
- description => "Mapping from source to target storages. Providing only a single storage ID maps all source storages to that storage. Providing the special value '1' will map each source storage to itself.",
- type => 'string',
- format => 'storage-pair-list',
- optional => 1,
-});
-
#no warnings 'redefine';
my $nodename_cache;
# sometimes, just plain disable...
my $lvm_no_io_uring = $scfg && $scfg->{type} eq 'lvm';
+ # io_uring causes problems when used with CIFS since kernel 5.15
+ # Some discussion: https://www.spinics.net/lists/linux-cifs/msg26734.html
+ my $cifs_no_io_uring = $scfg && $scfg->{type} eq 'cifs';
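+ # storages flagged here fall through to the non-io_uring defaults in the else branch below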
+
if (!$drive->{aio}) {
- if ($io_uring && !$rbd_no_io_uring && !$lvm_no_io_uring) {
+ if ($io_uring && !$rbd_no_io_uring && !$lvm_no_io_uring && !$cifs_no_io_uring) {
# io_uring supports all cache modes
$opts .= ",aio=io_uring";
} else {
if (drive_is_cloudinit($drive)) {
eval { PVE::Storage::vdisk_free($storecfg, $drive->{file}) };
warn $@ if $@;
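+ # also drop the special cloudinit section so no stale generated state lingers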
+ delete $conf->{cloudinit};
} elsif (!drive_is_cdrom($drive)) {
my $volid = $drive->{file};
if (vm_is_volid_owner($storecfg, $vmid, $volid)) {
digest => Digest::SHA::sha1_hex($raw),
snapshots => {},
pending => {},
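+ # tracks the config used for the currently generated cloudinit image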
+ cloudinit => {},
};
my $handle_error = sub {
$descr = undef;
$conf = $res->{$section} = {};
next;
+ } elsif ($line =~ m/^\[special:cloudinit\]\s*$/i) {
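+ # 'special:' can never collide with a snapshot name, which the regex below limits to [a-z][a-z0-9_-]+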
+ $section = 'cloudinit';
+ $descr = undef;
+ $conf = $res->{$section} = {};
+ next;
} elsif ($line =~ m/^\[([a-z][a-z0-9_\-]+)\]\s*$/i) {
$section = $1;
foreach my $key (keys %$cref) {
next if $key eq 'digest' || $key eq 'description' || $key eq 'snapshots' ||
- $key eq 'snapstate' || $key eq 'pending';
+ $key eq 'snapstate' || $key eq 'pending' || $key eq 'cloudinit';
my $value = $cref->{$key};
if ($key eq 'delete') {
die "propertry 'delete' is only allowed in [PENDING]\n"
&$cleanup_config($conf->{pending}, 1);
+ &$cleanup_config($conf->{cloudinit});
+
foreach my $snapname (keys %{$conf->{snapshots}}) {
die "internal error: snapshot name '$snapname' is forbidden" if lc($snapname) eq 'pending';
&$cleanup_config($conf->{snapshots}->{$snapname}, undef, $snapname);
}
foreach my $key (sort keys %$conf) {
- next if $key =~ /^(digest|description|pending|snapshots)$/;
+ next if $key =~ /^(digest|description|pending|cloudinit|snapshots)$/;
$raw .= "$key: $conf->{$key}\n";
}
return $raw;
$raw .= &$generate_raw_config($conf->{pending}, 1);
}
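+ # emit the tracked cloudinit state as its own special section, analogous to [PENDING] above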
+ if (scalar(keys %{$conf->{cloudinit}})) {
+ $raw .= "\n[special:cloudinit]\n";
+ $raw .= &$generate_raw_config($conf->{cloudinit});
+ }
+
foreach my $snapname (sort keys %{$conf->{snapshots}}) {
$raw .= "\n[$snapname]\n";
$raw .= &$generate_raw_config($conf->{snapshots}->{$snapname});
return \@flags;
}
-my sub get_cpuunits {
- my ($conf) = @_;
- my $is_cgroupv2 = PVE::CGroup::cgroup_mode() == 2;
-
- my $cpuunits = $conf->{cpuunits};
- return $is_cgroupv2 ? 100 : 1024 if !defined($cpuunits);
-
- if ($is_cgroupv2) {
- $cpuunits = 10000 if $cpuunits >= 10000; # v1 can be higher, so clamp v2 there
- } else {
- $cpuunits = 2 if $cpuunits < 2; # v2 can be lower, so clamp v1 there
- }
- return $cpuunits;
-}
-
# Since commit 277d33454f77ec1d1e0bc04e37621e4dd2424b67 in pve-qemu, smm is not off by default
# anymore. But smm=off seems to be required when using SeaBIOS and serial display.
my sub should_disable_smm {
$read_only_str = ',readonly=on' if drive_is_read_only($conf, $d);
} else {
- warn "no efidisk configured! Using temporary efivars disk.\n";
+ log_warn("no efidisk configured! Using temporary efivars disk.");
$path = "/tmp/$vmid-ovmf.fd";
PVE::Tools::file_copy($ovmf_vars, $path, -s $ovmf_vars);
$format = 'raw';
die "skip\n" if !$hotplug_features->{memory};
PVE::QemuServer::Memory::qemu_memory_hotplug($vmid, $conf, $defaults, $opt);
} elsif ($opt eq 'cpuunits') {
- $cgroup->change_cpu_shares(undef, 1024);
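+ # undef resets the shares; the cgroup module knows the default for the active version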
+ $cgroup->change_cpu_shares(undef);
} elsif ($opt eq 'cpulimit') {
$cgroup->change_cpu_quota(undef, undef); # reset, cgroup module can better decide values
} else {
die "skip\n" if !$hotplug_features->{memory};
$value = PVE::QemuServer::Memory::qemu_memory_hotplug($vmid, $conf, $defaults, $opt, $value);
} elsif ($opt eq 'cpuunits') {
- my $new_cpuunits = get_cpuunits({ $opt => $conf->{pending}->{$opt} }); # to clamp
- $cgroup->change_cpu_shares($new_cpuunits, 1024);
+ my $new_cpuunits = PVE::CGroup::clamp_cpu_shares($conf->{pending}->{$opt}); # to clamp
+ $cgroup->change_cpu_shares($new_cpuunits);
} elsif ($opt eq 'cpulimit') {
my $cpulimit = $conf->{pending}->{$opt} == 0 ? -1 : int($conf->{pending}->{$opt} * 100000);
$cgroup->change_cpu_quota($cpulimit, 100000);
PVE::QemuServer::PCI::reserve_pci_usage($pci_id_list, $vmid, $start_timeout);
eval {
+ my $uuid;
for my $id (sort keys %$pci_devices) {
my $d = $pci_devices->{$id};
for my $dev ($d->{pciid}->@*) {
- PVE::QemuServer::PCI::prepare_pci_device($vmid, $dev->{id}, $id, $d->{mdev});
+ my $info = PVE::QemuServer::PCI::prepare_pci_device($vmid, $dev->{id}, $id, $d->{mdev});
+
+ # nvidia grid needs the uuid of the mdev passed as a qemu parameter
+ if ($d->{mdev} && !defined($uuid) && $info->{vendor} eq '10de') {
+ $uuid = PVE::QemuServer::PCI::generate_mdev_uuid($vmid, $id);
+ }
}
}
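+ # '-uuid' sets the guest's SMBIOS uuid; the grid driver expects it to match the mdev uuid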
+ push @$cmd, '-uuid', $uuid if defined($uuid);
};
if (my $err = $@) {
eval { PVE::QemuServer::PCI::remove_pci_reservation($pci_id_list) };
# timeout should be more than enough here...
PVE::Systemd::wait_for_unit_removed("$vmid.scope", 20);
- my $cpuunits = get_cpuunits($conf);
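+ # clamp_cpu_shares() bounds the value for the active cgroup version, replacing the local get_cpuunits() helper removed above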
+ my $cpuunits = PVE::CGroup::clamp_cpu_shares($conf->{cpuunits});
my %run_params = (
timeout => $statefile ? undef : $start_timeout,