+}
+
sub hugepages_mount {
    # Mount one hugetlbfs instance per hugepage size the host supports
    # (2MB and 1GB) under /run/hugepages/kvm/<size>kB, skipping sizes that
    # are already mounted according to /proc/mounts.

    my $mountdata = PVE::ProcFSTools::parse_proc_mounts();

    foreach my $size (qw(2048 1048576)) {
	# BUGFIX: was 'return', which aborted the whole loop when 2MB pages
	# are unsupported — a host with only 1GB hugepages then never got its
	# 1GB hugetlbfs mounted. 'next' skips only the unsupported size.
	next if (! -d "/sys/kernel/mm/hugepages/hugepages-${size}kB");

	my $path = "/run/hugepages/kvm/${size}kB";

	# mountdata entries are [device, mountpoint, fstype, ...]
	my $found = grep {
	    $_->[2] =~ /^hugetlbfs/ &&
	    $_->[1] eq $path
	} @$mountdata;

	if (!$found) {

	    File::Path::make_path($path) if (!-d $path);
	    # $size is in kB, so "${size}k" is the correct pagesize unit
	    my $cmd = ['/bin/mount', '-t', 'hugetlbfs', '-o', "pagesize=${size}k", 'hugetlbfs', $path];
	    run_command($cmd, errmsg => "hugepage mount error");
	}
    }
}
+
sub hugepages_mount_path {
    my ($size) = @_;

    # $size is in MB; the mount directories are named after the page size
    # in kB, matching the sysfs hugepages naming scheme.
    my $size_kb = $size * 1024;

    return "/run/hugepages/kvm/${size_kb}kB";
}
+
sub hugepages_nr {
    my ($total_mb, $page_mb) = @_;

    # number of hugepages of $page_mb MB needed to back $total_mb MB;
    # callers are expected to pass a $total_mb that is a multiple of $page_mb
    return $total_mb / $page_mb;
}
+
sub hugepages_size {
    my ($conf, $size) = @_;

    # Returns the hugepage size (in MB) to use for a memory area of $size MB.
    # Dies when hugepages are disabled or the host/size cannot satisfy the
    # configured page size.
    die "hugepages option is not enabled" if !$conf->{hugepages};

    if ($conf->{hugepages} eq 'any') {
	# prefer 1GB pages when the host exposes them and the size fits,
	# otherwise fall back to 2MB pages
	my $has_1gb = -d "/sys/kernel/mm/hugepages/hugepages-1048576kB";
	return ($has_1gb && ($size % 1024 == 0)) ? 1024 : 2;
    }

    # explicit page size requested (in MB); sysfs directories are named in kB
    my $hugepagesize = $conf->{hugepages} * 1024 . "kB";

    die "your system doesn't support hugepages of $hugepagesize"
	if !-d "/sys/kernel/mm/hugepages/hugepages-$hugepagesize";

    die "Memory size $size is not a multiple of the requested hugepages size $hugepagesize"
	if ($size % $conf->{hugepages}) != 0;

    return $conf->{hugepages};
}
+
sub hugepages_topology {
 my ($conf) = @_;

 # Build the per-NUMA-node hugepage demand for this VM config:
 # { page_size_mb => { numanode => page_count } }.
 # Returns nothing (undef) when NUMA is not enabled in the config.
 my $hugepages_topology = {};

 return if !$conf->{numa};

 my $defaults = PVE::QemuServer::load_defaults();
 my $memory = $conf->{memory} || $defaults->{memory};
 my $static_memory = 0;
 my $sockets = 1;
 $sockets = $conf->{smp} if $conf->{smp}; # old style - no longer used
 $sockets = $conf->{sockets} if $conf->{sockets};
 my $numa_custom_topology = undef;
 my $hotplug_features = PVE::QemuServer::parse_hotplug_features(defined($conf->{hotplug}) ? $conf->{hotplug} : '1');

 # with memory hotplug only the static base memory is hugepage-backed here;
 # hotpluggable DIMMs are accounted separately below
 if ($hotplug_features->{memory}) {
 $static_memory = $STATICMEM;
 # NOTE(review): with 1GB pages the static base is presumably 1GB *per
 # socket*, hence the multiplication — confirm against $STATICMEM's value
 $static_memory = $static_memory * $sockets if ($conf->{hugepages} && $conf->{hugepages} == 1024);
 } else {
 $static_memory = $memory;
 }

 #custom numa topology
 for (my $i = 0; $i < $MAX_NUMA; $i++) {
 next if !$conf->{"numa$i"};
 my $numa = PVE::QemuServer::parse_numa($conf->{"numa$i"});
 next if !$numa;

 $numa_custom_topology = 1;
 my $numa_memory = $numa->{memory};

 # hugepages_size() dies if $numa_memory is incompatible with the
 # configured hugepage size
 my $hugepages_size = hugepages_size($conf, $numa_memory);
 $hugepages_topology->{$hugepages_size}->{$i} += hugepages_nr($numa_memory, $hugepages_size);

 }

 #if no custom numa topology, we split memory and cores across numa nodes
 if(!$numa_custom_topology) {

 my $numa_memory = ($static_memory / $sockets);

 for (my $i = 0; $i < $sockets; $i++) {

 my $hugepages_size = hugepages_size($conf, $numa_memory);
 $hugepages_topology->{$hugepages_size}->{$i} += hugepages_nr($numa_memory, $hugepages_size);
 }
 }

 # add the demand of every hotpluggable DIMM on its target NUMA node
 if ($hotplug_features->{memory}) {
 foreach_dimm($conf, undef, $memory, $sockets, sub {
 my ($conf, undef, $name, $dimm_size, $numanode, $current_size, $memory) = @_;

 my $hugepages_size = hugepages_size($conf, $dimm_size);
 $hugepages_topology->{$hugepages_size}->{$numanode} += hugepages_nr($dimm_size, $hugepages_size);
 });
 }

 return $hugepages_topology;
}
+
sub hugepages_host_topology {
    # Read the host's current hugepage allocation from sysfs and return it as
    # { page_size_mb => { numanode => nr_hugepages } }.

    my $host_topology = {};

    dir_glob_foreach("/sys/devices/system/node/", 'node(\d+)', sub {
	my ($nodepath, $numanode) = @_;

	dir_glob_foreach("/sys/devices/system/node/$nodepath/hugepages/", 'hugepages\-(\d+)kB', sub {
	    my ($hugepages_path, $hugepages_size) = @_;

	    # sysfs reports the page size in kB; store it in MB so the keys
	    # match what hugepages_topology() produces
	    my $size_mb = $hugepages_size / 1024;
	    my $nr_file = "/sys/devices/system/node/$nodepath/hugepages/$hugepages_path/nr_hugepages";
	    $host_topology->{$size_mb}->{$numanode} = PVE::Tools::file_read_firstline($nr_file);
	});
    });

    return $host_topology;
}
+
sub hugepages_allocate {
 my ($hugepages_topology, $hugepages_host_topology) = @_;

 # Make sure every (size, node) pair in $hugepages_topology has enough free
 # hugepages, growing the kernel pools via sysfs where needed. On failure
 # the host pools are rolled back to $hugepages_host_topology and we die.
 #allocate new hupages if needed
 foreach my $size (sort keys %$hugepages_topology) {

 my $nodes = $hugepages_topology->{$size};

 foreach my $numanode (keys %$nodes) {

 # sysfs directories are named by page size in kB; $size is in MB
 my $hugepages_size = $size * 1024;
 my $hugepages_requested = $hugepages_topology->{$size}->{$numanode};
 my $path = "/sys/devices/system/node/node${numanode}/hugepages/hugepages-${hugepages_size}kB/";
 my $hugepages_free = PVE::Tools::file_read_firstline($path."free_hugepages");
 my $hugepages_nr = PVE::Tools::file_read_firstline($path."nr_hugepages");

 if ($hugepages_requested > $hugepages_free) {
 my $hugepages_needed = $hugepages_requested - $hugepages_free;
 PVE::ProcFSTools::write_proc_entry($path."nr_hugepages", $hugepages_nr + $hugepages_needed);
 # the kernel may silently allocate fewer pages than asked for
 # (e.g. due to memory fragmentation), so re-read and verify
 #verify that is correctly allocated
 $hugepages_free = PVE::Tools::file_read_firstline($path."free_hugepages");
 if ($hugepages_free < $hugepages_requested) {
 #rollback to initial host config
 hugepages_reset($hugepages_host_topology);
 die "hugepage allocation failed";
 }
 }

 }
 }

}
+
sub hugepages_pre_deallocate {
    my ($hugepages_topology) = @_;

    # Drop the global hugepage reservation for every page size we used by
    # writing 0 to the global (non per-node) nr_hugepages knob; per-node
    # counts are restored separately via hugepages_reset().
    foreach my $size (sort keys %$hugepages_topology) {

	# $size is in MB, sysfs directories are named in kB
	my $hugepages_size = $size * 1024;
	my $path = "/sys/kernel/mm/hugepages/hugepages-${hugepages_size}kB/";
	# CLEANUP: the old value of nr_hugepages was read here but never
	# used — the pointless sysfs read has been removed
	PVE::ProcFSTools::write_proc_entry($path."nr_hugepages", 0);
    }
}
+
sub hugepages_reset {
    my ($hugepages_topology) = @_;

    # Restore nr_hugepages on every NUMA node to the counts recorded in
    # $hugepages_topology (typically the saved host topology, for rollback).
    for my $size (sort keys %$hugepages_topology) {
	my $per_node = $hugepages_topology->{$size};
	# $size is in MB, sysfs directories are named in kB
	my $size_kb = $size * 1024;

	for my $numanode (keys %$per_node) {
	    my $sysfs_dir = "/sys/devices/system/node/node${numanode}/hugepages/hugepages-${size_kb}kB/";
	    PVE::ProcFSTools::write_proc_entry($sysfs_dir . "nr_hugepages", $per_node->{$numanode});
	}
    }
}
+
sub hugepages_update_locked {
    my ($code, @param) = @_;

    # Run $code->(@param) under a host-wide lock so concurrent VM starts do
    # not race on the kernel hugepage pools. The timeout is generous because
    # allocating many hugepages can take a while.
    my $lock_timeout = 60;

    my $res = lock_file("/var/lock/hugepages.lck", $lock_timeout, $code, @param);
    die $@ if $@;

    return $res;
}