my $ZPOOL = "/sbin/zpool";
my $SGDISK = "/sbin/sgdisk";
my $PVS = "/sbin/pvs";
+my $LVS = "/sbin/lvs";
my $UDEVADM = "/bin/udevadm";
sub verify_blockdev_path {
} elsif (defined($type) && $type eq 'text') {
$smartdata->{text} = '' if !defined $smartdata->{text};
$smartdata->{text} .= "$line\n";
+ # extract the wearout from NVMe plain-text output; allow for decimal values
+ if ($line =~ m/Percentage Used:.*?(\d+(?:\.\d+)?)\%/i) {
+ $smartdata->{wearout} = 100 - $1;
+ }
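+ # Example (hypothetical smartctl NVMe output): a line like
+ #   "Percentage Used:                    5%"
+ # results in $smartdata->{wearout} = 95, i.e. the remaining
+ # life in percent rather than the used-up share.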
} elsif ($line =~ m/SMART Disabled/) {
$smartdata->{health} = "SMART Disabled";
}
sub get_zfs_devices {
my $list = {};
+ return {} if ! -x $ZPOOL;
+
# use zpool and the partition type UUID, because log and
# cache devices do not have the ZFS type UUID
return $journalhash;
}
+# reads the lv_tags and matches them with the devices
+sub get_ceph_volume_infos {
+ my $result = {};
+
+ my $cmd = [ $LVS, '-S', 'lv_name=~^osd-', '-o', 'devices,lv_name,lv_tags',
+ '--noheadings', '--readonly', '--separator', ';' ];
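+ # for illustration, the equivalent shell invocation would be:
+ #   lvs -S 'lv_name=~^osd-' -o devices,lv_name,lv_tags --noheadings --readonly --separator ';'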
+
+ run_command($cmd, outfunc => sub {
+ my $line = shift;
+ $line =~ s/(?:^\s+)|(?:\s+$)//g; # trim whitespace
+
+ my $fields = [ split(';', $line) ];
+
+ # lvs prints the device as /dev/sdX(Y), where Y is the start extent (which we do not need)
+ my ($dev) = $fields->[0] =~ m|^(/dev/[a-z]+)|;
+ if ($fields->[1] =~ m|^osd-([^-]+)-|) {
+ my $type = $1;
+ # autovivification of $result is wanted here, to avoid creating empty hashes
+ if (($type eq 'block' || $type eq 'data') && $fields->[2] =~ m/ceph\.osd_id=([^,]+)/) {
+ $result->{$dev}->{osdid} = $1;
+ $result->{$dev}->{bluestore} = ($type eq 'block');
+ } else {
+ # undef++ becomes '1' (see `perldoc perlop`: Auto-increment)
+ $result->{$dev}->{$type}++;
+ }
+ }
+ });
+
+ return $result;
+}
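+# Example (hypothetical lvs output line):
+#   /dev/sdb(0);osd-block-01234567-89ab-cdef-0123-456789abcdef;ceph.osd_id=1,ceph.type=block
+# would be parsed into:
+#   $result->{'/dev/sdb'} = { osdid => 1, bluestore => 1 }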
+
sub get_udev_info {
my ($dev) = @_;
}
sub get_wear_leveling_info {
- my ($attributes, $model) = @_;
+ my ($smartdata, $model) = @_;
+ my $attributes = $smartdata->{attributes};
+
+ if (defined($smartdata->{wearout})) {
+ return $smartdata->{wearout};
+ }
my $wearout;
}
sub get_disks {
- my ($disk, $nosmart) = @_;
+ my ($disks, $nosmart) = @_;
my $disklist = {};
my $mounted = {};
};
my $journalhash = get_ceph_journals();
+ my $ceph_volume_infos = get_ceph_volume_infos();
my $zfslist = get_zfs_devices();
my $lvmlist = get_lvm_devices();
- # we get cciss/c0d0 but need cciss!c0d0
- if (defined($disk) && $disk =~ m|^cciss/|) {
- $disk =~ s|cciss/|cciss!|;
+ my $disk_regex = ".*";
+ if (defined($disks)) {
+ if (!ref($disks)) {
+ $disks = [ $disks ];
+ } elsif (ref($disks) ne 'ARRAY') {
+ die "disks is not a string or array reference\n";
+ }
+ # we get cciss/c0d0 but need cciss!c0d0
+ s|cciss/|cciss!| for @$disks;
+
+ $disk_regex = "(?:" . join('|', map { "\Q$_\E" } @$disks) . ")";
}
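+ # Example (hypothetical): passing $disks = ['sda', 'cciss/c0d0'] yields a
+ # $disk_regex matching exactly 'sda' and 'cciss!c0d0' below.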
- dir_glob_foreach('/sys/block', '.*', sub {
+ dir_glob_foreach('/sys/block', $disk_regex, sub {
my ($dev) = @_;
- return if defined($disk) && $disk ne $dev;
# whitelisting following devices
# hdX: ide block device
# sdX: sd block device
if ($type eq 'ssd') {
# if we have an ssd we try to get the wearout indicator
- my $wearval = get_wear_leveling_info($smartdata->{attributes}, $data->{model} || $sysdir->{model});
+ my $wearval = get_wear_leveling_info($smartdata, $data->{model} || $sysdata->{model});
$wearout = $wearval if $wearval;
}
};
$found_zfs = 1;
}
- if ($journalhash->{"$partpath/$part"}) {
- $journal_count++ if $journalhash->{"$partpath/$part"} == 1;
- $db_count++ if $journalhash->{"$partpath/$part"} == 2;
- $wal_count++ if $journalhash->{"$partpath/$part"} == 3;
- $bluestore = 1 if $journalhash->{"$partpath/$part"} == 4;
+ if (my $journal_part = $journalhash->{"$partpath/$part"}) {
+ $journal_count++ if $journal_part == 1;
+ $db_count++ if $journal_part == 2;
+ $wal_count++ if $journal_part == 3;
+ $bluestore = 1 if $journal_part == 4;
}
if (!dir_is_empty("$sysdir/$part/holders") && !$found_lvm) {
}
});
+ if (my $ceph_volume = $ceph_volume_infos->{$devpath}) {
+ $journal_count += $ceph_volume->{journal} // 0;
+ $db_count += $ceph_volume->{db} // 0;
+ $wal_count += $ceph_volume->{wal} // 0;
+ if (defined($ceph_volume->{osdid})) { # OSD IDs start at 0, so test for definedness
+ $osdid = $ceph_volume->{osdid};
+ $bluestore = 1 if $ceph_volume->{bluestore};
+ }
+ }
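+ # Example (hypothetical): a $ceph_volume_infos entry of { db => 2 } for
+ # this device adds two to $db_count; an entry carrying an osdid instead
+ # marks the device as an OSD (bluestore if backed by a 'block' LV).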
+
$used = 'mounted' if $found_mountpoints && !$used;
$used = 'LVM' if $found_lvm && !$used;
$used = 'ZFS' if $found_zfs && !$used;
return undef;
}
+sub append_partition {
+ my ($dev, $size) = @_;
+
+ my $devname = $dev;
+ $devname =~ s|^/dev/||;
+
+ my $newpartid = 1;
+ dir_glob_foreach("/sys/block/$devname", qr/\Q$devname\E.*?(\d+)/, sub {
+ my ($part, $partid) = @_;
+
+ if ($partid >= $newpartid) {
+ $newpartid = $partid + 1;
+ }
+ });
+
+ $size = PVE::Tools::convert_size($size, 'b' => 'mb');
+
+ run_command([ $SGDISK, '-n', "$newpartid:0:+${size}M", $dev ],
+ errmsg => "error creating partition '$newpartid' on '$dev'");
+
+ my $partition;
+
+ # loop again to detect the real partition device, which does not always
+ # follow a strict $devname$partition scheme (e.g. /dev/nvme0n1 -> /dev/nvme0n1p1)
+ dir_glob_foreach("/sys/block/$devname", qr/\Q$devname\E.*$newpartid/, sub {
+ my ($part) = @_;
+
+ $partition = "/dev/$part";
+ });
+
+ return $partition;
+}
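+# Example (hypothetical): if /dev/nvme0n1 currently ends at partition
+# /dev/nvme0n1p2, append_partition('/dev/nvme0n1', 4*1024**3) creates a
+# 4 GiB partition and returns '/dev/nvme0n1p3'.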
+
1;