]> git.proxmox.com Git - ceph.git/blob - ceph/src/ceph-volume/ceph_volume/util/disk.py
update sources to ceph Nautilus 14.2.1
[ceph.git] / ceph / src / ceph-volume / ceph_volume / util / disk.py
1 import logging
2 import os
3 import re
4 import stat
5 from ceph_volume import process
6 from ceph_volume.api import lvm
7 from ceph_volume.util.system import get_file_contents
8
9
10 logger = logging.getLogger(__name__)
11
12
13 # The blkid CLI tool has some oddities which prevents having one common call
14 # to extract the information instead of having separate utilities. The `udev`
15 # type of output is needed in older versions of blkid (v 2.23) that will not
16 # work correctly with just the ``-p`` flag to bypass the cache for example.
17 # Xenial doesn't have this problem as it uses a newer blkid version.
18
19
20 def get_partuuid(device):
21 """
22 If a device is a partition, it will probably have a PARTUUID on it that
23 will persist and can be queried against `blkid` later to detect the actual
24 device
25 """
26 out, err, rc = process.call(
27 ['blkid', '-s', 'PARTUUID', '-o', 'value', device]
28 )
29 return ' '.join(out).strip()
30
31
32 def _blkid_parser(output):
33 """
34 Parses the output from a system ``blkid`` call, requires output to be
35 produced using the ``-p`` flag which bypasses the cache, mangling the
36 names. These names are corrected to what it would look like without the
37 ``-p`` flag.
38
39 Normal output::
40
41 /dev/sdb1: UUID="62416664-cbaf-40bd-9689-10bd337379c3" TYPE="xfs" [...]
42 """
43 # first spaced separated item is garbage, gets tossed:
44 output = ' '.join(output.split()[1:])
45 # split again, respecting possible whitespace in quoted values
46 pairs = output.split('" ')
47 raw = {}
48 processed = {}
49 mapping = {
50 'UUID': 'UUID',
51 'TYPE': 'TYPE',
52 'PART_ENTRY_NAME': 'PARTLABEL',
53 'PART_ENTRY_UUID': 'PARTUUID',
54 'PTTYPE': 'PTTYPE',
55 }
56
57 for pair in pairs:
58 try:
59 column, value = pair.split('=')
60 except ValueError:
61 continue
62 raw[column] = value.strip().strip().strip('"')
63
64 for key, value in raw.items():
65 new_key = mapping.get(key)
66 if not new_key:
67 continue
68 processed[new_key] = value
69
70 return processed
71
72
73 def blkid(device):
74 """
75 The blkid interface to its CLI, creating an output similar to what is
76 expected from ``lsblk``. In most cases, ``lsblk()`` should be the preferred
77 method for extracting information about a device. There are some corner
78 cases where it might provide information that is otherwise unavailable.
79
80 The system call uses the ``-p`` flag which bypasses the cache, the caveat
81 being that the keys produced are named completely different to expected
82 names.
83
84 For example, instead of ``PARTLABEL`` it provides a ``PART_ENTRY_NAME``.
85 A bit of translation between these known keys is done, which is why
86 ``lsblk`` should always be preferred: the output provided here is not as
87 rich, given that a translation of keys is required for a uniform interface
88 with the ``-p`` flag.
89
90 Label name to expected output chart:
91
92 cache bypass name expected name
93
94 UUID UUID
95 TYPE TYPE
96 PART_ENTRY_NAME PARTLABEL
97 PART_ENTRY_UUID PARTUUID
98 """
99 out, err, rc = process.call(
100 ['blkid', '-p', device]
101 )
102 return _blkid_parser(' '.join(out))
103
104
105 def get_part_entry_type(device):
106 """
107 Parses the ``ID_PART_ENTRY_TYPE`` from the "low level" (bypasses the cache)
108 output that uses the ``udev`` type of output. This output is intended to be
109 used for udev rules, but it is useful in this case as it is the only
110 consistent way to retrieve the GUID used by ceph-disk to identify devices.
111 """
112 out, err, rc = process.call(['blkid', '-p', '-o', 'udev', device])
113 for line in out:
114 if 'ID_PART_ENTRY_TYPE=' in line:
115 return line.split('=')[-1].strip()
116 return ''
117
118
119 def get_device_from_partuuid(partuuid):
120 """
121 If a device has a partuuid, query blkid so that it can tell us what that
122 device is
123 """
124 out, err, rc = process.call(
125 ['blkid', '-t', 'PARTUUID="%s"' % partuuid, '-o', 'device']
126 )
127 return ' '.join(out).strip()
128
129
130 def remove_partition(device):
131 """
132 Removes a partition using parted
133
134 :param device: A ``Device()`` object
135 """
136 parent_device = '/dev/%s' % device.disk_api['PKNAME']
137 udev_info = udevadm_property(device.abspath)
138 partition_number = udev_info.get('ID_PART_ENTRY_NUMBER')
139 if not partition_number:
140 raise RuntimeError('Unable to detect the partition number for device: %s' % device.abspath)
141
142 process.run(
143 ['parted', parent_device, '--script', '--', 'rm', partition_number]
144 )
145
146
147 def _stat_is_device(stat_obj):
148 """
149 Helper function that will interpret ``os.stat`` output directly, so that other
150 functions can call ``os.stat`` once and interpret that result several times
151 """
152 return stat.S_ISBLK(stat_obj)
153
154
155 def _lsblk_parser(line):
156 """
157 Parses lines in lsblk output. Requires output to be in pair mode (``-P`` flag). Lines
158 need to be whole strings, the line gets split when processed.
159
160 :param line: A string, with the full line from lsblk output
161 """
162 # parse the COLUMN="value" output to construct the dictionary
163 pairs = line.split('" ')
164 parsed = {}
165 for pair in pairs:
166 try:
167 column, value = pair.split('=')
168 except ValueError:
169 continue
170 parsed[column] = value.strip().strip().strip('"')
171 return parsed
172
173
174 def device_family(device):
175 """
176 Returns a list of associated devices. It assumes that ``device`` is
177 a parent device. It is up to the caller to ensure that the device being
178 used is a parent, not a partition.
179 """
180 labels = ['NAME', 'PARTLABEL', 'TYPE']
181 command = ['lsblk', '-P', '-p', '-o', ','.join(labels), device]
182 out, err, rc = process.call(command)
183 devices = []
184 for line in out:
185 devices.append(_lsblk_parser(line))
186
187 return devices
188
189
190 def udevadm_property(device, properties=[]):
191 """
192 Query udevadm for information about device properties.
193 Optionally pass a list of properties to return. A requested property might
194 not be returned if not present.
195
196 Expected output format::
197 # udevadm info --query=property --name=/dev/sda :(
198 DEVNAME=/dev/sda
199 DEVTYPE=disk
200 ID_ATA=1
201 ID_BUS=ata
202 ID_MODEL=SK_hynix_SC311_SATA_512GB
203 ID_PART_TABLE_TYPE=gpt
204 ID_PART_TABLE_UUID=c8f91d57-b26c-4de1-8884-0c9541da288c
205 ID_PATH=pci-0000:00:17.0-ata-3
206 ID_PATH_TAG=pci-0000_00_17_0-ata-3
207 ID_REVISION=70000P10
208 ID_SERIAL=SK_hynix_SC311_SATA_512GB_MS83N71801150416A
209 TAGS=:systemd:
210 USEC_INITIALIZED=16117769
211 ...
212 """
213 out = _udevadm_info(device)
214 ret = {}
215 for line in out:
216 p, v = line.split('=', 1)
217 if not properties or p in properties:
218 ret[p] = v
219 return ret
220
221
222 def _udevadm_info(device):
223 """
224 Call udevadm and return the output
225 """
226 cmd = ['udevadm', 'info', '--query=property', device]
227 out, _err, _rc = process.call(cmd)
228 return out
229
230
231 def lsblk(device, columns=None, abspath=False):
232 """
233 Create a dictionary of identifying values for a device using ``lsblk``.
234 Each supported column is a key, in its *raw* format (all uppercase
235 usually). ``lsblk`` has support for certain "columns" (in blkid these
236 would be labels), and these columns vary between distributions and
237 ``lsblk`` versions. The newer versions support a richer set of columns,
238 while older ones were a bit limited.
239
240 These are a subset of lsblk columns which are known to work on both CentOS 7 and Xenial:
241
242 NAME device name
243 KNAME internal kernel device name
244 MAJ:MIN major:minor device number
245 FSTYPE filesystem type
246 MOUNTPOINT where the device is mounted
247 LABEL filesystem LABEL
248 UUID filesystem UUID
249 RO read-only device
250 RM removable device
251 MODEL device identifier
252 SIZE size of the device
253 STATE state of the device
254 OWNER user name
255 GROUP group name
256 MODE device node permissions
257 ALIGNMENT alignment offset
258 MIN-IO minimum I/O size
259 OPT-IO optimal I/O size
260 PHY-SEC physical sector size
261 LOG-SEC logical sector size
262 ROTA rotational device
263 SCHED I/O scheduler name
264 RQ-SIZE request queue size
265 TYPE device type
266 PKNAME internal parent kernel device name
267 DISC-ALN discard alignment offset
268 DISC-GRAN discard granularity
269 DISC-MAX discard max bytes
270 DISC-ZERO discard zeroes data
271
272 There is a bug in ``lsblk`` where using all the available (supported)
273 columns will result in no output (!), in order to workaround this the
274 following columns have been removed from the default reporting columns:
275
276 * RQ-SIZE (request queue size)
277 * MIN-IO minimum I/O size
278 * OPT-IO optimal I/O size
279
280 These should be available however when using `columns`. For example::
281
282 >>> lsblk('/dev/sda1', columns=['OPT-IO'])
283 {'OPT-IO': '0'}
284
285 Normal CLI output, as filtered by the flags in this function will look like ::
286
287 $ lsblk --nodeps -P -o NAME,KNAME,MAJ:MIN,FSTYPE,MOUNTPOINT
288 NAME="sda1" KNAME="sda1" MAJ:MIN="8:1" FSTYPE="ext4" MOUNTPOINT="/"
289
290 :param columns: A list of columns to report as keys in its original form.
291 :param abspath: Set the flag for absolute paths on the report
292 """
293 default_columns = [
294 'NAME', 'KNAME', 'MAJ:MIN', 'FSTYPE', 'MOUNTPOINT', 'LABEL', 'UUID',
295 'RO', 'RM', 'MODEL', 'SIZE', 'STATE', 'OWNER', 'GROUP', 'MODE',
296 'ALIGNMENT', 'PHY-SEC', 'LOG-SEC', 'ROTA', 'SCHED', 'TYPE', 'DISC-ALN',
297 'DISC-GRAN', 'DISC-MAX', 'DISC-ZERO', 'PKNAME', 'PARTLABEL'
298 ]
299 device = device.rstrip('/')
300 columns = columns or default_columns
301 # --nodeps -> Avoid adding children/parents to the device, only give information
302 # on the actual device we are querying for
303 # -P -> Produce pairs of COLUMN="value"
304 # -p -> Return full paths to devices, not just the names, when ``abspath`` is set
305 # -o -> Use the columns specified or default ones provided by this function
306 base_command = ['lsblk', '--nodeps', '-P']
307 if abspath:
308 base_command.append('-p')
309 base_command.append('-o')
310 base_command.append(','.join(columns))
311 base_command.append(device)
312 out, err, rc = process.call(base_command)
313
314 if rc != 0:
315 return {}
316
317 return _lsblk_parser(' '.join(out))
318
319
320 def is_device(dev):
321 """
322 Boolean to determine if a given device is a block device (**not**
323 a partition!)
324
325 For example: /dev/sda would return True, but not /dev/sdc1
326 """
327 if not os.path.exists(dev):
328 return False
329 # use lsblk first, fall back to using stat
330 TYPE = lsblk(dev).get('TYPE')
331 if TYPE:
332 return TYPE == 'disk'
333
334 # fallback to stat
335 return _stat_is_device(os.lstat(dev).st_mode)
336 if stat.S_ISBLK(os.lstat(dev)):
337 return True
338 return False
339
340
341 def is_partition(dev):
342 """
343 Boolean to determine if a given device is a partition, like /dev/sda1
344 """
345 if not os.path.exists(dev):
346 return False
347 # use lsblk first, fall back to using stat
348 TYPE = lsblk(dev).get('TYPE')
349 if TYPE:
350 return TYPE == 'part'
351
352 # fallback to stat
353 stat_obj = os.stat(dev)
354 if _stat_is_device(stat_obj.st_mode):
355 return False
356
357 major = os.major(stat_obj.st_rdev)
358 minor = os.minor(stat_obj.st_rdev)
359 if os.path.exists('/sys/dev/block/%d:%d/partition' % (major, minor)):
360 return True
361 return False
362
363
364 def _map_dev_paths(_path, include_abspath=False, include_realpath=False):
365 """
366 Go through all the items in ``_path`` and map them to their absolute path::
367
368 {'sda': '/dev/sda'}
369
370 If ``include_abspath`` is set, then a reverse mapping is set as well::
371
372 {'sda': '/dev/sda', '/dev/sda': 'sda'}
373
374 If ``include_realpath`` is set then the same operation is done for any
375 links found when listing, these are *not* reversed to avoid clashing on
376 existing keys, but both abspath and basename can be included. For example::
377
378 {
379 'ceph-data': '/dev/mapper/ceph-data',
380 '/dev/mapper/ceph-data': 'ceph-data',
381 '/dev/dm-0': '/dev/mapper/ceph-data',
382 'dm-0': '/dev/mapper/ceph-data'
383 }
384
385
386 In case of possible exceptions the mapping is returned empty, and the
387 exception is logged.
388 """
389 mapping = {}
390 try:
391 dev_names = os.listdir(_path)
392 except (OSError, IOError):
393 logger.exception('unable to list block devices from: %s' % _path)
394 return {}
395
396 for dev_name in dev_names:
397 mapping[dev_name] = os.path.join(_path, dev_name)
398
399 if include_abspath:
400 for k, v in list(mapping.items()):
401 mapping[v] = k
402
403 if include_realpath:
404 for abspath in list(mapping.values()):
405 if not os.path.islink(abspath):
406 continue
407
408 realpath = os.path.realpath(abspath)
409 basename = os.path.basename(realpath)
410 mapping[basename] = abspath
411 if include_abspath:
412 mapping[realpath] = abspath
413
414 return mapping
415
416
417 def get_block_devs(sys_block_path="/sys/block", skip_loop=True):
418 """
419 Go through all the items in /sys/block and return them as a list.
420
421 The ``sys_block_path`` argument is set for easier testing and is not
422 required for proper operation.
423 """
424 devices = _map_dev_paths(sys_block_path).keys()
425 if skip_loop:
426 return [d for d in devices if not d.startswith('loop')]
427 return list(devices)
428
429
430 def get_dev_devs(dev_path="/dev"):
431 """
432 Go through all the items in /dev and return them as a list.
433
434 The ``dev_path`` argument is set for easier testing and is not
435 required for proper operation.
436 """
437 return _map_dev_paths(dev_path, include_abspath=True)
438
439
440 def get_mapper_devs(mapper_path="/dev/mapper"):
441 """
442 Go through all the items in /dev and return them as a list.
443
444 The ``dev_path`` argument is set for easier testing and is not
445 required for proper operation.
446 """
447 return _map_dev_paths(mapper_path, include_abspath=True, include_realpath=True)
448
449
450 class BaseFloatUnit(float):
451 """
452 Base class to support float representations of size values. Suffix is
453 computed on child classes by inspecting the class name
454 """
455
456 def __repr__(self):
457 return "<%s(%s)>" % (self.__class__.__name__, self.__float__())
458
459 def __str__(self):
460 return "{size:.2f} {suffix}".format(
461 size=self.__float__(),
462 suffix=self.__class__.__name__.split('Float')[-1]
463 )
464
465 def as_int(self):
466 return int(self.real)
467
468 def as_float(self):
469 return self.real
470
471
472 class FloatB(BaseFloatUnit):
473 pass
474
475
476 class FloatMB(BaseFloatUnit):
477 pass
478
479
480 class FloatGB(BaseFloatUnit):
481 pass
482
483
484 class FloatKB(BaseFloatUnit):
485 pass
486
487
488 class FloatTB(BaseFloatUnit):
489 pass
490
491
492 class Size(object):
493 """
494 Helper to provide an interface for different sizes given a single initial
495 input. Allows for comparison between different size objects, which avoids
496 the need to convert sizes before comparison (e.g. comparing megabytes
497 against gigabytes).
498
499 Common comparison operators are supported::
500
501 >>> hd1 = Size(gb=400)
502 >>> hd2 = Size(gb=500)
503 >>> hd1 > hd2
504 False
505 >>> hd1 < hd2
506 True
507 >>> hd1 == hd2
508 False
509 >>> hd1 == Size(gb=400)
510 True
511
512 The Size object can also be multiplied or divided::
513
514 >>> hd1
515 <Size(400.00 GB)>
516 >>> hd1 * 2
517 <Size(800.00 GB)>
518 >>> hd1
519 <Size(800.00 GB)>
520
521 Additions and subtractions are only supported between Size objects::
522
523 >>> Size(gb=224) - Size(gb=100)
524 <Size(124.00 GB)>
525 >>> Size(gb=1) + Size(mb=300)
526 <Size(1.29 GB)>
527
528 Can also display a human-readable representation, with automatic detection
529 on best suited unit, or alternatively, specific unit representation::
530
531 >>> s = Size(mb=2211)
532 >>> s
533 <Size(2.16 GB)>
534 >>> s.mb
535 <FloatMB(2211.0)>
536 >>> print "Total size: %s" % s.mb
537 Total size: 2211.00 MB
538 >>> print "Total size: %s" % s
539 Total size: 2.16 GB
540 """
541
542 def __init__(self, multiplier=1024, **kw):
543 self._multiplier = multiplier
544 # create a mapping of units-to-multiplier, skip bytes as that is
545 # calculated initially always and does not need to convert
546 aliases = [
547 [('kb', 'kilobytes'), self._multiplier],
548 [('mb', 'megabytes'), self._multiplier ** 2],
549 [('gb', 'gigabytes'), self._multiplier ** 3],
550 [('tb', 'terabytes'), self._multiplier ** 4],
551 ]
552 # and mappings for units-to-formatters, including bytes and aliases for
553 # each
554 format_aliases = [
555 [('b', 'bytes'), FloatB],
556 [('kb', 'kilobytes'), FloatKB],
557 [('mb', 'megabytes'), FloatMB],
558 [('gb', 'gigabytes'), FloatGB],
559 [('tb', 'terabytes'), FloatTB],
560 ]
561 self._formatters = {}
562 for key, value in format_aliases:
563 for alias in key:
564 self._formatters[alias] = value
565 self._factors = {}
566 for key, value in aliases:
567 for alias in key:
568 self._factors[alias] = value
569
570 for k, v in kw.items():
571 self._convert(v, k)
572 # only pursue the first occurrence
573 break
574
575 def _convert(self, size, unit):
576 """
577 Convert any size down to bytes so that other methods can rely on bytes
578 being available always, regardless of what they pass in, avoiding the
579 need for a mapping of every permutation.
580 """
581 if unit in ['b', 'bytes']:
582 self._b = size
583 return
584 factor = self._factors[unit]
585 self._b = float(size * factor)
586
587 def _get_best_format(self):
588 """
589 Go through all the supported units, and use the first one that is less
590 than 1024. This allows to represent size in the most readable format
591 available
592 """
593 for unit in ['b', 'kb', 'mb', 'gb', 'tb']:
594 if getattr(self, unit) > 1024:
595 continue
596 return getattr(self, unit)
597
598 def __repr__(self):
599 return "<Size(%s)>" % self._get_best_format()
600
601 def __str__(self):
602 return "%s" % self._get_best_format()
603
604 def __format__(self, spec):
605 return str(self._get_best_format()).__format__(spec)
606
607 def __lt__(self, other):
608 return self._b < other._b
609
610 def __le__(self, other):
611 return self._b <= other._b
612
613 def __eq__(self, other):
614 return self._b == other._b
615
616 def __ne__(self, other):
617 return self._b != other._b
618
619 def __ge__(self, other):
620 return self._b >= other._b
621
622 def __gt__(self, other):
623 return self._b > other._b
624
625 def __add__(self, other):
626 if isinstance(other, Size):
627 _b = self._b + other._b
628 return Size(b=_b)
629 raise TypeError('Cannot add "Size" object with int')
630
631 def __sub__(self, other):
632 if isinstance(other, Size):
633 _b = self._b - other._b
634 return Size(b=_b)
635 raise TypeError('Cannot subtract "Size" object from int')
636
637 def __mul__(self, other):
638 if isinstance(other, Size):
639 raise TypeError('Cannot multiply with "Size" object')
640 _b = self._b * other
641 return Size(b=_b)
642
643 def __truediv__(self, other):
644 if isinstance(other, Size):
645 return self._b / other._b
646 _b = self._b / other
647 return Size(b=_b)
648
649 def __div__(self, other):
650 if isinstance(other, Size):
651 return self._b / other._b
652 _b = self._b / other
653 return Size(b=_b)
654
655 def __getattr__(self, unit):
656 """
657 Calculate units on the fly, relies on the fact that ``bytes`` has been
658 converted at instantiation. Units that don't exist will trigger an
659 ``AttributeError``
660 """
661 try:
662 formatter = self._formatters[unit]
663 except KeyError:
664 raise AttributeError('Size object has not attribute "%s"' % unit)
665 if unit in ['b', 'bytes']:
666 return formatter(self._b)
667 try:
668 factor = self._factors[unit]
669 except KeyError:
670 raise AttributeError('Size object has not attribute "%s"' % unit)
671 return formatter(float(self._b) / factor)
672
673
674 def human_readable_size(size):
675 """
676 Take a size in bytes, and transform it into a human readable size with up
677 to two decimals of precision.
678 """
679 suffixes = ['B', 'KB', 'MB', 'GB', 'TB']
680 suffix_index = 0
681 while size > 1024:
682 suffix_index += 1
683 size = size / 1024.0
684 return "{size:.2f} {suffix}".format(
685 size=size,
686 suffix=suffixes[suffix_index])
687
688
689 def get_partitions_facts(sys_block_path):
690 partition_metadata = {}
691 for folder in os.listdir(sys_block_path):
692 folder_path = os.path.join(sys_block_path, folder)
693 if os.path.exists(os.path.join(folder_path, 'partition')):
694 contents = get_file_contents(os.path.join(folder_path, 'partition'))
695 if contents:
696 part = {}
697 partname = folder
698 part_sys_block_path = os.path.join(sys_block_path, partname)
699
700 part['start'] = get_file_contents(part_sys_block_path + "/start", 0)
701 part['sectors'] = get_file_contents(part_sys_block_path + "/size", 0)
702
703 part['sectorsize'] = get_file_contents(
704 part_sys_block_path + "/queue/logical_block_size")
705 if not part['sectorsize']:
706 part['sectorsize'] = get_file_contents(
707 part_sys_block_path + "/queue/hw_sector_size", 512)
708 part['size'] = human_readable_size(float(part['sectors']) * 512)
709 part['holders'] = []
710 for holder in os.listdir(part_sys_block_path + '/holders'):
711 part['holders'].append(holder)
712
713 partition_metadata[partname] = part
714 return partition_metadata
715
716
717 def is_mapper_device(device_name):
718 return device_name.startswith(('/dev/mapper', '/dev/dm-'))
719
720
721 def is_locked_raw_device(disk_path):
722 """
723 A device can be locked by a third party software like a database.
724 To detect that case, the device is opened in Read/Write and exclusive mode
725 """
726 open_flags = (os.O_RDWR | os.O_EXCL)
727 open_mode = 0
728 fd = None
729
730 try:
731 fd = os.open(disk_path, open_flags, open_mode)
732 except OSError:
733 return 1
734
735 try:
736 os.close(fd)
737 except OSError:
738 return 1
739
740 return 0
741
742
743 def get_devices(_sys_block_path='/sys/block', _dev_path='/dev', _mapper_path='/dev/mapper'):
744 """
745 Captures all available devices from /sys/block/, including its partitions,
746 along with interesting metadata like sectors, size, vendor,
747 solid/rotational, etc...
748
749 Returns a dictionary, where keys are the full paths to devices.
750
751 ..note:: dmapper devices get their path updated to what they link from, if
752 /dev/dm-0 is linked by /dev/mapper/ceph-data, then the latter gets
753 used as the key.
754
755 ..note:: loop devices, removable media, and logical volumes are never included.
756 """
757 # Portions of this detection process are inspired by some of the fact
758 # gathering done by Ansible in module_utils/facts/hardware/linux.py. The
759 # processing of metadata and final outcome *is very different* and fully
760 # imcompatible. There are ignored devices, and paths get resolved depending
761 # on dm devices, loop, and removable media
762
763 device_facts = {}
764
765 block_devs = get_block_devs(_sys_block_path)
766 dev_devs = get_dev_devs(_dev_path)
767 mapper_devs = get_mapper_devs(_mapper_path)
768
769 for block in block_devs:
770 sysdir = os.path.join(_sys_block_path, block)
771 metadata = {}
772
773 # Ensure that the diskname is an absolute path and that it never points
774 # to a /dev/dm-* device
775 diskname = mapper_devs.get(block) or dev_devs.get(block)
776 if not diskname:
777 continue
778
779 # If the mapper device is a logical volume it gets excluded
780 if is_mapper_device(diskname):
781 if lvm.is_lv(diskname):
782 continue
783
784 metadata['removable'] = get_file_contents(os.path.join(sysdir, 'removable'))
785 # Is the device read-only ?
786 metadata['ro'] = get_file_contents(os.path.join(sysdir, 'ro'))
787
788
789 for key in ['vendor', 'model', 'rev', 'sas_address', 'sas_device_handle']:
790 metadata[key] = get_file_contents(sysdir + "/device/" + key)
791
792 for key in ['sectors', 'size']:
793 metadata[key] = get_file_contents(os.path.join(sysdir, key), 0)
794
795 for key, _file in [('support_discard', '/queue/discard_granularity')]:
796 metadata[key] = get_file_contents(os.path.join(sysdir, _file))
797
798 metadata['partitions'] = get_partitions_facts(sysdir)
799
800 for key in ['rotational', 'nr_requests']:
801 metadata[key] = get_file_contents(sysdir + "/queue/" + key)
802
803 metadata['scheduler_mode'] = ""
804 scheduler = get_file_contents(sysdir + "/queue/scheduler")
805 if scheduler is not None:
806 m = re.match(r".*?(\[(.*)\])", scheduler)
807 if m:
808 metadata['scheduler_mode'] = m.group(2)
809
810 if not metadata['sectors']:
811 metadata['sectors'] = 0
812 size = metadata['sectors'] or metadata['size']
813 metadata['sectorsize'] = get_file_contents(sysdir + "/queue/logical_block_size")
814 if not metadata['sectorsize']:
815 metadata['sectorsize'] = get_file_contents(sysdir + "/queue/hw_sector_size", 512)
816 metadata['human_readable_size'] = human_readable_size(float(size) * 512)
817 metadata['size'] = float(size) * 512
818 metadata['path'] = diskname
819 metadata['locked'] = is_locked_raw_device(metadata['path'])
820
821 device_facts[diskname] = metadata
822 return device_facts