# Source: ceph/src/ceph-volume/ceph_volume/util/device.py (git.proxmox.com mirror of ceph.git)
1 # -*- coding: utf-8 -*-
4 from functools
import total_ordering
5 from ceph_volume
import sys_info
, process
6 from ceph_volume
.api
import lvm
7 from ceph_volume
.util
import disk
, system
8 from ceph_volume
.util
.lsmdisk
import LSMDisk
9 from ceph_volume
.util
.constants
import ceph_disk_guids
12 {dev:<25} {size:<12} {rot!s:<7} {available!s:<9} {model}"""
def encryption_status(abspath):
    """
    Run ``encryption.status()`` for the given device path.

    The import happens inside the function to avoid a circular import
    (the encryption module imports from this module) and to keep the
    call easy to monkeypatch in tests.
    """
    from ceph_volume.util import encryption
    status = encryption.status
    return status(abspath)
25 class Devices(object):
27 A container for Device instances with reporting
30 def __init__(self
, filter_for_batch
=False, with_lsm
=False):
31 if not sys_info
.devices
:
32 sys_info
.devices
= disk
.get_devices()
33 self
.devices
= [Device(k
, with_lsm
) for k
in
34 sys_info
.devices
.keys()]
36 self
.devices
= [d
for d
in self
.devices
if d
.available_lvm_batch
]
38 def pretty_report(self
):
40 report_template
.format(
45 available
='available',
47 for device
in sorted(self
.devices
):
48 output
.append(device
.report())
49 return ''.join(output
)
51 def json_report(self
):
53 for device
in sorted(self
.devices
):
54 output
.append(device
.json_report())
71 pretty_report_sys_fields
= [
72 'human_readable_size',
82 # define some class variables; mostly to enable the use of autospec in
86 def __init__(self
, path
, with_lsm
=False):
88 # LVs can have a vg/lv path, while disks will have /dev/sda
99 self
._is
_lvm
_member
= None
101 self
.lsm_data
= self
.fetch_lsm(with_lsm
)
103 self
.available_lvm
, self
.rejected_reasons_lvm
= self
._check
_lvm
_reject
_reasons
()
104 self
.available_raw
, self
.rejected_reasons_raw
= self
._check
_raw
_reject
_reasons
()
105 self
.available
= self
.available_lvm
and self
.available_raw
106 self
.rejected_reasons
= list(set(self
.rejected_reasons_lvm
+
107 self
.rejected_reasons_raw
))
109 self
.device_id
= self
._get
_device
_id
()
111 def fetch_lsm(self
, with_lsm
):
113 Attempt to fetch libstoragemgmt (LSM) metadata, and return to the caller
114 as a dict. An empty dict is passed back to the caller if the target path
115 is not a block device, or lsm is unavailable on the host. Otherwise the
116 json returned will provide LSM attributes, and any associated errors that
117 lsm encountered when probing the device.
119 if not with_lsm
or not self
.exists
or not self
.is_device
:
122 lsm_disk
= LSMDisk(self
.path
)
124 return lsm_disk
.json_report()
def __lt__(self, other):
    """
    Order Device instances for sorting.

    Together with __eq__ this lets the @total_ordering decorator derive
    the remaining comparison operators (which can be slower than
    implementing every comparison by hand). Available devices sort
    before unavailable ones; ties are broken by string-sorting the
    device path.
    """
    if self.available != other.available:
        # exactly one of the two is available; it sorts first
        return self.available
    return self.path < other.path
def __eq__(self, other):
    """Two Device instances are equal when they refer to the same path."""
    return other.path == self.path
def __hash__(self):
    # Hash by path, consistent with __eq__, so Device instances can be
    # used in sets and as dict keys.
    return hash(self.path)
145 if not sys_info
.devices
:
146 sys_info
.devices
= disk
.get_devices()
147 self
.sys_api
= sys_info
.devices
.get(self
.abspath
, {})
149 # if no device was found check if we are a partition
150 partname
= self
.abspath
.split('/')[-1]
151 for device
, info
in sys_info
.devices
.items():
152 part
= info
['partitions'].get(partname
, {})
157 # if the path is not absolute, we have 'vg/lv', let's use LV name
159 if self
.path
[0] == '/':
160 lv
= lvm
.get_first_lv(filters
={'lv_path': self
.path
})
162 vgname
, lvname
= self
.path
.split('/')
163 lv
= lvm
.get_first_lv(filters
={'lv_name': lvname
,
168 self
.abspath
= lv
.lv_path
169 self
.vg_name
= lv
.vg_name
170 self
.lv_name
= lv
.name
172 dev
= disk
.lsblk(self
.path
)
173 self
.blkid_api
= disk
.blkid(self
.path
)
175 device_type
= dev
.get('TYPE', '')
176 # always check is this is an lvm member
177 if device_type
in ['part', 'disk']:
178 self
._set
_lvm
_membership
()
180 self
.ceph_disk
= CephDiskDevice(self
)
186 elif self
.is_partition
:
189 prefix
= 'Raw Device'
190 return '<%s: %s>' % (prefix
, self
.abspath
)
192 def pretty_report(self
):
194 if isinstance(v
, list):
199 return k
.strip('_').replace('_', ' ')
200 output
= ['\n====== Device report {} ======\n'.format(self
.path
)]
202 [self
.pretty_template
.format(
204 value
=format_value(v
)) for k
, v
in vars(self
).items() if k
in
205 self
.report_fields
and k
!= 'disk_api' and k
!= 'sys_api'] )
207 [self
.pretty_template
.format(
209 value
=format_value(v
)) for k
, v
in self
.sys_api
.items() if k
in
210 self
.pretty_report_sys_fields
])
213 --- Logical Volume ---""")
215 [self
.pretty_template
.format(
217 value
=format_value(v
)) for k
, v
in lv
.report().items()])
218 return ''.join(output
)
221 return report_template
.format(
223 size
=self
.size_human
,
225 available
=self
.available
,
229 def json_report(self
):
230 output
= {k
.strip('_'): v
for k
, v
in vars(self
).items() if k
in
232 output
['lvs'] = [lv
.report() for lv
in self
.lvs
]
def _get_device_id(self):
    """
    Build a stable device-id string from udev properties.

    Please keep this implementation in sync with get_device_id() in
    the Ceph C++ sources.  # NOTE(review): exact C++ location dropped
    # from this copy of the docstring — confirm against upstream.
    """
    props = ['ID_VENDOR', 'ID_MODEL', 'ID_MODEL_ENC', 'ID_SERIAL_SHORT',
             'ID_SERIAL', 'ID_SCSI_SERIAL']
    p = disk.udevadm_property(self.abspath, props)
    if p.get('ID_MODEL', '').startswith('LVM PV '):
        # udev encodes spaces as '\x20' in ID_MODEL_ENC; decode that and
        # prefer it over the generic 'LVM PV ...' model string
        p['ID_MODEL'] = p.get('ID_MODEL_ENC', '').replace('\\x20', ' ').strip()
    if 'ID_VENDOR' in p and 'ID_MODEL' in p and 'ID_SCSI_SERIAL' in p:
        dev_id = '_'.join([p['ID_VENDOR'], p['ID_MODEL'],
                           p['ID_SCSI_SERIAL']])
    elif 'ID_MODEL' in p and 'ID_SERIAL_SHORT' in p:
        dev_id = '_'.join([p['ID_MODEL'], p['ID_SERIAL_SHORT']])
    elif 'ID_SERIAL' in p:
        dev_id = p['ID_SERIAL']
        if dev_id.startswith('MTFD'):
            # Micron NVMes hide the vendor
            dev_id = 'Micron_' + dev_id
    else:
        # the else branch should fallback to using sysfs and ioctl to
        # retrieve device_id on FreeBSD. Still figuring out if/how the
        # python ioctl implementation does that on FreeBSD
        dev_id = ''
    # FIX: str.replace() returns a new string and leaves the original
    # untouched; the previous code called it as a statement and threw
    # the result away, so embedded spaces were never replaced.
    dev_id = dev_id.replace(' ', '_')
    return dev_id
263 def _set_lvm_membership(self
):
264 if self
._is
_lvm
_member
is None:
265 # this is contentious, if a PV is recognized by LVM but has no
266 # VGs, should we consider it as part of LVM? We choose not to
267 # here, because most likely, we need to use VGs from this PV.
268 self
._is
_lvm
_member
= False
269 for path
in self
._get
_pv
_paths
():
270 vgs
= lvm
.get_device_vgs(path
)
273 # a pv can only be in one vg, so this should be safe
274 # FIXME: While the above assumption holds, sda1 and sda2
275 # can each host a PV and VG. I think the vg_name property is
276 # actually unused (not 100% sure) and can simply be removed
277 self
.vg_name
= vgs
[0]
278 self
._is
_lvm
_member
= True
279 self
.lvs
.extend(lvm
.get_device_lvs(path
))
280 return self
._is
_lvm
_member
def _get_pv_paths(self):
    """
    For block devices LVM can reside on the raw block device or on a
    partition. Return a list of paths to be checked for a pv.
    """
    base_dir = os.path.dirname(self.abspath)
    partitions = self.sys_api.get('partitions', {})
    paths = [self.abspath]
    # each partition name lives next to the parent device node
    paths.extend(os.path.join(base_dir, part) for part in partitions)
    return paths
295 return os
.path
.exists(self
.abspath
)
298 def has_gpt_headers(self
):
299 return self
.blkid_api
.get("PTTYPE") == "gpt"
@property
def rotational(self):
    """True when the device reports itself as spinning (rotational) media."""
    value = self.sys_api.get('rotational')
    if value is None:
        # fall back to lsblk if not found in sys_api;
        # default to '1' if no value is found with lsblk either
        value = self.disk_api.get('ROTA', '1')
    return value == '1'
312 return self
.sys_api
['model']
315 def size_human(self
):
316 return self
.sys_api
['human_readable_size']
320 return self
.sys_api
['size']
325 If this device was made into a PV it would lose 1GB in total size
326 due to the 1GB physical extent size we set when creating volume groups
328 size
= disk
.Size(b
=self
.size
)
329 lvm_size
= disk
.Size(gb
=size
.gb
.as_int()) - disk
.Size(gb
=1)
333 def is_lvm_member(self
):
334 if self
._is
_lvm
_member
is None:
335 self
._set
_lvm
_membership
()
336 return self
._is
_lvm
_member
339 def is_ceph_disk_member(self
):
340 is_member
= self
.ceph_disk
.is_member
341 if self
.sys_api
.get("partitions"):
342 for part
in self
.sys_api
.get("partitions").keys():
343 part
= Device("/dev/%s" % part
)
344 if part
.is_ceph_disk_member
:
350 def has_bluestore_label(self
):
351 out
, err
, ret
= process
.call([
352 'ceph-bluestore-tool', 'show-label',
353 '--dev', self
.abspath
], verbose_on_failure
=False)
360 return self
.path
.startswith(('/dev/mapper', '/dev/dm-'))
364 return self
.lv_api
is not None
367 def is_partition(self
):
369 return self
.disk_api
['TYPE'] == 'part'
371 return self
.blkid_api
['TYPE'] == 'part'
382 is_device
= api
['TYPE'] == 'device'
383 is_disk
= api
['TYPE'] == 'disk'
384 if is_device
or is_disk
:
389 def is_acceptable_device(self
):
390 return self
.is_device
or self
.is_partition
393 def is_encrypted(self
):
395 Only correct for LVs, device mappers, and partitions. Will report a ``None``
398 crypt_reports
= [self
.blkid_api
.get('TYPE', ''), self
.disk_api
.get('FSTYPE', '')]
400 # if disk APIs are reporting this is encrypted use that:
401 if 'crypto_LUKS' in crypt_reports
:
403 # if ceph-volume created this, then a tag would let us know
404 elif self
.lv_api
.encrypted
:
407 elif self
.is_partition
:
408 return 'crypto_LUKS' in crypt_reports
410 active_mapper
= encryption_status(self
.abspath
)
412 # normalize a bit to ensure same values regardless of source
413 encryption_type
= active_mapper
['type'].lower().strip('12') # turn LUKS1 or LUKS2 into luks
414 return True if encryption_type
in ['plain', 'luks'] else False
421 def used_by_ceph(self
):
422 # only filter out data devices as journals could potentially be reused
423 osd_ids
= [lv
.tags
.get("ceph.osd_id") is not None for lv
in self
.lvs
424 if lv
.tags
.get("ceph.type") in ["data", "block"]]
428 def vg_free_percent(self
):
430 return [vg
.free_percent
for vg
in self
.vgs
]
437 return [vg
.size
for vg
in self
.vgs
]
439 # TODO fix this...we can probably get rid of vg_free
445 Returns the free space in all VGs on this device. If no VGs are
446 present, returns the disk size.
449 return [vg
.free
for vg
in self
.vgs
]
451 # We could also query 'lvmconfig
452 # --typeconfig full' and use allocations -> physical_extent_size
453 # value to project the space for a vg
454 # assuming 4M extents here
455 extent_size
= 4194304
456 vg_free
= int(self
.size
/ extent_size
) * extent_size
457 if self
.size
% extent_size
== 0:
458 # If the extent size divides size exactly, deduct on extent for
460 vg_free
-= extent_size
463 def _check_generic_reject_reasons(self
):
465 ('removable', 1, 'removable'),
466 ('ro', 1, 'read-only'),
467 ('locked', 1, 'locked'),
469 rejected
= [reason
for (k
, v
, reason
) in reasons
if
470 self
.sys_api
.get(k
, '') == v
]
471 if self
.is_acceptable_device
:
472 # reject disks smaller than 5GB
473 if int(self
.sys_api
.get('size', 0)) < 5368709120:
474 rejected
.append('Insufficient space (<5GB)')
476 rejected
.append("Device type is not acceptable. It should be raw device or partition")
477 if self
.is_ceph_disk_member
:
478 rejected
.append("Used by ceph-disk")
479 if self
.has_bluestore_label
:
480 rejected
.append('Has BlueStore device label')
481 if self
.has_gpt_headers
:
482 rejected
.append('Has GPT headers')
485 def _check_lvm_reject_reasons(self
):
488 available_vgs
= [vg
for vg
in self
.vgs
if int(vg
.vg_free_count
) > 10]
489 if not available_vgs
:
490 rejected
.append('Insufficient space (<10 extents) on vgs')
492 # only check generic if no vgs are present. Vgs might hold lvs and
493 # that might cause 'locked' to trigger
494 rejected
.extend(self
._check
_generic
_reject
_reasons
())
496 return len(rejected
) == 0, rejected
def _check_raw_reject_reasons(self):
    """
    Evaluate suitability as a raw (non-LVM) OSD device.

    Returns a (usable, reasons) tuple; usable is True only when no
    reject reasons were collected.
    """
    rejected = self._check_generic_reject_reasons()
    if self.vgs:
        # any VG on the device means LVM already owns it
        rejected.append('LVM detected')

    return not rejected, rejected
506 def available_lvm_batch(self
):
507 if self
.sys_api
.get("partitions"):
509 if system
.device_is_mounted(self
.path
):
511 return self
.is_device
or self
.is_lv
514 class CephDiskDevice(object):
516 Detect devices that have been created by ceph-disk, report their type
517 (journal, data, etc..). Requires a ``Device`` object as input.
520 def __init__(self
, device
):
522 self
._is
_ceph
_disk
_member
= None
527 In containers, the 'PARTLABEL' attribute might not be detected
528 correctly via ``lsblk``, so we poke at the value with ``lsblk`` first,
529 falling back to ``blkid`` (which works correclty in containers).
531 lsblk_partlabel
= self
.device
.disk_api
.get('PARTLABEL')
533 return lsblk_partlabel
534 return self
.device
.blkid_api
.get('PARTLABEL', '')
539 Seems like older version do not detect PARTTYPE correctly (assuming the
540 info in util/disk.py#lsblk is still valid).
541 SImply resolve to using blkid since lsblk will throw an error if asked
542 for an unknown columns
544 return self
.device
.blkid_api
.get('PARTTYPE', '')
548 if self
._is
_ceph
_disk
_member
is None:
549 if 'ceph' in self
.partlabel
:
550 self
._is
_ceph
_disk
_member
= True
552 elif self
.parttype
in ceph_disk_guids
.keys():
555 return self
._is
_ceph
_disk
_member
560 'data', 'wal', 'db', 'lockbox', 'journal',
561 # ceph-disk uses 'ceph block' when placing data in bluestore, but
562 # keeps the regular OSD files in 'ceph data' :( :( :( :(
566 if t
in self
.partlabel
:
568 label
= ceph_disk_guids
.get(self
.parttype
, {})
569 return label
.get('type', 'unknown').split('.')[-1]