# -*- coding: utf-8 -*-

import logging
import os
from functools import total_ordering
from ceph_volume import sys_info
from ceph_volume.api import lvm
from ceph_volume.util import disk, system
from ceph_volume.util.lsmdisk import LSMDisk
from ceph_volume.util.constants import ceph_disk_guids
from ceph_volume.util.disk import allow_loop_devices


logger = logging.getLogger(__name__)


report_template = """
{dev:<25} {size:<12} {device_nodes:<15} {rot!s:<7} {available!s:<9} {model}"""


def encryption_status(abspath):
    """
    Helper function to run ``encryption.status()``. It is done here to avoid
    a circular import issue (encryption module imports from this module) and to
    ease testing by allowing monkeypatching of this function.
    """
    from ceph_volume.util import encryption
    return encryption.status(abspath)


class Devices(object):
    """
    A container for Device instances with reporting
    """

    def __init__(self, filter_for_batch=False, with_lsm=False):
        lvs = lvm.get_lvs()
        lsblk_all = disk.lsblk_all()
        all_devices_vgs = lvm.get_all_devices_vgs()
        if not sys_info.devices:
            sys_info.devices = disk.get_devices()
        self.devices = [Device(k,
                               with_lsm,
                               lvs=lvs,
                               lsblk_all=lsblk_all,
                               all_devices_vgs=all_devices_vgs) for k in
                        sys_info.devices.keys()]
        if filter_for_batch:
            self.devices = [d for d in self.devices if d.available_lvm_batch]

    def pretty_report(self):
        output = [
            report_template.format(
                dev='Device Path',
                size='Size',
                rot='rotates',
                model='Model name',
                available='available',
                device_nodes='Device nodes',
            )]
        for device in sorted(self.devices):
            output.append(device.report())
        return ''.join(output)

    def json_report(self):
        output = []
        for device in sorted(self.devices):
            output.append(device.json_report())
        return output

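# A minimal usage sketch (illustrative only): building the container and
# printing its inventory is assumed to look roughly like this on a host where
# ceph-volume can enumerate devices:
#
#     from ceph_volume.util.device import Devices
#
#     devices = Devices()
#     print(devices.pretty_report())     # human-readable table
#     inventory = devices.json_report()  # list of per-device dicts
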
@total_ordering
class Device(object):

    pretty_template = """
     {attr:<25} {value}"""

    report_fields = [
        'ceph_device',
        'rejected_reasons',
        'available',
        'path',
        'sys_api',
        'device_id',
        'lsm_data',
    ]
    pretty_report_sys_fields = [
        'actuators',
        'human_readable_size',
        'model',
        'removable',
        'ro',
        'rotational',
        'sas_address',
        'scheduler_mode',
        'vendor',
    ]

    # define some class variables; mostly to enable the use of autospec in
    # unittests
    lvs = []

    def __init__(self, path, with_lsm=False, lvs=None, lsblk_all=None, all_devices_vgs=None):
        self.path = path
        # LVs can have a vg/lv path, while disks will have /dev/sda
        self.symlink = None
        # check if we are a symlink
        if os.path.islink(self.path):
            self.symlink = self.path
            real_path = os.path.realpath(self.path)
            # check if we are not a device mapper
            if "dm-" not in real_path:
                self.path = real_path
        if not sys_info.devices:
            if self.path:
                sys_info.devices = disk.get_devices(device=self.path)
            else:
                sys_info.devices = disk.get_devices()
        if sys_info.devices.get(self.path, {}):
            self.device_nodes = sys_info.devices[self.path]['device_nodes']
        self.sys_api = sys_info.devices.get(self.path, {})
        self.partitions = self._get_partitions()
        self.lv_api = None
        self.lvs = [] if not lvs else lvs
        self.lsblk_all = lsblk_all
        self.all_devices_vgs = all_devices_vgs
        self.vgs = []
        self.vg_name = None
        self.lv_name = None
        self.disk_api = {}
        self.blkid_api = None
        self._exists = None
        self._is_lvm_member = None
        self.ceph_device = False
        self._parse()
        self.lsm_data = self.fetch_lsm(with_lsm)

        self.available_lvm, self.rejected_reasons_lvm = self._check_lvm_reject_reasons()
        self.available_raw, self.rejected_reasons_raw = self._check_raw_reject_reasons()
        self.available = self.available_lvm and self.available_raw
        self.rejected_reasons = list(set(self.rejected_reasons_lvm +
                                         self.rejected_reasons_raw))

        self.device_id = self._get_device_id()

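    # A minimal usage sketch (illustrative only, assuming a host where
    # /dev/sdb exists and sys_info can be populated):
    #
    #     dev = Device('/dev/sdb')
    #     if not dev.available:
    #         print(dev.rejected_reasons)
    #
    # ``available`` is the intersection of the LVM and raw checks computed in
    # __init__ above; the per-strategy flags remain accessible as
    # ``available_lvm`` and ``available_raw``.
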
    def fetch_lsm(self, with_lsm):
        '''
        Attempt to fetch libstoragemgmt (LSM) metadata, and return to the caller
        as a dict. An empty dict is passed back to the caller if the target path
        is not a block device, or lsm is unavailable on the host. Otherwise the
        json returned will provide LSM attributes, and any associated errors that
        lsm encountered when probing the device.
        '''
        if not with_lsm or not self.exists or not self.is_device:
            return {}

        lsm_disk = LSMDisk(self.path)

        return lsm_disk.json_report()

    def __lt__(self, other):
        '''
        Implementing this method and __eq__ allows the @total_ordering
        decorator to turn the Device class into a totally ordered type.
        This can be slower than implementing all comparison operations.
        This sorting should put available devices before unavailable devices
        and sort on the path otherwise (str sorting).
        '''
        if self.available == other.available:
            return self.path < other.path
        return self.available and not other.available

    def __eq__(self, other):
        return self.path == other.path

    def __hash__(self):
        return hash(self.path)

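    # Ordering illustration (a sketch with two hypothetical instances): given
    # ``a = Device('/dev/sdb')`` that is available and ``b = Device('/dev/sda')``
    # that is not, ``sorted([b, a])`` yields ``[a, b]``: availability is
    # compared first, and the path string only breaks ties between devices of
    # equal availability.
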
    def load_blkid_api(self):
        if self.blkid_api is None:
            self.blkid_api = disk.blkid(self.path)

    def _parse(self):
        lv = None
        if not self.sys_api:
            # if no device was found check if we are a partition
            partname = self.path.split('/')[-1]
            for device, info in sys_info.devices.items():
                part = info['partitions'].get(partname, {})
                if part:
                    self.sys_api = part
                    break

        if self.lvs:
            for _lv in self.lvs:
                # if the path is not absolute, we have 'vg/lv', let's use the
                # LV name to get the LV.
                if self.path[0] == '/':
                    if _lv.lv_path == self.path:
                        lv = _lv
                        break
                else:
                    vgname, lvname = self.path.split('/')
                    if _lv.lv_name == lvname and _lv.vg_name == vgname:
                        lv = _lv
                        break
        else:
            if self.path[0] == '/':
                lv = lvm.get_single_lv(filters={'lv_path': self.path})
            else:
                vgname, lvname = self.path.split('/')
                lv = lvm.get_single_lv(filters={'lv_name': lvname,
                                                'vg_name': vgname})

        if lv:
            self.lv_api = lv
            self.lvs = [lv]
            self.path = lv.lv_path
            self.vg_name = lv.vg_name
            self.lv_name = lv.name
            self.ceph_device = lvm.is_ceph_device(lv)
        else:
            self.lvs = []
            if self.lsblk_all:
                for dev in self.lsblk_all:
                    if dev['NAME'] == os.path.basename(self.path):
                        break
            else:
                dev = disk.lsblk(self.path)
            self.disk_api = dev
            device_type = dev.get('TYPE', '')
            # always check if this is an lvm member
            valid_types = ['part', 'disk', 'mpath']
            if allow_loop_devices():
                valid_types.append('loop')
            if device_type in valid_types:
                self._set_lvm_membership()

        self.ceph_disk = CephDiskDevice(self)

    def __repr__(self):
        prefix = 'Unknown'
        if self.is_lv:
            prefix = 'LV'
        elif self.is_partition:
            prefix = 'Partition'
        elif self.is_device:
            prefix = 'Raw Device'
        return '<%s: %s>' % (prefix, self.path)

    def pretty_report(self):
        def format_value(v):
            if isinstance(v, list):
                return ', '.join(v)
            else:
                return v
        def format_key(k):
            return k.strip('_').replace('_', ' ')
        output = ['\n====== Device report {} ======\n'.format(self.path)]
        output.extend(
            [self.pretty_template.format(
                attr=format_key(k),
                value=format_value(v)) for k, v in vars(self).items() if k in
                self.report_fields and k != 'disk_api' and k != 'sys_api'])
        output.extend(
            [self.pretty_template.format(
                attr=format_key(k),
                value=format_value(v)) for k, v in self.sys_api.items() if k in
                self.pretty_report_sys_fields])
        for lv in self.lvs:
            output.append("""
  --- Logical Volume ---""")
            output.extend(
                [self.pretty_template.format(
                    attr=format_key(k),
                    value=format_value(v)) for k, v in lv.report().items()])
        return ''.join(output)

    def report(self):
        return report_template.format(
            dev=self.path,
            size=self.size_human,
            rot=self.rotational,
            available=self.available,
            model=self.model,
            device_nodes=self.device_nodes
        )

    def json_report(self):
        output = {k.strip('_'): v for k, v in vars(self).items() if k in
                  self.report_fields}
        output['lvs'] = [lv.report() for lv in self.lvs]
        return output

    def _get_device_id(self):
        """
        Please keep this implementation in sync with get_device_id() in
        src/common/blkdev.cc
        """
        props = ['ID_VENDOR', 'ID_MODEL', 'ID_MODEL_ENC', 'ID_SERIAL_SHORT', 'ID_SERIAL',
                 'ID_SCSI_SERIAL']
        p = disk.udevadm_property(self.path, props)
        if p.get('ID_MODEL', '').startswith('LVM PV '):
            p['ID_MODEL'] = p.get('ID_MODEL_ENC', '').replace('\\x20', ' ').strip()
        if 'ID_VENDOR' in p and 'ID_MODEL' in p and 'ID_SCSI_SERIAL' in p:
            dev_id = '_'.join([p['ID_VENDOR'], p['ID_MODEL'],
                               p['ID_SCSI_SERIAL']])
        elif 'ID_MODEL' in p and 'ID_SERIAL_SHORT' in p:
            dev_id = '_'.join([p['ID_MODEL'], p['ID_SERIAL_SHORT']])
        elif 'ID_SERIAL' in p:
            dev_id = p['ID_SERIAL']
            if dev_id.startswith('MTFD'):
                # Micron NVMes hide the vendor
                dev_id = 'Micron_' + dev_id
        else:
            # the else branch should fall back to using sysfs and ioctl to
            # retrieve device_id on FreeBSD. Still figuring out if/how the
            # python ioctl implementation does that on FreeBSD
            dev_id = ''
        dev_id = dev_id.replace(' ', '_')
        while '__' in dev_id:
            dev_id = dev_id.replace('__', '_')
        return dev_id

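    # Worked illustration of the normalization above, using hypothetical udev
    # values: ID_VENDOR='ATA', ID_MODEL='Samsung SSD 860',
    # ID_SCSI_SERIAL='S3Z9NB0K123456' join into
    # 'ATA_Samsung SSD 860_S3Z9NB0K123456', and the space replacement plus
    # underscore collapsing yields 'ATA_Samsung_SSD_860_S3Z9NB0K123456'.
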
    def _set_lvm_membership(self):
        if self._is_lvm_member is None:
            # this is contentious, if a PV is recognized by LVM but has no
            # VGs, should we consider it as part of LVM? We choose not to
            # here, because most likely, we need to use VGs from this PV.
            self._is_lvm_member = False
            device_to_check = [self.path]
            device_to_check.extend(self.partitions)

            # a pv can only be in one vg, so this should be safe
            # FIXME: While the above assumption holds, sda1 and sda2
            # can each host a PV and VG. I think the vg_name property is
            # actually unused (not 100% sure) and can simply be removed
            vgs = None
            if not self.all_devices_vgs:
                self.all_devices_vgs = lvm.get_all_devices_vgs()
            for path in device_to_check:
                for dev_vg in self.all_devices_vgs:
                    if dev_vg.pv_name == path:
                        vgs = [dev_vg]
                if vgs:
                    self.vgs.extend(vgs)
                    self.vg_name = vgs[0]
                    self._is_lvm_member = True
                    self.lvs.extend(lvm.get_device_lvs(path))
            if self.lvs:
                self.ceph_device = any([True if lv.tags.get('ceph.osd_id') else False for lv in self.lvs])

    def _get_partitions(self):
        """
        For block devices LVM can reside on the raw block device or on a
        partition. Return a list of paths to be checked for a pv.
        """
        partitions = []
        path_dir = os.path.dirname(self.path)
        for partition in self.sys_api.get('partitions', {}).keys():
            partitions.append(os.path.join(path_dir, partition))
        return partitions

    @property
    def exists(self):
        return os.path.exists(self.path)

    @property
    def has_fs(self):
        self.load_blkid_api()
        return 'TYPE' in self.blkid_api

    @property
    def has_gpt_headers(self):
        self.load_blkid_api()
        return self.blkid_api.get("PTTYPE") == "gpt"

    @property
    def rotational(self):
        rotational = self.sys_api.get('rotational')
        if rotational is None:
            # fall back to lsblk if not found in sys_api
            # default to '1' if no value is found with lsblk either
            rotational = self.disk_api.get('ROTA', '1')
        return rotational == '1'

    @property
    def model(self):
        return self.sys_api['model']

    @property
    def size_human(self):
        return self.sys_api['human_readable_size']

    @property
    def size(self):
        return self.sys_api['size']

    @property
    def parent_device(self):
        if 'PKNAME' in self.disk_api:
            return '/dev/%s' % self.disk_api['PKNAME']
        return None

    @property
    def lvm_size(self):
        """
        If this device was made into a PV it would lose 1GB in total size
        due to the 1GB physical extent size we set when creating volume groups
        """
        size = disk.Size(b=self.size)
        lvm_size = disk.Size(gb=size.gb.as_int()) - disk.Size(gb=1)
        return lvm_size

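    # Rough illustration of the deduction above (hypothetical numbers): a
    # device reported as 500.1GB truncates to 500GB via ``size.gb.as_int()``,
    # and subtracting the reserved 1GB leaves an lvm_size of 499GB. The exact
    # rounding behaviour is whatever ``disk.Size`` implements; this comment
    # only sketches the intent of the 1GB reservation.
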
    @property
    def is_lvm_member(self):
        if self._is_lvm_member is None:
            self._set_lvm_membership()
        return self._is_lvm_member

    @property
    def is_ceph_disk_member(self):
        def is_member(device):
            return 'ceph' in device.get('PARTLABEL', '') or \
                device.get('PARTTYPE', '') in ceph_disk_guids.keys()
        # If we come from Devices(), self.lsblk_all is set already.
        # Otherwise, we have to grab the data.
        details = self.lsblk_all or disk.lsblk_all()
        _is_member = False
        if self.sys_api.get("partitions"):
            for part in self.sys_api.get("partitions").keys():
                for dev in details:
                    if part.startswith(dev['NAME']):
                        if is_member(dev):
                            _is_member = True
                return _is_member
        else:
            return is_member(self.disk_api)
        raise RuntimeError(f"Couldn't check if device {self.path} is a ceph-disk member.")

    @property
    def has_bluestore_label(self):
        return disk.has_bluestore_label(self.path)

    @property
    def is_mapper(self):
        return self.path.startswith(('/dev/mapper', '/dev/dm-'))

    @property
    def device_type(self):
        self.load_blkid_api()
        if 'type' in self.sys_api:
            return self.sys_api['type']
        elif self.disk_api:
            return self.disk_api['TYPE']
        elif self.blkid_api:
            return self.blkid_api['TYPE']

    @property
    def is_mpath(self):
        return self.device_type == 'mpath'

    @property
    def is_lv(self):
        return self.lv_api is not None

    @property
    def is_partition(self):
        self.load_blkid_api()
        if self.disk_api:
            return self.disk_api['TYPE'] == 'part'
        elif self.blkid_api:
            return self.blkid_api['TYPE'] == 'part'
        return False

    @property
    def is_device(self):
        self.load_blkid_api()
        api = None
        if self.disk_api:
            api = self.disk_api
        elif self.blkid_api:
            api = self.blkid_api
        if api:
            valid_types = ['disk', 'device', 'mpath']
            if allow_loop_devices():
                valid_types.append('loop')
            return self.device_type in valid_types
        return False

    @property
    def is_acceptable_device(self):
        return self.is_device or self.is_partition

    @property
    def is_encrypted(self):
        """
        Only correct for LVs, device mappers, and partitions. Will report a ``None``
        for raw devices.
        """
        self.load_blkid_api()
        crypt_reports = [self.blkid_api.get('TYPE', ''), self.disk_api.get('FSTYPE', '')]
        if self.is_lv:
            # if disk APIs are reporting this is encrypted use that:
            if 'crypto_LUKS' in crypt_reports:
                return True
            # if ceph-volume created this, then a tag would let us know
            elif self.lv_api.encrypted:
                return True
            return False
        elif self.is_partition:
            return 'crypto_LUKS' in crypt_reports
        elif self.is_mapper:
            active_mapper = encryption_status(self.path)
            if active_mapper:
                # normalize a bit to ensure same values regardless of source
                encryption_type = active_mapper['type'].lower().strip('12')  # turn LUKS1 or LUKS2 into luks
                return True if encryption_type in ['plain', 'luks'] else False
            else:
                return False
        else:
            return None

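    # Normalization note: 'LUKS1'.lower().strip('12') and
    # 'LUKS2'.lower().strip('12') both evaluate to 'luks', so either LUKS
    # version (or a 'PLAIN' mapping) is reported as encrypted above.
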
    @property
    def used_by_ceph(self):
        # only filter out data devices as journals could potentially be reused
        osd_ids = [lv.tags.get("ceph.osd_id") is not None for lv in self.lvs
                   if lv.tags.get("ceph.type") in ["data", "block"]]
        return any(osd_ids)

    @property
    def journal_used_by_ceph(self):
        # similar to used_by_ceph() above. This is for 'journal' devices (db/wal/..)
        # needed by get_lvm_fast_allocs() in devices/lvm/batch.py
        # see https://tracker.ceph.com/issues/59640
        osd_ids = [lv.tags.get("ceph.osd_id") is not None for lv in self.lvs
                   if lv.tags.get("ceph.type") in ["db", "wal"]]
        return any(osd_ids)

    @property
    def vg_free_percent(self):
        if self.vgs:
            return [vg.free_percent for vg in self.vgs]
        else:
            return [1]

    @property
    def vg_size(self):
        if self.vgs:
            return [vg.size for vg in self.vgs]
        else:
            # TODO fix this...we can probably get rid of vg_free
            return self.vg_free

    @property
    def vg_free(self):
        '''
        Returns the free space in all VGs on this device. If no VGs are
        present, returns the disk size.
        '''
        if self.vgs:
            return [vg.free for vg in self.vgs]
        else:
            # We could also query 'lvmconfig --typeconfig full' and use the
            # allocations -> physical_extent_size value to project the space
            # for a vg; assuming 4M extents here
            extent_size = 4194304
            vg_free = int(self.size / extent_size) * extent_size
            if self.size % extent_size == 0:
                # If the extent size divides size exactly, deduct one extent for
                # LVM metadata
                vg_free -= extent_size
            return [vg_free]

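    # Worked example of the estimate above (hypothetical 10 GiB device):
    # int(10737418240 / 4194304) * 4194304 keeps all 2560 extents, and since
    # the size divides evenly one extent is deducted for LVM metadata,
    # leaving 10737418240 - 4194304 = 10733223936 bytes reported as free.
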
    @property
    def has_partitions(self):
        '''
        Boolean to determine if a given device has partitions.
        '''
        if self.sys_api.get('partitions'):
            return True
        return False

    def _check_generic_reject_reasons(self):
        reasons = [
            ('removable', 1, 'removable'),
            ('ro', 1, 'read-only'),
            ('locked', 1, 'locked'),
        ]
        rejected = [reason for (k, v, reason) in reasons if
                    self.sys_api.get(k, '') == v]
        if self.is_acceptable_device:
            # reject disks smaller than 5GB
            if int(self.sys_api.get('size', 0)) < 5368709120:
                rejected.append('Insufficient space (<5GB)')
        else:
            rejected.append("Device type is not acceptable. It should be raw device or partition")
        if self.is_ceph_disk_member:
            rejected.append("Used by ceph-disk")

        try:
            if self.has_bluestore_label:
                rejected.append('Has BlueStore device label')
        except OSError as e:
            # likely failed to open the device. assuming it is BlueStore is the safest option
            # so that a possibly-already-existing OSD doesn't get overwritten
            logger.error('failed to determine if device {} is BlueStore. device should not be used to avoid false negatives. err: {}'.format(self.path, e))
            rejected.append('Failed to determine if device is BlueStore')

        if self.is_partition:
            try:
                if disk.has_bluestore_label(self.parent_device):
                    rejected.append('Parent has BlueStore device label')
            except OSError as e:
                # likely failed to open the device. assuming the parent is BlueStore is the safest
                # option so that a possibly-already-existing OSD doesn't get overwritten
                logger.error('failed to determine if partition {} (parent: {}) has a BlueStore parent. partition should not be used to avoid false negatives. err: {}'.format(self.path, self.parent_device, e))
                rejected.append('Failed to determine if parent device is BlueStore')

        if self.has_gpt_headers:
            rejected.append('Has GPT headers')
        if self.has_partitions:
            rejected.append('Has partitions')
        return rejected

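    # For instance (hypothetical result, not real probe output), a 1GB
    # read-only device would accumulate something like
    # ['read-only', 'Insufficient space (<5GB)'] from the checks above.
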
    def _check_lvm_reject_reasons(self):
        rejected = []
        if self.vgs:
            available_vgs = [vg for vg in self.vgs if int(vg.vg_free_count) > 10]
            if not available_vgs:
                rejected.append('Insufficient space (<10 extents) on vgs')
        else:
            # only check generic if no vgs are present. Vgs might hold lvs and
            # that might cause 'locked' to trigger
            rejected.extend(self._check_generic_reject_reasons())

        return len(rejected) == 0, rejected

    def _check_raw_reject_reasons(self):
        rejected = self._check_generic_reject_reasons()
        if len(self.vgs) > 0:
            rejected.append('LVM detected')

        return len(rejected) == 0, rejected

    @property
    def available_lvm_batch(self):
        if self.sys_api.get("partitions"):
            return False
        if system.device_is_mounted(self.path):
            return False
        return self.is_device or self.is_lv


class CephDiskDevice(object):
    """
    Detect devices that have been created by ceph-disk, report their type
    (journal, data, etc..). Requires a ``Device`` object as input.
    """

    def __init__(self, device):
        self.device = device
        self._is_ceph_disk_member = None

    @property
    def partlabel(self):
        """
        In containers, the 'PARTLABEL' attribute might not be detected
        correctly via ``lsblk``, so we poke at the value with ``lsblk`` first,
        falling back to ``blkid`` (which works correctly in containers).
        """
        lsblk_partlabel = self.device.disk_api.get('PARTLABEL')
        if lsblk_partlabel:
            return lsblk_partlabel
        return self.device.blkid_api.get('PARTLABEL', '')

    @property
    def parttype(self):
        """
        Seems like older versions do not detect PARTTYPE correctly (assuming the
        info in util/disk.py#lsblk is still valid).
        Simply resolve to using blkid since lsblk will throw an error if asked
        for an unknown column.
        """
        return self.device.blkid_api.get('PARTTYPE', '')

    @property
    def is_member(self):
        if self._is_ceph_disk_member is None:
            if 'ceph' in self.partlabel:
                self._is_ceph_disk_member = True
                return True
            elif self.parttype in ceph_disk_guids.keys():
                return True
            return False
        return self._is_ceph_disk_member

    @property
    def type(self):
        types = [
            'data', 'wal', 'db', 'lockbox', 'journal',
            # ceph-disk uses 'ceph block' when placing data in bluestore, but
            # keeps the regular OSD files in 'ceph data' :( :( :( :(
            'block',
        ]
        for t in types:
            if t in self.partlabel:
                return t
        label = ceph_disk_guids.get(self.parttype, {})
        return label.get('type', 'unknown').split('.')[-1]
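
# Illustration of the type resolution above (hypothetical labels): a
# partition labelled 'ceph block' resolves to 'block' through the partlabel
# scan, while an unlabelled ceph-disk partition falls back to the GUID lookup
# in ceph_disk_guids and reports 'unknown' when the GUID is not recognized.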