# ceph/src/ceph-volume/ceph_volume/devices/lvm/strategies/validators.py
1 from ceph_volume
.util
import disk
2 from ceph_volume
.api
import lvm
def minimum_device_size(devices, osds_per_device=1):
    """
    Ensure that the minimum requirements for this type of scenario is
    met, raise an error if the provided devices would not work
    """
    msg = 'Unable to use device %s %s, LVs would be smaller than 5GB'
    # Each device gets carved into ``osds_per_device`` logical volumes;
    # every resulting LV must be at least 5GB. The floor is constant, so
    # build it once outside the loop.
    floor = disk.Size(gb=5)
    for device in devices:
        device_size = disk.Size(b=device.sys_api['size'])
        if device_size / osds_per_device < floor:
            raise RuntimeError(msg % (device_size, device.path))
def minimum_device_collocated_size(devices, journal_size, osds_per_device=1):
    """
    Similar to ``minimum_device_size``, but take into account that the size of
    the journal affects the size left of the device
    """
    msg = 'Unable to use device %s %s, LVs would be smaller than 5GB'
    # The journal is collocated on the same device, so subtract its size
    # from each OSD's share before checking against the 5GB floor.
    floor = disk.Size(gb=5)
    for device in devices:
        device_size = disk.Size(b=device.sys_api['size'])
        usable = (device_size / osds_per_device) - journal_size
        if usable < floor:
            raise RuntimeError(msg % (device_size, device.path))
def no_lvm_membership(devices):
    """
    Do not allow devices that are part of LVM
    """
    msg = 'Unable to use device, already a member of LVM: %s'
    # Reject on the first device that already belongs to LVM.
    offender = next((d for d in devices if d.is_lvm_member), None)
    if offender is not None:
        raise RuntimeError(msg % offender.abspath)
def has_common_vg(ssd_devices):
    """
    Ensure that devices have a common VG between them
    """
    msg = 'Could not find a common VG between devices: %s'
    system_vgs = lvm.VolumeGroups()
    ssd_vgs = {}

    # Map every VG name found on the devices' PVs to the list of device
    # paths that live on it.
    for ssd_device in ssd_devices:
        for pv in ssd_device.pvs_api:
            vg = system_vgs.get(vg_name=pv.vg_name)
            if not vg:
                # PV not part of any known VG; skip it
                continue
            ssd_vgs.setdefault(vg.name, []).append(ssd_device.abspath)

    # len of 1 means they all have a common vg, and len of 0 means that these
    # devices are not part of any VG at all — both are acceptable.
    if len(ssd_vgs) <= 1:
        return
    raise RuntimeError(msg % ', '.join(ssd_vgs.keys()))