# ceph/src/ceph-volume/ceph_volume/tests/conftest.py
import os
import pytest
from mock.mock import patch, PropertyMock, create_autospec
from ceph_volume.api import lvm
from ceph_volume.util import disk
from ceph_volume.util import device
from ceph_volume.util.constants import ceph_disk_guids
from ceph_volume import conf, configuration


class Capture(object):

    def __init__(self, *a, **kw):
        self.a = a
        self.kw = kw
        self.calls = []
        self.return_values = kw.get('return_values', False)
        self.always_returns = kw.get('always_returns', False)

    def __call__(self, *a, **kw):
        self.calls.append({'args': a, 'kwargs': kw})
        if self.always_returns:
            return self.always_returns
        if self.return_values:
            return self.return_values.pop()
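
# A hedged usage sketch (not part of the original file): Capture records every
# invocation and can replay canned return values. The command arguments below
# are illustrative only.
#
#     fake = Capture(always_returns=([], [], 0))
#     fake('lvs', '--noheadings')
#     assert fake.calls[0]['args'] == ('lvs', '--noheadings')
#     assert fake() == ([], [], 0)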


class Factory(object):

    def __init__(self, **kw):
        for k, v in kw.items():
            setattr(self, k, v)


@pytest.fixture
def factory():
    return Factory


@pytest.fixture
def mock_lv_device_generator():
    def mock_lv():
        size = 21474836480
        dev = create_autospec(device.Device)
        dev.lv_name = 'lv'
        dev.vg_name = 'vg'
        dev.path = '{}/{}'.format(dev.vg_name, dev.lv_name)
        dev.used_by_ceph = False
        dev.vg_size = [size]
        dev.vg_free = dev.vg_size
        dev.available_lvm = True
        dev.lvs = [lvm.Volume(vg_name=dev.vg_name, lv_name=dev.lv_name,
                              lv_size=size, lv_tags='')]
        return dev
    return mock_lv


def mock_device():
    dev = create_autospec(device.Device)
    dev.path = '/dev/foo'
    dev.vg_name = 'vg_foo'
    dev.lv_name = 'lv_foo'
    dev.symlink = None
    dev.vgs = [lvm.VolumeGroup(vg_name=dev.vg_name, lv_name=dev.lv_name)]
    dev.available_lvm = True
    dev.vg_size = [21474836480]
    dev.vg_free = dev.vg_size
    dev.lvs = []
    return dev


@pytest.fixture(params=range(1,3))
def mock_devices_available(request):
    ret = []
    for _ in range(request.param):
        ret.append(mock_device())
    return ret


@pytest.fixture
def mock_device_generator():
    return mock_device


@pytest.fixture(params=range(1,11))
def osds_per_device(request):
    return request.param


@pytest.fixture
def fake_run(monkeypatch):
    fake_run = Capture()
    monkeypatch.setattr('ceph_volume.process.run', fake_run)
    return fake_run


@pytest.fixture
def fake_call(monkeypatch):
    fake_call = Capture(always_returns=([], [], 0))
    monkeypatch.setattr('ceph_volume.process.call', fake_call)
    return fake_call


@pytest.fixture
def fakedevice(factory):
    def apply(**kw):
        params = dict(
            path='/dev/sda',
            abspath='/dev/sda',
            lv_api=None,
            pvs_api=[],
            disk_api={},
            sys_api={},
            exists=True,
            is_lvm_member=True,
        )
        params.update(dict(kw))
        params['lvm_size'] = disk.Size(b=params['sys_api'].get("size", 0))
        return factory(**params)
    return apply


@pytest.fixture
def stub_call(monkeypatch):
    """
    Monkeypatches process.call, so that a caller can add behavior to the response
    """
    def apply(return_values):
        if isinstance(return_values, tuple):
            return_values = [return_values]
        stubbed_call = Capture(return_values=return_values)
        monkeypatch.setattr('ceph_volume.process.call', stubbed_call)
        return stubbed_call

    return apply


@pytest.fixture(autouse=True)
def reset_cluster_name(request, monkeypatch):
    """
    The globally available ``ceph_volume.conf.cluster`` might get mangled in
    tests; make sure that after every test it gets reset, preventing pollution
    from leaking into other tests later.
    """
    def fin():
        conf.cluster = None
        try:
            os.environ.pop('CEPH_CONF')
        except KeyError:
            pass
    request.addfinalizer(fin)
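
# A minimal sketch of the pollution this autouse fixture prevents
# (hypothetical tests): without the finalizer, the first test's assignment
# would leak into the second.
#
#     def test_custom_cluster_name():
#         conf.cluster = 'banana'
#
#     def test_default_cluster_name():
#         assert conf.cluster is None  # holds only because of the reset above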


@pytest.fixture
def conf_ceph(monkeypatch):
    """
    Monkeypatches ceph_volume.conf.ceph, which is meant to parse/read a
    ceph.conf. The patching is naive; it allows one to set return values for
    specific method calls.
    """
    def apply(**kw):
        stub = Factory(**kw)
        monkeypatch.setattr(conf, 'ceph', stub)
        return stub
    return apply
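
# A hedged usage sketch: keyword arguments become attributes on the Factory
# stub, so a test can fake any method of conf.ceph (the method and values
# below are assumptions, not part of the original file):
#
#     def test_osd_data_path(conf_ceph):
#         conf_ceph(get_safe=lambda *a: '/var/lib/ceph/osd')
#         assert conf.ceph.get_safe('osd', 'osd_data') == '/var/lib/ceph/osd'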


@pytest.fixture
def conf_ceph_stub(monkeypatch, tmpfile):
    """
    Monkeypatches ceph_volume.conf.ceph with contents from a string that is
    written to a temporary file and then fed through the same ceph.conf
    loading mechanisms for testing. Unlike ``conf_ceph``, which is just a
    fake, this fixture actually loads values as they would appear in a
    ceph.conf file.

    This is useful when more complex ceph.conf contents are needed. For just
    validating key/value behavior, ``conf_ceph`` is better suited.
    """
    def apply(contents):
        conf_path = tmpfile(contents=contents)
        parser = configuration.load(conf_path)
        monkeypatch.setattr(conf, 'ceph', parser)
        return parser
    return apply
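
# A hedged usage sketch (hypothetical fsid value): real ceph.conf contents are
# parsed by the actual loader, so section/key lookups behave as in production.
#
#     def test_global_fsid(conf_ceph_stub):
#         conf_ceph_stub('[global]\nfsid = 1234-lkjh-0000')
#         assert conf.ceph.get('global', 'fsid') == '1234-lkjh-0000'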


@pytest.fixture
def is_root(monkeypatch):
    """
    Patch ``os.getuid()`` so that ceph-volume's decorators that ensure a user
    is root (or is sudoing to superuser) can continue as-is.
    """
    monkeypatch.setattr('os.getuid', lambda: 0)
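
# A hedged usage sketch: tests that hit code guarded by the root-check
# decorators simply request the fixture (the test body is illustrative):
#
#     def test_activate_as_root(is_root):
#         ...  # os.getuid() reports 0, so the guarded call proceeds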


@pytest.fixture
def tmpfile(tmpdir):
    """
    Create a temporary file, optionally filling it with contents; returns an
    absolute path to the file when called.
    """
    def generate_file(name='file', contents='', directory=None):
        directory = directory or str(tmpdir)
        path = os.path.join(directory, name)
        with open(path, 'w') as fp:
            fp.write(contents)
        return path
    return generate_file
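
# A hedged usage sketch (hypothetical name and contents):
#
#     def test_file_is_written(tmpfile):
#         path = tmpfile(name='ceph.conf', contents='[global]\n')
#         assert os.path.exists(path)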


@pytest.fixture
def disable_kernel_queries(monkeypatch):
    """
    This speeds up calls to Device and Disk
    """
    monkeypatch.setattr("ceph_volume.util.device.disk.get_devices", lambda: {})
    monkeypatch.setattr("ceph_volume.util.disk.udevadm_property", lambda *a, **kw: {})
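
# A hedged usage sketch: requesting the fixture is enough; both kernel-facing
# helpers are replaced with cheap lambdas for the duration of the test, so
# Device/Disk construction skips the get_devices/udevadm round-trips.
#
#     def test_fast_device(device_info, disable_kernel_queries):
#         device_info(devices={})
#         dev = device.Device('/dev/sda')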


@pytest.fixture(params=[
    '', 'ceph data', 'ceph journal', 'ceph block',
    'ceph block.wal', 'ceph block.db', 'ceph lockbox'])
def ceph_partlabel(request):
    return request.param


@pytest.fixture(params=list(ceph_disk_guids.keys()))
def ceph_parttype(request):
    return request.param


@pytest.fixture
def lsblk_ceph_disk_member(monkeypatch, request, ceph_partlabel, ceph_parttype):
    monkeypatch.setattr("ceph_volume.util.device.disk.lsblk",
                        lambda path: {'TYPE': 'disk',
                                      'NAME': 'sda',
                                      'PARTLABEL': ceph_partlabel,
                                      'PARTTYPE': ceph_parttype})
    monkeypatch.setattr("ceph_volume.util.device.disk.lsblk_all",
                        lambda: [{'TYPE': 'disk',
                                  'NAME': 'sda',
                                  'PARTLABEL': ceph_partlabel,
                                  'PARTTYPE': ceph_parttype}])


@pytest.fixture
def blkid_ceph_disk_member(monkeypatch, request, ceph_partlabel, ceph_parttype):
    monkeypatch.setattr("ceph_volume.util.device.disk.blkid",
                        lambda path: {'TYPE': 'disk',
                                      'PARTLABEL': ceph_partlabel,
                                      'PARTTYPE': ceph_parttype})


@pytest.fixture(params=[
    ('gluster partition', 'gluster partition'),
    # falls back to blkid
    ('', 'gluster partition'),
    ('gluster partition', ''),
])
def device_info_not_ceph_disk_member(monkeypatch, request):
    monkeypatch.setattr("ceph_volume.util.device.disk.lsblk",
                        lambda path: {'TYPE': 'disk',
                                      'NAME': 'sda',
                                      'PARTLABEL': request.param[0]})
    monkeypatch.setattr("ceph_volume.util.device.disk.lsblk_all",
                        lambda: [{'TYPE': 'disk',
                                  'NAME': 'sda',
                                  'PARTLABEL': request.param[0]}])
    monkeypatch.setattr("ceph_volume.util.device.disk.blkid",
                        lambda path: {'TYPE': 'disk',
                                      'PARTLABEL': request.param[1]})


@pytest.fixture
def patched_get_block_devs_sysfs():
    with patch('ceph_volume.util.disk.get_block_devs_sysfs') as p:
        yield p


@pytest.fixture
def patch_bluestore_label():
    with patch('ceph_volume.util.device.Device.has_bluestore_label',
               new_callable=PropertyMock) as p:
        p.return_value = False
        yield p


@pytest.fixture
def device_info(monkeypatch, patch_bluestore_label):
    def apply(devices=None, lsblk=None, lv=None, blkid=None, udevadm=None,
              has_bluestore_label=False):
        if devices:
            for dev in devices.keys():
                devices[dev]['device_nodes'] = os.path.basename(dev)
        else:
            devices = {}
        lsblk = lsblk if lsblk else {}
        blkid = blkid if blkid else {}
        udevadm = udevadm if udevadm else {}
        lv = Factory(**lv) if lv else None
        monkeypatch.setattr("ceph_volume.sys_info.devices", {})
        monkeypatch.setattr("ceph_volume.util.device.disk.get_devices", lambda: devices)
        if not devices:
            monkeypatch.setattr("ceph_volume.util.device.lvm.get_single_lv",
                                lambda filters: lv)
        else:
            monkeypatch.setattr("ceph_volume.util.device.lvm.get_device_lvs",
                                lambda path: [lv])
        monkeypatch.setattr("ceph_volume.util.device.disk.lsblk", lambda path: lsblk)
        monkeypatch.setattr("ceph_volume.util.device.disk.blkid", lambda path: blkid)
        monkeypatch.setattr("ceph_volume.util.disk.udevadm_property", lambda *a, **kw: udevadm)
    return apply
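
# A hedged usage sketch (the device path and payloads are hypothetical): stub
# out everything Device would otherwise read from the running system before
# instantiating it.
#
#     def test_device_setup(device_info):
#         device_info(devices={'/dev/sda': {'size': 10737418240}},
#                     lsblk={'TYPE': 'disk'})
#         dev = device.Device('/dev/sda')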


@pytest.fixture(params=[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.95, 0.999, 1.0])
def data_allocate_fraction(request):
    return request.param


@pytest.fixture
def fake_filesystem(fs):
    # ``fs`` is the pyfakefs fixture; pre-create the sysfs paths the tests
    # expect to find.
    fs.create_dir('/sys/block/sda/slaves')
    fs.create_dir('/sys/block/sda/queue')
    fs.create_dir('/sys/block/rbd0')
    yield fs