import os
import pytest
from mock.mock import patch, PropertyMock, create_autospec
from ceph_volume.api import lvm
from ceph_volume.util import disk
from ceph_volume.util import device
from ceph_volume.util.constants import ceph_disk_guids
from ceph_volume import conf, configuration


class Capture(object):

    def __init__(self, *a, **kw):
        self.a = a
        self.kw = kw
        self.calls = []
        self.return_values = kw.get('return_values', False)
        self.always_returns = kw.get('always_returns', False)

    def __call__(self, *a, **kw):
        self.calls.append({'args': a, 'kwargs': kw})
        if self.always_returns:
            return self.always_returns
        if self.return_values:
            return self.return_values.pop()

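# NOTE (illustrative sketch, not part of the original fixtures): ``Capture`` is a
# stand-in callable that records every invocation, so tests that patch
# ``process.run``/``process.call`` with it can later assert on what was executed.
# A hypothetical test using the ``fake_run`` fixture defined below might look like:
#
#     def test_runs_lvcreate(fake_run):
#         code_that_calls_process_run()   # placeholder name, not a real helper
#         assert fake_run.calls[0]['args'][0][0] == 'lvcreate'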

class Factory(object):

    def __init__(self, **kw):
        for k, v in kw.items():
            setattr(self, k, v)


@pytest.fixture
def factory():
    return Factory


@pytest.fixture
def capture():
    return Capture()

@pytest.fixture
def mock_lv_device_generator():
    def mock_lv():
        size = 21474836480
        dev = create_autospec(device.Device)
        dev.lv_name = 'lv'
        dev.vg_name = 'vg'
        dev.path = '{}/{}'.format(dev.vg_name, dev.lv_name)
        dev.used_by_ceph = False
        dev.vg_size = [size]
        dev.vg_free = dev.vg_size
        dev.available_lvm = True
        dev.is_device = False
        dev.lvs = [lvm.Volume(vg_name=dev.vg_name, lv_name=dev.lv_name, lv_size=size, lv_tags='')]
        return dev
    return mock_lv

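# NOTE (illustrative, added commentary): ``mock_lv_device_generator`` returns a
# factory for a ``Device`` autospec that reports itself as an existing 20 GB
# logical volume, available for LVM and not used by ceph, which tests that
# exercise LV-backed OSD creation can consume instead of a real LV.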
def mock_device():
    dev = create_autospec(device.Device)
    dev.path = '/dev/foo'
    dev.vg_name = 'vg_foo'
    dev.lv_name = 'lv_foo'
    dev.symlink = None
    dev.vgs = [lvm.VolumeGroup(vg_name=dev.vg_name, lv_name=dev.lv_name)]
    dev.available_lvm = True
    dev.vg_size = [21474836480]
    dev.vg_free = dev.vg_size
    dev.lvs = []
    return dev

@pytest.fixture(params=range(1,3))
def mock_devices_available(request):
    ret = []
    for _ in range(request.param):
        ret.append(mock_device())
    return ret

@pytest.fixture
def mock_device_generator():
    return mock_device


@pytest.fixture(params=range(1,11))
def osds_per_device(request):
    return request.param


@pytest.fixture
def fake_run(monkeypatch):
    fake_run = Capture()
    monkeypatch.setattr('ceph_volume.process.run', fake_run)
    return fake_run


@pytest.fixture
def fake_call(monkeypatch):
    fake_call = Capture(always_returns=([], [], 0))
    monkeypatch.setattr('ceph_volume.process.call', fake_call)
    return fake_call

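# NOTE (illustrative, added commentary): ``fake_call`` always returns
# ``([], [], 0)``, mimicking a successful command with no output
# (stdout lines, stderr lines, exit code). A hypothetical test could then
# assert on what was invoked:
#
#     def test_calls_bluestore_tool(fake_call):
#         code_under_test()   # placeholder for something that uses process.call
#         assert 'ceph-bluestore-tool' in fake_call.calls[0]['args'][0]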

@pytest.fixture
def fakedevice(factory):
    def apply(**kw):
        params = dict(
            path='/dev/sda',
            abspath='/dev/sda',
            lv_api=None,
            pvs_api=[],
            disk_api={},
            sys_api={},
            exists=True,
            is_lvm_member=True,
        )
        params.update(dict(kw))
        params['lvm_size'] = disk.Size(b=params['sys_api'].get("size", 0))
        return factory(**params)
    return apply

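# NOTE (illustrative, added commentary): ``fakedevice`` builds a plain ``Factory``
# object rather than a real ``Device``, so any attribute can be overridden per
# test. A hypothetical use:
#
#     def test_something(fakedevice):
#         dev = fakedevice(is_lvm_member=False, sys_api={'size': 6073740000})
#         assert dev.is_lvm_member is False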

@pytest.fixture
def stub_call(monkeypatch):
    """
    Monkeypatches process.call, so that a caller can add behavior to the response
    """
    def apply(return_values):
        if isinstance(return_values, tuple):
            return_values = [return_values]
        stubbed_call = Capture(return_values=return_values)
        monkeypatch.setattr('ceph_volume.process.call', stubbed_call)
        return stubbed_call

    return apply

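# NOTE (illustrative, added commentary): unlike ``fake_call``, ``stub_call`` lets
# a test choose the (stdout, stderr, returncode) tuples that ``process.call``
# should produce; passing a list makes each call consume one entry (popped from
# the end of the list). A hypothetical use:
#
#     def test_reports_failure(stub_call):
#         stub_call(([], ['some error'], 1))
#         ...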

@pytest.fixture(autouse=True)
def reset_cluster_name(request, monkeypatch):
    """
    The globally available ``ceph_volume.conf.cluster`` might get mangled in
    tests; make sure that after every test it gets reset, preventing pollution
    from leaking into other tests later.
    """
    def fin():
        conf.cluster = None
        try:
            os.environ.pop('CEPH_CONF')
        except KeyError:
            pass
    request.addfinalizer(fin)


@pytest.fixture
def conf_ceph(monkeypatch):
    """
    Monkeypatches ceph_volume.conf.ceph, which is meant to parse/read
    a ceph.conf. The patching is naive; it allows one to set return values for
    specific method calls.
    """
    def apply(**kw):
        stub = Factory(**kw)
        monkeypatch.setattr(conf, 'ceph', stub)
        return stub
    return apply

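# NOTE (illustrative, added commentary): a hypothetical test could stub out a
# config lookup like this, where ``get_safe`` stands in for whichever method the
# code under test reads from ``conf.ceph``:
#
#     def test_uses_configured_path(conf_ceph):
#         conf_ceph(get_safe=lambda *a: '/var/lib/ceph/osd')
#         ...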

@pytest.fixture
def conf_ceph_stub(monkeypatch, tmpfile):
    """
    Monkeypatches ceph_volume.conf.ceph with contents from a string that are
    written to a temporary file and then fed through the same ceph.conf
    loading mechanisms for testing. Unlike ``conf_ceph``, which is just a fake,
    this actually loads values as they would appear in a ceph.conf file.

    This is useful when more complex ceph.conf contents are needed. When only
    validating key/value behavior, ``conf_ceph`` is better suited.
    """
    def apply(contents):
        conf_path = tmpfile(contents=contents)
        parser = configuration.load(conf_path)
        monkeypatch.setattr(conf, 'ceph', parser)
        return parser
    return apply

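# NOTE (illustrative, added commentary): a hypothetical use, feeding real INI
# contents through the normal loading path (the fsid value is made up):
#
#     def test_reads_fsid(conf_ceph_stub):
#         conf_ceph_stub('[global]\nfsid = 1234-lkjh-0000')
#         ...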

@pytest.fixture
def is_root(monkeypatch):
    """
    Patch ``os.getuid()`` so that ceph-volume's decorators that ensure a user
    is root (or is sudoing to superuser) can continue as-is
    """
    monkeypatch.setattr('os.getuid', lambda: 0)


@pytest.fixture
def tmpfile(tmpdir):
    """
    Create a temporary file, optionally filling it with contents, and return
    an absolute path to the file when called
    """
    def generate_file(name='file', contents='', directory=None):
        directory = directory or str(tmpdir)
        path = os.path.join(directory, name)
        with open(path, 'w') as fp:
            fp.write(contents)
        return path
    return generate_file

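# NOTE (illustrative, added commentary): a hypothetical use of ``tmpfile``:
#
#     def test_reads_file(tmpfile):
#         path = tmpfile(name='ceph.conf', contents='[global]\n')
#         ...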

@pytest.fixture
def disable_kernel_queries(monkeypatch):
    '''
    This speeds up calls to Device and Disk
    '''
    monkeypatch.setattr("ceph_volume.util.device.disk.get_devices", lambda: {})
    monkeypatch.setattr("ceph_volume.util.disk.udevadm_property", lambda *a, **kw: {})


@pytest.fixture(params=[
    '', 'ceph data', 'ceph journal', 'ceph block',
    'ceph block.wal', 'ceph block.db', 'ceph lockbox'])
def ceph_partlabel(request):
    return request.param


@pytest.fixture(params=list(ceph_disk_guids.keys()))
def ceph_parttype(request):
    return request.param


@pytest.fixture
def lsblk_ceph_disk_member(monkeypatch, request, ceph_partlabel, ceph_parttype):
    monkeypatch.setattr("ceph_volume.util.device.disk.lsblk",
                        lambda path: {'TYPE': 'disk',
                                      'NAME': 'sda',
                                      'PARTLABEL': ceph_partlabel,
                                      'PARTTYPE': ceph_parttype})
    monkeypatch.setattr("ceph_volume.util.device.disk.lsblk_all",
                        lambda: [{'TYPE': 'disk',
                                  'NAME': 'sda',
                                  'PARTLABEL': ceph_partlabel,
                                  'PARTTYPE': ceph_parttype}])

@pytest.fixture
def blkid_ceph_disk_member(monkeypatch, request, ceph_partlabel, ceph_parttype):
    monkeypatch.setattr("ceph_volume.util.device.disk.blkid",
                        lambda path: {'TYPE': 'disk',
                                      'PARTLABEL': ceph_partlabel,
                                      'PARTTYPE': ceph_parttype})


@pytest.fixture(params=[
    ('gluster partition', 'gluster partition'),
    # falls back to blkid
    ('', 'gluster partition'),
    ('gluster partition', ''),
])
def device_info_not_ceph_disk_member(monkeypatch, request):
    monkeypatch.setattr("ceph_volume.util.device.disk.lsblk",
                        lambda path: {'TYPE': 'disk',
                                      'NAME': 'sda',
                                      'PARTLABEL': request.param[0]})
    monkeypatch.setattr("ceph_volume.util.device.disk.lsblk_all",
                        lambda: [{'TYPE': 'disk',
                                  'NAME': 'sda',
                                  'PARTLABEL': request.param[0]}])
    monkeypatch.setattr("ceph_volume.util.device.disk.blkid",
                        lambda path: {'TYPE': 'disk',
                                      'PARTLABEL': request.param[1]})

@pytest.fixture
def patched_get_block_devs_sysfs():
    with patch('ceph_volume.util.disk.get_block_devs_sysfs') as p:
        yield p

@pytest.fixture
def patch_bluestore_label():
    with patch('ceph_volume.util.device.Device.has_bluestore_label',
               new_callable=PropertyMock) as p:
        p.return_value = False
        yield p

@pytest.fixture
def device_info(monkeypatch, patch_bluestore_label):
    def apply(devices=None, lsblk=None, lv=None, blkid=None, udevadm=None,
              has_bluestore_label=False):
        if devices:
            for dev in devices.keys():
                devices[dev]['device_nodes'] = os.path.basename(dev)
        else:
            devices = {}
        lsblk = lsblk if lsblk else {}
        blkid = blkid if blkid else {}
        udevadm = udevadm if udevadm else {}
        lv = Factory(**lv) if lv else None
        monkeypatch.setattr("ceph_volume.sys_info.devices", {})
        monkeypatch.setattr("ceph_volume.util.device.disk.get_devices", lambda: devices)
        if not devices:
            monkeypatch.setattr("ceph_volume.util.device.lvm.get_single_lv", lambda filters: lv)
        else:
            monkeypatch.setattr("ceph_volume.util.device.lvm.get_device_lvs",
                                lambda path: [lv])
        monkeypatch.setattr("ceph_volume.util.device.disk.lsblk", lambda path: lsblk)
        monkeypatch.setattr("ceph_volume.util.device.disk.blkid", lambda path: blkid)
        monkeypatch.setattr("ceph_volume.util.disk.udevadm_property", lambda *a, **kw: udevadm)
    return apply

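# NOTE (illustrative, added commentary): ``device_info`` patches every data source
# ``Device`` consults (get_devices, lsblk, blkid, udevadm, and the LVM lookups),
# so a test can fabricate a device from plain dictionaries. A hypothetical use:
#
#     def test_is_device(device_info):
#         device_info(lsblk={'TYPE': 'disk', 'NAME': 'sda'})
#         dev = device.Device('/dev/sda')
#         ...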
@pytest.fixture(params=[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.95, 0.999, 1.0])
def data_allocate_fraction(request):
    return request.param

@pytest.fixture
def fake_filesystem(fs):

    fs.create_dir('/sys/block/sda/slaves')
    fs.create_dir('/sys/block/sda/queue')
    fs.create_dir('/sys/block/rbd0')
    yield fs
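# NOTE (illustrative, added commentary): ``fs`` is the pyfakefs fixture, so
# ``fake_filesystem`` gives tests an in-memory /sys layout (an sda entry with
# slaves/ and queue/ directories plus an rbd0 entry) without touching the
# real sysfs.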