#
# Copyright (C) 2015, 2016 Red Hat <contact@redhat.com>
#
# Author: Loic Dachary <loic@dachary.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Public License for more details.
#
# When debugging these tests (must be root), here are a few useful commands:
#
# export PATH=.:..:$PATH
# ceph-disk.sh # run once to prepare the environment as it would be by teuthology
# ln -sf /home/ubuntu/ceph/src/ceph-disk/ceph_disk/main.py $(which ceph-disk)
# ln -sf /home/ubuntu/ceph/udev/95-ceph-osd.rules /lib/udev/rules.d/95-ceph-osd.rules
# ln -sf /home/ubuntu/ceph/systemd/ceph-disk@.service /usr/lib/systemd/system/ceph-disk@.service
# ceph-disk.conf will be silently ignored if it is a symbolic link or a hard link; see /var/log/upstart for logs
# cp /home/ubuntu/ceph/src/upstart/ceph-disk.conf /etc/init/ceph-disk.conf
# id=3 ; ceph-disk deactivate --deactivate-by-id $id ; ceph-disk destroy --zap --destroy-by-id $id
# py.test -s -v -k test_activate_dmcrypt_luks ceph-disk-test.py
#
# CentOS 7
#   udevadm monitor --property & tail -f /var/log/messages
#   udev rules messages are logged in /var/log/messages
#   systemctl stop ceph-osd@2
#   systemctl start ceph-osd@2
#
# udevadm monitor --property & tail -f /var/log/syslog /var/log/upstart/*  # on Ubuntu 14.04
# udevadm test --action=add /block/vdb/vdb1 # verify the udev rule is run as expected
# udevadm control --reload # when changing the udev rules
# sudo /usr/sbin/ceph-disk -v trigger /dev/vdb1 # activates if vdb1 is data
#
# integration tests coverage
#   pip install coverage
#   perl -pi -e 's|"ceph-disk |"coverage run --source=/usr/sbin/ceph-disk --append /usr/sbin/ceph-disk |' ceph-disk-test.py
#   rm -f .coverage ; py.test -s -v ceph-disk-test.py
#   coverage report --show-missing
#
import argparse
import json
import logging
import configobj
import os
import pytest
import re
import subprocess
import sys
import tempfile
import time
import uuid

LOG = logging.getLogger('CephDisk')


class CephDisk:

    def __init__(self):
        self.conf = configobj.ConfigObj('/etc/ceph/ceph.conf')

    def save_conf(self):
        self.conf.write(open('/etc/ceph/ceph.conf', 'wb'))

    @staticmethod
    def helper(command):
        command = "ceph-helpers-root.sh " + command
        return CephDisk.sh(command)

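    #
    # Run a shell command, log its output line by line and drop the
    # 'dangerous and experimental' warnings so callers can parse the
    # remaining output (e.g. as JSON). Raise CalledProcessError if the
    # command exits with a non-zero status.
    #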
    @staticmethod
    def sh(command):
        LOG.debug(":sh: " + command)
        proc = subprocess.Popen(
            args=command,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            shell=True,
            bufsize=1)
        lines = []
        with proc.stdout:
            for line in iter(proc.stdout.readline, b''):
                line = line.decode('utf-8')
                if 'dangerous and experimental' in line:
                    LOG.debug('SKIP dangerous and experimental')
                    continue
                lines.append(line)
                LOG.debug(line.strip().encode('ascii', 'ignore'))
        if proc.wait() != 0:
            raise subprocess.CalledProcessError(
                returncode=proc.returncode,
                cmd=command
            )
        return "".join(lines)

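    #
    # Return the paths of the disks matching pattern that have no
    # partitions, according to ceph-disk list --format json.
    #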
    def unused_disks(self, pattern='[vs]d.'):
        names = [x for x in os.listdir("/sys/block") if re.match(pattern, x)]
        if not names:
            return []
        disks = json.loads(
            self.sh("ceph-disk list --format json " + " ".join(names)))
        unused = []
        for disk in disks:
            if 'partitions' not in disk:
                unused.append(disk['path'])
        return unused

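    #
    # Make sure there is at least one unused sd* disk, loading the
    # scsi_debug module to create a small fake SCSI disk if needed.
    #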
    def ensure_sd(self):
        LOG.debug(self.unused_disks('sd.'))
        if self.unused_disks('sd.'):
            return
        modprobe = "modprobe scsi_debug vpd_use_hostno=0 add_host=1 dev_size_mb=200 ; udevadm settle"
        try:
            self.sh(modprobe)
        except:
            self.helper("install linux-image-extra-3.13.0-61-generic")
            self.sh(modprobe)

    def unload_scsi_debug(self):
        self.sh("rmmod scsi_debug || true")

    def get_lockbox(self):
        disks = json.loads(self.sh("ceph-disk list --format json"))
        for disk in disks:
            if 'partitions' in disk:
                for partition in disk['partitions']:
                    if partition.get('type') == 'lockbox':
                        return partition
        raise Exception("no lockbox found " + str(disks))

    def get_osd_partition(self, uuid):
        disks = json.loads(self.sh("ceph-disk list --format json"))
        for disk in disks:
            if 'partitions' in disk:
                for partition in disk['partitions']:
                    if partition.get('uuid') == uuid:
                        return partition
        raise Exception("uuid = " + uuid + " not found in " + str(disks))

    def get_journal_partition(self, uuid):
        return self.get_space_partition('journal', uuid)

    def get_block_partition(self, uuid):
        return self.get_space_partition('block', uuid)

    def get_blockdb_partition(self, uuid):
        return self.get_space_partition('block.db', uuid)

    def get_blockwal_partition(self, uuid):
        return self.get_space_partition('block.wal', uuid)

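    #
    # Return the partition holding the given space (journal, block,
    # block.db or block.wal) for the OSD identified by uuid, as
    # reported by ceph-disk list --format json.
    #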
    def get_space_partition(self, name, uuid):
        data_partition = self.get_osd_partition(uuid)
        space_dev = data_partition[name + '_dev']
        disks = json.loads(self.sh("ceph-disk list --format json"))
        for disk in disks:
            if 'partitions' in disk:
                for partition in disk['partitions']:
                    if partition['path'] == space_dev:
                        if name + '_for' in partition:
                            assert partition[
                                name + '_for'] == data_partition['path']
                        return partition
        raise Exception(
            name + " for uuid = " + uuid + " not found in " + str(disks))

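    #
    # ceph osd create <uuid> returns the id of the OSD that already has
    # this uuid; destroy_osd and deactivate_osd use it to map the OSD
    # uuid to its id.
    #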
    def destroy_osd(self, uuid):
        id = self.sh("ceph osd create " + uuid).strip()
        self.sh("""
        set -xe
        ceph-disk --verbose deactivate --deactivate-by-id {id}
        ceph-disk --verbose destroy --destroy-by-id {id} --zap
        """.format(id=id))

    def deactivate_osd(self, uuid):
        id = self.sh("ceph osd create " + uuid).strip()
        self.sh("""
        set -xe
        ceph-disk --verbose deactivate --once --deactivate-by-id {id}
        """.format(id=id))

    @staticmethod
    def osd_up_predicate(osds, uuid):
        for osd in osds:
            if osd['uuid'] == uuid and 'up' in osd['state']:
                return True
        return False

    @staticmethod
    def wait_for_osd_up(uuid):
        CephDisk.wait_for_osd(uuid, CephDisk.osd_up_predicate, 'up')

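    #
    # An OSD is considered down when its state contains 'down', when it
    # only 'exists', or when no OSD with the given uuid is found at all
    # (e.g. after it has been destroyed).
    #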
    @staticmethod
    def osd_down_predicate(osds, uuid):
        found = False
        for osd in osds:
            if osd['uuid'] == uuid:
                found = True
                if 'down' in osd['state'] or ['exists'] == osd['state']:
                    return True
        return not found

    @staticmethod
    def wait_for_osd_down(uuid):
        CephDisk.wait_for_osd(uuid, CephDisk.osd_down_predicate, 'down')

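    #
    # Poll ceph osd dump with exponential backoff until the predicate
    # is satisfied, raising an exception if it never is.
    #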
    @staticmethod
    def wait_for_osd(uuid, predicate, info):
        LOG.info("wait_for_osd " + info + " " + uuid)
        for delay in (1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024):
            dump = json.loads(CephDisk.sh("ceph osd dump -f json"))
            if predicate(dump['osds'], uuid):
                return True
            time.sleep(delay)
        raise Exception('timeout waiting for osd ' + uuid + ' to be ' + info)

    def check_osd_status(self, uuid, space_name=None):
        data_partition = self.get_osd_partition(uuid)
        assert data_partition['type'] == 'data'
        assert data_partition['state'] == 'active'
        if space_name is not None:
            space_partition = self.get_space_partition(space_name, uuid)
            assert space_partition


class TestCephDisk(object):

    def setup_class(self):
        logging.basicConfig(level=logging.DEBUG)
        c = CephDisk()
        if c.sh("lsb_release -si").strip() == 'CentOS':
            c.helper("install multipath-tools device-mapper-multipath")
        c.conf['global']['pid file'] = '/var/run/ceph/$cluster-$name.pid'
        #
        # Avoid json parsing interference
        #
        c.conf['global']['debug monc'] = 0
        #
        # objectstore
        #
        c.conf['global']['osd journal size'] = 100
        #
        # bluestore
        #
        c.conf['global']['bluestore fsck on mount'] = 'true'
        c.save_conf()

    def setup(self):
        c = CephDisk()
        for key in ('osd objectstore', 'osd dmcrypt type'):
            if key in c.conf['global']:
                del c.conf['global'][key]
        c.save_conf()

    def test_deactivate_reactivate_osd(self):
        c = CephDisk()
        disk = c.unused_disks()[0]
        osd_uuid = str(uuid.uuid1())
        c.sh("ceph-disk --verbose zap " + disk)
        c.sh("ceph-disk --verbose prepare --filestore --osd-uuid " + osd_uuid +
             " " + disk)
        c.wait_for_osd_up(osd_uuid)
        device = json.loads(c.sh("ceph-disk list --format json " + disk))[0]
        assert len(device['partitions']) == 2
        c.check_osd_status(osd_uuid, 'journal')
        data_partition = c.get_osd_partition(osd_uuid)
        c.sh("ceph-disk --verbose deactivate " + data_partition['path'])
        c.wait_for_osd_down(osd_uuid)
        c.sh("ceph-disk --verbose activate " + data_partition['path'] + " --reactivate")
        # check again
        c.wait_for_osd_up(osd_uuid)
        device = json.loads(c.sh("ceph-disk list --format json " + disk))[0]
        assert len(device['partitions']) == 2
        c.check_osd_status(osd_uuid, 'journal')
        c.helper("pool_read_write")
        c.destroy_osd(osd_uuid)

    def test_destroy_osd_by_id(self):
        c = CephDisk()
        disk = c.unused_disks()[0]
        osd_uuid = str(uuid.uuid1())
        c.sh("ceph-disk --verbose prepare --filestore --osd-uuid " + osd_uuid + " " + disk)
        c.wait_for_osd_up(osd_uuid)
        c.check_osd_status(osd_uuid)
        c.destroy_osd(osd_uuid)

    def test_destroy_osd_by_dev_path(self):
        c = CephDisk()
        disk = c.unused_disks()[0]
        osd_uuid = str(uuid.uuid1())
        c.sh("ceph-disk --verbose prepare --filestore --osd-uuid " + osd_uuid + " " + disk)
        c.wait_for_osd_up(osd_uuid)
        partition = c.get_osd_partition(osd_uuid)
        assert partition['type'] == 'data'
        assert partition['state'] == 'active'
        c.sh("ceph-disk --verbose deactivate " + partition['path'])
        c.wait_for_osd_down(osd_uuid)
        c.sh("ceph-disk --verbose destroy " + partition['path'] + " --zap")

    def test_deactivate_reactivate_dmcrypt_plain(self):
        c = CephDisk()
        c.conf['global']['osd dmcrypt type'] = 'plain'
        c.save_conf()
        osd_uuid = self.activate_dmcrypt('ceph-disk-no-lockbox')
        data_partition = c.get_osd_partition(osd_uuid)
        c.sh("ceph-disk --verbose deactivate " + data_partition['path'])
        c.wait_for_osd_down(osd_uuid)
        c.sh("ceph-disk --verbose activate-journal " + data_partition['journal_dev'] +
             " --reactivate" + " --dmcrypt")
        c.wait_for_osd_up(osd_uuid)
        c.check_osd_status(osd_uuid, 'journal')
        c.destroy_osd(osd_uuid)
        c.save_conf()

    def test_deactivate_reactivate_dmcrypt_luks(self):
        c = CephDisk()
        osd_uuid = self.activate_dmcrypt('ceph-disk')
        data_partition = c.get_osd_partition(osd_uuid)
        lockbox_partition = c.get_lockbox()
        c.sh("ceph-disk --verbose deactivate " + data_partition['path'])
        c.wait_for_osd_down(osd_uuid)
        c.sh("ceph-disk --verbose trigger --sync " + lockbox_partition['path'])
        c.sh("ceph-disk --verbose activate-journal " + data_partition['journal_dev'] +
             " --reactivate" + " --dmcrypt")
        c.wait_for_osd_up(osd_uuid)
        c.check_osd_status(osd_uuid, 'journal')
        c.destroy_osd(osd_uuid)

    def test_activate_dmcrypt_plain_no_lockbox(self):
        c = CephDisk()
        c.conf['global']['osd dmcrypt type'] = 'plain'
        c.save_conf()
        osd_uuid = self.activate_dmcrypt('ceph-disk-no-lockbox')
        c.destroy_osd(osd_uuid)
        c.save_conf()

    def test_activate_dmcrypt_luks_no_lockbox(self):
        c = CephDisk()
        osd_uuid = self.activate_dmcrypt('ceph-disk-no-lockbox')
        c.destroy_osd(osd_uuid)

    def test_activate_dmcrypt_luks_with_lockbox(self):
        c = CephDisk()
        osd_uuid = self.activate_dmcrypt('ceph-disk')
        c.destroy_osd(osd_uuid)

    def test_activate_lockbox(self):
        c = CephDisk()
        osd_uuid = self.activate_dmcrypt('ceph-disk')
        lockbox = c.get_lockbox()
        assert lockbox['state'] == 'active'
        c.sh("umount " + lockbox['path'])
        lockbox = c.get_lockbox()
        assert lockbox['state'] == 'prepared'
        c.sh("ceph-disk --verbose trigger " + lockbox['path'])
        success = False
        for delay in (1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024):
            lockbox = c.get_lockbox()
            if lockbox['state'] == 'active':
                success = True
                break
            time.sleep(delay)
        if not success:
            raise Exception('timeout waiting for lockbox ' + lockbox['path'])
        c.destroy_osd(osd_uuid)

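    #
    # Prepare a dmcrypt filestore OSD with the given ceph-disk command
    # ('ceph-disk' uses a lockbox, 'ceph-disk-no-lockbox' does not),
    # wait for it to come up and return its uuid.
    #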
    def activate_dmcrypt(self, ceph_disk):
        c = CephDisk()
        disk = c.unused_disks()[0]
        osd_uuid = str(uuid.uuid1())
        journal_uuid = str(uuid.uuid1())
        c.sh("ceph-disk --verbose zap " + disk)
        c.sh(ceph_disk + " --verbose prepare --filestore " +
             " --osd-uuid " + osd_uuid +
             " --journal-uuid " + journal_uuid +
             " --dmcrypt " +
             " " + disk)
        c.wait_for_osd_up(osd_uuid)
        c.check_osd_status(osd_uuid, 'journal')
        return osd_uuid

    def test_trigger_dmcrypt_journal_lockbox(self):
        c = CephDisk()
        osd_uuid = self.activate_dmcrypt('ceph-disk')
        data_partition = c.get_osd_partition(osd_uuid)
        lockbox_partition = c.get_lockbox()
        c.deactivate_osd(osd_uuid)
        c.wait_for_osd_down(osd_uuid)
        with pytest.raises(subprocess.CalledProcessError):
            # fails because the lockbox is not mounted yet
            c.sh("ceph-disk --verbose trigger --sync " + data_partition['journal_dev'])
        c.sh("ceph-disk --verbose trigger --sync " + lockbox_partition['path'])
        c.wait_for_osd_up(osd_uuid)
        c.destroy_osd(osd_uuid)

    def test_trigger_dmcrypt_data_lockbox(self):
        c = CephDisk()
        osd_uuid = self.activate_dmcrypt('ceph-disk')
        data_partition = c.get_osd_partition(osd_uuid)
        lockbox_partition = c.get_lockbox()
        c.deactivate_osd(osd_uuid)
        c.wait_for_osd_down(osd_uuid)
        with pytest.raises(subprocess.CalledProcessError):
            # fails because the lockbox is not mounted yet
            c.sh("ceph-disk --verbose trigger --sync " + data_partition['path'])
        c.sh("ceph-disk --verbose trigger --sync " + lockbox_partition['path'])
        c.wait_for_osd_up(osd_uuid)
        c.destroy_osd(osd_uuid)

    def test_trigger_dmcrypt_lockbox(self):
        c = CephDisk()
        osd_uuid = self.activate_dmcrypt('ceph-disk')
        data_partition = c.get_osd_partition(osd_uuid)
        lockbox_partition = c.get_lockbox()
        c.deactivate_osd(osd_uuid)
        c.wait_for_osd_down(osd_uuid)
        c.sh("ceph-disk --verbose trigger --sync " + lockbox_partition['path'])
        c.wait_for_osd_up(osd_uuid)
        c.destroy_osd(osd_uuid)

    def test_activate_no_journal(self):
        c = CephDisk()
        disk = c.unused_disks()[0]
        osd_uuid = str(uuid.uuid1())
        c.sh("ceph-disk --verbose zap " + disk)
        c.conf['global']['osd objectstore'] = 'memstore'
        c.save_conf()
        c.sh("ceph-disk --verbose prepare --filestore --osd-uuid " + osd_uuid +
             " " + disk)
        c.wait_for_osd_up(osd_uuid)
        device = json.loads(c.sh("ceph-disk list --format json " + disk))[0]
        assert len(device['partitions']) == 1
        partition = device['partitions'][0]
        assert partition['type'] == 'data'
        assert partition['state'] == 'active'
        assert 'journal_dev' not in partition
        c.helper("pool_read_write")
        c.destroy_osd(osd_uuid)
        c.save_conf()

    def test_activate_with_journal_dev_no_symlink(self):
        c = CephDisk()
        disk = c.unused_disks()[0]
        osd_uuid = str(uuid.uuid1())
        c.sh("ceph-disk --verbose zap " + disk)
        c.sh("ceph-disk --verbose prepare --filestore --osd-uuid " + osd_uuid +
             " " + disk)
        c.wait_for_osd_up(osd_uuid)
        device = json.loads(c.sh("ceph-disk list --format json " + disk))[0]
        assert len(device['partitions']) == 2
        c.check_osd_status(osd_uuid, 'journal')
        c.helper("pool_read_write")
        c.destroy_osd(osd_uuid)

    def test_activate_bluestore(self):
        c = CephDisk()
        disk = c.unused_disks()[0]
        osd_uuid = str(uuid.uuid1())
        c.sh("ceph-disk --verbose zap " + disk)
        c.conf['global']['osd objectstore'] = 'bluestore'
        c.save_conf()
        c.sh("ceph-disk --verbose prepare --bluestore --osd-uuid " + osd_uuid +
             " " + disk)
        c.wait_for_osd_up(osd_uuid)
        device = json.loads(c.sh("ceph-disk list --format json " + disk))[0]
        assert len(device['partitions']) == 2
        c.check_osd_status(osd_uuid, 'block')
        c.helper("pool_read_write")
        c.destroy_osd(osd_uuid)
        c.sh("ceph-disk --verbose zap " + disk)

    def test_activate_bluestore_seperated_block_db_wal(self):
        c = CephDisk()
        disk1 = c.unused_disks()[0]
        disk2 = c.unused_disks()[1]
        osd_uuid = str(uuid.uuid1())
        c.sh("ceph-disk --verbose zap " + disk1 + " " + disk2)
        c.conf['global']['osd objectstore'] = 'bluestore'
        c.save_conf()
        c.sh("ceph-disk --verbose prepare --bluestore --osd-uuid " + osd_uuid +
             " " + disk1 + " --block.db " + disk2 + " --block.wal " + disk2)
        c.wait_for_osd_up(osd_uuid)
        device = json.loads(c.sh("ceph-disk list --format json " + disk1))[0]
        assert len(device['partitions']) == 2
        device = json.loads(c.sh("ceph-disk list --format json " + disk2))[0]
        assert len(device['partitions']) == 2
        c.check_osd_status(osd_uuid, 'block')
        c.check_osd_status(osd_uuid, 'block.wal')
        c.check_osd_status(osd_uuid, 'block.db')
        c.helper("pool_read_write")
        c.destroy_osd(osd_uuid)
        c.sh("ceph-disk --verbose zap " + disk1 + " " + disk2)

    def test_activate_bluestore_reuse_db_wal_partition(self):
        c = CephDisk()
        disks = c.unused_disks()
        block_disk = disks[0]
        db_wal_disk = disks[1]
        #
        # Create an OSD with two disks (one for block,
        # the other for block.db and block.wal) and then destroy osd.
        #
        osd_uuid1 = str(uuid.uuid1())
        c.sh("ceph-disk --verbose zap " + block_disk + " " + db_wal_disk)
        c.conf['global']['osd objectstore'] = 'bluestore'
        c.save_conf()
        c.sh("ceph-disk --verbose prepare --bluestore --osd-uuid " +
             osd_uuid1 + " " + block_disk + " --block.db " + db_wal_disk +
             " --block.wal " + db_wal_disk)
        c.wait_for_osd_up(osd_uuid1)
        blockdb_partition = c.get_blockdb_partition(osd_uuid1)
        blockdb_path = blockdb_partition['path']
        blockwal_partition = c.get_blockwal_partition(osd_uuid1)
        blockwal_path = blockwal_partition['path']
        c.destroy_osd(osd_uuid1)
        c.sh("ceph-disk --verbose zap " + block_disk)
        #
        # Create another OSD with the block.db and block.wal partition
        # of the previous OSD
        #
        osd_uuid2 = str(uuid.uuid1())
        c.sh("ceph-disk --verbose prepare --bluestore --osd-uuid " +
             osd_uuid2 + " " + block_disk + " --block.db " + blockdb_path +
             " --block.wal " + blockwal_path)
        c.wait_for_osd_up(osd_uuid2)
        device = json.loads(c.sh("ceph-disk list --format json " + block_disk))[0]
        assert len(device['partitions']) == 2
        device = json.loads(c.sh("ceph-disk list --format json " + db_wal_disk))[0]
        assert len(device['partitions']) == 2
        c.check_osd_status(osd_uuid2, 'block')
        c.check_osd_status(osd_uuid2, 'block.wal')
        c.check_osd_status(osd_uuid2, 'block.db')
        blockdb_partition = c.get_blockdb_partition(osd_uuid2)
        blockwal_partition = c.get_blockwal_partition(osd_uuid2)
        #
        # Verify the previous OSD partition has been reused
        #
        assert blockdb_partition['path'] == blockdb_path
        assert blockwal_partition['path'] == blockwal_path
        c.destroy_osd(osd_uuid2)
        c.sh("ceph-disk --verbose zap " + block_disk + " " + db_wal_disk)

    def test_activate_with_journal_dev_is_symlink(self):
        c = CephDisk()
        disk = c.unused_disks()[0]
        osd_uuid = str(uuid.uuid1())
        tempdir = tempfile.mkdtemp()
        symlink = os.path.join(tempdir, 'osd')
        os.symlink(disk, symlink)
        c.sh("ceph-disk --verbose zap " + symlink)
        c.sh("ceph-disk --verbose prepare --filestore --osd-uuid " + osd_uuid +
             " " + symlink)
        c.wait_for_osd_up(osd_uuid)
        device = json.loads(c.sh("ceph-disk list --format json " + symlink))[0]
        assert len(device['partitions']) == 2
        data_partition = c.get_osd_partition(osd_uuid)
        assert data_partition['type'] == 'data'
        assert data_partition['state'] == 'active'
        journal_partition = c.get_journal_partition(osd_uuid)
        assert journal_partition
        c.helper("pool_read_write")
        c.destroy_osd(osd_uuid)
        c.sh("ceph-disk --verbose zap " + symlink)
        os.unlink(symlink)
        os.rmdir(tempdir)

    def test_activate_journal_file(self):
        c = CephDisk()
        disks = c.unused_disks()
        data_disk = disks[0]
        #
        # /var/lib/ceph/osd is required otherwise it may violate
        # restrictions enforced by systemd regarding the directories
        # which ceph-osd is allowed to read/write
        #
        tempdir = tempfile.mkdtemp(dir='/var/lib/ceph/osd')
        c.sh("chown ceph:ceph " + tempdir + " || true")
        journal_file = os.path.join(tempdir, 'journal')
        osd_uuid = str(uuid.uuid1())
        c.sh("ceph-disk --verbose prepare --filestore --osd-uuid " + osd_uuid +
             " " + data_disk + " " + journal_file)
        c.wait_for_osd_up(osd_uuid)
        device = json.loads(
            c.sh("ceph-disk list --format json " + data_disk))[0]
        assert len(device['partitions']) == 1
        partition = device['partitions'][0]
        assert journal_file == os.readlink(
            os.path.join(partition['mount'], 'journal'))
        c.check_osd_status(osd_uuid)
        c.helper("pool_read_write 1")  # 1 == pool size
        c.destroy_osd(osd_uuid)
        c.sh("ceph-disk --verbose zap " + data_disk)
        os.unlink(journal_file)
        os.rmdir(tempdir)

    def test_activate_separated_journal(self):
        c = CephDisk()
        disks = c.unused_disks()
        data_disk = disks[0]
        journal_disk = disks[1]
        osd_uuid = self.activate_separated_journal(data_disk, journal_disk)
        c.helper("pool_read_write 1")  # 1 == pool size
        c.destroy_osd(osd_uuid)
        c.sh("ceph-disk --verbose zap " + data_disk + " " + journal_disk)

    def test_activate_separated_journal_dev_is_symlink(self):
        c = CephDisk()
        disks = c.unused_disks()
        data_disk = disks[0]
        journal_disk = disks[1]
        tempdir = tempfile.mkdtemp()
        data_symlink = os.path.join(tempdir, 'osd')
        os.symlink(data_disk, data_symlink)
        journal_symlink = os.path.join(tempdir, 'journal')
        os.symlink(journal_disk, journal_symlink)
        osd_uuid = self.activate_separated_journal(
            data_symlink, journal_symlink)
        c.helper("pool_read_write 1")  # 1 == pool size
        c.destroy_osd(osd_uuid)
        c.sh("ceph-disk --verbose zap " + data_symlink + " " + journal_symlink)
        os.unlink(data_symlink)
        os.unlink(journal_symlink)
        os.rmdir(tempdir)

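    #
    # Prepare a filestore OSD with its journal on a separate device,
    # wait for it to come up and return its uuid.
    #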
    def activate_separated_journal(self, data_disk, journal_disk):
        c = CephDisk()
        osd_uuid = str(uuid.uuid1())
        c.sh("ceph-disk --verbose prepare --filestore --osd-uuid " + osd_uuid +
             " " + data_disk + " " + journal_disk)
        c.wait_for_osd_up(osd_uuid)
        device = json.loads(
            c.sh("ceph-disk list --format json " + data_disk))[0]
        assert len(device['partitions']) == 1
        c.check_osd_status(osd_uuid, 'journal')
        return osd_uuid

    #
    # Create an OSD and get a journal partition from a disk that
    # already contains a journal partition which is in use. Updates of
    # the kernel partition table may behave differently when a
    # partition is in use. See http://tracker.ceph.com/issues/7334 for
    # more information.
    #
    def test_activate_two_separated_journal(self):
        c = CephDisk()
        disks = c.unused_disks()
        data_disk = disks[0]
        other_data_disk = disks[1]
        journal_disk = disks[2]
        osd_uuid = self.activate_separated_journal(data_disk, journal_disk)
        other_osd_uuid = self.activate_separated_journal(
            other_data_disk, journal_disk)
        #
        # read/write can only succeed if the two OSDs are up because
        # the pool needs two OSDs
        #
        c.helper("pool_read_write 2")  # 2 == pool size
        c.destroy_osd(osd_uuid)
        c.destroy_osd(other_osd_uuid)
        c.sh("ceph-disk --verbose zap " + data_disk + " " +
             journal_disk + " " + other_data_disk)

    #
    # Create an OSD and reuse an existing journal partition
    #
    def test_activate_reuse_journal(self):
        c = CephDisk()
        disks = c.unused_disks()
        data_disk = disks[0]
        journal_disk = disks[1]
        #
        # Create an OSD with a separated journal and destroy it.
        #
        osd_uuid = self.activate_separated_journal(data_disk, journal_disk)
        journal_partition = c.get_journal_partition(osd_uuid)
        journal_path = journal_partition['path']
        c.destroy_osd(osd_uuid)
        c.sh("ceph-disk --verbose zap " + data_disk)
        osd_uuid = str(uuid.uuid1())
        #
        # Create another OSD with the journal partition of the previous OSD
        #
        c.sh("ceph-disk --verbose prepare --filestore --osd-uuid " + osd_uuid +
             " " + data_disk + " " + journal_path)
        c.helper("pool_read_write 1")  # 1 == pool size
        c.wait_for_osd_up(osd_uuid)
        device = json.loads(
            c.sh("ceph-disk list --format json " + data_disk))[0]
        assert len(device['partitions']) == 1
        c.check_osd_status(osd_uuid)
        journal_partition = c.get_journal_partition(osd_uuid)
        #
        # Verify the previous OSD partition has been reused
        #
        assert journal_partition['path'] == journal_path
        c.destroy_osd(osd_uuid)
        c.sh("ceph-disk --verbose zap " + data_disk + " " + journal_disk)

    def test_activate_multipath(self):
        c = CephDisk()
        if c.sh("lsb_release -si").strip() != 'CentOS':
            pytest.skip(
                "see issue https://bugs.launchpad.net/ubuntu/+source/multipath-tools/+bug/1488688")
        c.ensure_sd()
        #
        # Figure out the name of the multipath device
        #
        disk = c.unused_disks('sd.')[0]
        c.sh("mpathconf --enable || true")
        c.sh("multipath " + disk)
        holders = os.listdir(
            "/sys/block/" + os.path.basename(disk) + "/holders")
        assert 1 == len(holders)
        name = open("/sys/block/" + holders[0] + "/dm/name").read()
        multipath = "/dev/mapper/" + name
        #
        # Prepare the multipath device
        #
        osd_uuid = str(uuid.uuid1())
        c.sh("ceph-disk --verbose zap " + multipath)
        c.sh("ceph-disk --verbose prepare --filestore --osd-uuid " + osd_uuid +
             " " + multipath)
        c.wait_for_osd_up(osd_uuid)
        device = json.loads(
            c.sh("ceph-disk list --format json " + multipath))[0]
        assert len(device['partitions']) == 2
        data_partition = c.get_osd_partition(osd_uuid)
        assert data_partition['type'] == 'data'
        assert data_partition['state'] == 'active'
        journal_partition = c.get_journal_partition(osd_uuid)
        assert journal_partition
        c.helper("pool_read_write")
        c.destroy_osd(osd_uuid)
        c.sh("udevadm settle")
        c.sh("multipath -F")
        c.unload_scsi_debug()


class CephDiskTest(CephDisk):

    def main(self, argv):
        parser = argparse.ArgumentParser(
            'ceph-disk-test',
        )
        parser.add_argument(
            '-v', '--verbose',
            action='store_true', default=None,
            help='be more verbose',
        )
        parser.add_argument(
            '--destroy-osd',
            help='stop, umount and destroy',
        )
        args = parser.parse_args(argv)

        if args.verbose:
            logging.basicConfig(level=logging.DEBUG)

        if args.destroy_osd:
            dump = json.loads(CephDisk.sh("ceph osd dump -f json"))
            osd_uuid = None
            for osd in dump['osds']:
                if str(osd['osd']) == args.destroy_osd:
                    osd_uuid = osd['uuid']
            if osd_uuid:
                self.destroy_osd(osd_uuid)
            else:
                raise Exception("cannot find OSD " + args.destroy_osd +
                                " in ceph osd dump -f json")
            return

if __name__ == '__main__':
    sys.exit(CephDiskTest().main(sys.argv[1:]))