# Copyright (C) 2015, 2016 Red Hat <contact@redhat.com>
#
# Author: Loic Dachary <loic@dachary.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Library Public License for more details.
#
# When debugging these tests (must be root), here are a few useful commands:
#
#  export PATH=.:..:$PATH
#  ceph-disk.sh # run once to prepare the environment as it would be by teuthology
#  ln -sf /home/ubuntu/ceph/src/ceph-disk/ceph_disk/main.py $(which ceph-disk)
#  ln -sf /home/ubuntu/ceph/udev/95-ceph-osd.rules /lib/udev/rules.d/95-ceph-osd.rules
#  ln -sf /home/ubuntu/ceph/systemd/ceph-disk@.service /usr/lib/systemd/system/ceph-disk@.service
#  ceph-disk.conf will be silently ignored if it is a symbolic link or a hard link; see /var/log/upstart for logs
#  cp /home/ubuntu/ceph/src/upstart/ceph-disk.conf /etc/init/ceph-disk.conf
#  id=3 ; ceph-disk deactivate --deactivate-by-id $id ; ceph-disk destroy --zap --destroy-by-id $id
#  py.test -s -v -k test_activate_dmcrypt_luks ceph-disk-test.py
#
#  udevadm monitor --property & tail -f /var/log/messages
#  udev rules messages are logged in /var/log/messages
#  systemctl stop ceph-osd@2
#  systemctl start ceph-osd@2
#
#  udevadm monitor --property & tail -f /var/log/syslog /var/log/upstart/* # on Ubuntu 14.04
#  udevadm test --action=add /block/vdb/vdb1 # verify the udev rule is run as expected
#  udevadm control --reload # when changing the udev rules
#  sudo /usr/sbin/ceph-disk -v trigger /dev/vdb1 # activates if vdb1 is data
#
#  integration tests coverage:
#  pip install coverage
#  perl -pi -e 's|"ceph-disk |"coverage run --source=/usr/sbin/ceph-disk --append /usr/sbin/ceph-disk |' ceph-disk-test.py
#  rm -f .coverage ; py.test -s -v ceph-disk-test.py
#  coverage report --show-missing
#
import argparse
import json
import logging
import os
import re
import subprocess
import sys
import tempfile
import time
import uuid

import configobj
import pytest

LOG = logging.getLogger('CephDisk')


class CephDisk(object):

    def __init__(self):
        self.conf = configobj.ConfigObj('/etc/ceph/ceph.conf')

    def save_conf(self):
        self.conf.write(open('/etc/ceph/ceph.conf', 'wb'))

    @staticmethod
    def helper(command):
        command = "ceph-helpers-root.sh " + command
        return CephDisk.sh(command)

    @staticmethod
    def sh(command):
        LOG.debug(":sh: " + command)
        proc = subprocess.Popen(
            args=command,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            shell=True,
            bufsize=1)
        lines = []
        for line in iter(proc.stdout.readline, b''):
            line = line.decode('utf-8')
            if 'dangerous and experimental' in line:
                LOG.debug('SKIP dangerous and experimental')
                continue
            lines.append(line)
            LOG.debug(line.strip().encode('ascii', 'ignore'))
        if proc.wait() != 0:
            raise subprocess.CalledProcessError(
                returncode=proc.returncode,
                cmd=command)
        return "".join(lines)

    def unused_disks(self, pattern='[vs]d.'):
        names = [x for x in os.listdir("/sys/block") if re.match(pattern, x)]
        if not names:
            return []
        disks = json.loads(
            self.sh("ceph-disk list --format json " + " ".join(names)))
        unused = []
        for disk in disks:
            if 'partitions' not in disk:
                unused.append(disk['path'])
        return unused

    def ensure_sd(self):
        # load scsi_debug to provide a small fake disk when no sd.* disk is unused
        LOG.debug(self.unused_disks('sd.'))
        if self.unused_disks('sd.'):
            return
        modprobe = "modprobe scsi_debug vpd_use_hostno=0 add_host=1 dev_size_mb=200 ; udevadm settle"
        try:
            self.sh(modprobe)
        except:
            self.helper("install linux-image-extra-3.13.0-61-generic")
            self.sh(modprobe)

    def unload_scsi_debug(self):
        self.sh("rmmod scsi_debug || true")

    def get_lockbox(self):
        disks = json.loads(self.sh("ceph-disk list --format json"))
        for disk in disks:
            if 'partitions' in disk:
                for partition in disk['partitions']:
                    if partition.get('type') == 'lockbox':
                        return partition
        raise Exception("no lockbox found " + str(disks))

    def get_osd_partition(self, uuid):
        disks = json.loads(self.sh("ceph-disk list --format json"))
        for disk in disks:
            if 'partitions' in disk:
                for partition in disk['partitions']:
                    if partition.get('uuid') == uuid:
                        return partition
        raise Exception("uuid = " + uuid + " not found in " + str(disks))

    def get_journal_partition(self, uuid):
        return self.get_space_partition('journal', uuid)

    def get_block_partition(self, uuid):
        return self.get_space_partition('block', uuid)

    def get_blockdb_partition(self, uuid):
        return self.get_space_partition('block.db', uuid)

    def get_blockwal_partition(self, uuid):
        return self.get_space_partition('block.wal', uuid)

    def get_space_partition(self, name, uuid):
        data_partition = self.get_osd_partition(uuid)
        space_dev = data_partition[name + '_dev']
        disks = json.loads(self.sh("ceph-disk list --format json"))
        for disk in disks:
            if 'partitions' in disk:
                for partition in disk['partitions']:
                    if partition['path'] == space_dev:
                        if name + '_for' in partition:
                            assert partition[
                                name + '_for'] == data_partition['path']
                        return partition
        raise Exception(
            name + " for uuid = " + uuid + " not found in " + str(disks))
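
    # For orientation, a simplified sketch (not actual ceph-disk output) of the
    # "ceph-disk list --format json" structure that get_lockbox, get_osd_partition
    # and get_space_partition navigate; only keys referenced above are shown:
    #
    #   [{"path": "/dev/vdb",
    #     "partitions": [
    #       {"path": "/dev/vdb1", "type": "data", "state": "active",
    #        "uuid": "<osd uuid>", "journal_dev": "/dev/vdb2"},
    #       {"path": "/dev/vdb2", "type": "journal", "journal_for": "/dev/vdb1"}]}]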

    def destroy_osd(self, uuid):
        id = self.sh("ceph osd create " + uuid).strip()
        self.sh("""
        set -xe
        ceph-disk --verbose deactivate --deactivate-by-id {id}
        ceph-disk --verbose destroy --destroy-by-id {id} --zap
        """.format(id=id))

    def deactivate_osd(self, uuid):
        id = self.sh("ceph osd create " + uuid).strip()
        self.sh("""
        set -xe
        ceph-disk --verbose deactivate --once --deactivate-by-id {id}
        """.format(id=id))

    @staticmethod
    def osd_up_predicate(osds, uuid):
        for osd in osds:
            if osd['uuid'] == uuid and 'up' in osd['state']:
                return True
        return False

    @staticmethod
    def wait_for_osd_up(uuid):
        CephDisk.wait_for_osd(uuid, CephDisk.osd_up_predicate, 'up')

    @staticmethod
    def osd_down_predicate(osds, uuid):
        found = False
        for osd in osds:
            if osd['uuid'] == uuid:
                found = True
                if 'down' in osd['state'] or ['exists'] == osd['state']:
                    return True
        return not found

    @staticmethod
    def wait_for_osd_down(uuid):
        CephDisk.wait_for_osd(uuid, CephDisk.osd_down_predicate, 'down')

    @staticmethod
    def wait_for_osd(uuid, predicate, info):
        LOG.info("wait_for_osd " + info + " " + uuid)
        for delay in (1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024):
            dump = json.loads(CephDisk.sh("ceph osd dump -f json"))
            if predicate(dump['osds'], uuid):
                return True
            time.sleep(delay)
        raise Exception('timeout waiting for osd ' + uuid + ' to be ' + info)
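
    # The polling helper above is generic: any predicate taking (osds, uuid) can
    # be passed to wait_for_osd. A hypothetical example (not used by these tests)
    # that waits until the OSD disappears from "ceph osd dump" entirely:
    #
    #   CephDisk.wait_for_osd(
    #       osd_uuid,
    #       lambda osds, u: all(o['uuid'] != u for o in osds),
    #       'removed')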

    def check_osd_status(self, uuid, space_name=None):
        data_partition = self.get_osd_partition(uuid)
        assert data_partition['type'] == 'data'
        assert data_partition['state'] == 'active'
        if space_name is not None:
            space_partition = self.get_space_partition(space_name, uuid)
            assert space_partition


class TestCephDisk(object):

    def setup_class(self):
        logging.basicConfig(level=logging.DEBUG)
        c = CephDisk()
        if c.sh("lsb_release -si").strip() == 'CentOS':
            c.helper("install multipath-tools device-mapper-multipath")
        c.conf['global']['pid file'] = '/var/run/ceph/$cluster-$name.pid'
        #
        # Avoid json parsing interference
        #
        c.conf['global']['debug monc'] = 0
        c.conf['global']['osd journal size'] = 100
        c.conf['global']['bluestore fsck on mount'] = 'true'
        c.save_conf()

    def setup(self):
        c = CephDisk()
        for key in ('osd objectstore', 'osd dmcrypt type'):
            if key in c.conf['global']:
                del c.conf['global'][key]
        c.save_conf()

    def test_deactivate_reactivate_osd(self):
        c = CephDisk()
        disk = c.unused_disks()[0]
        osd_uuid = str(uuid.uuid1())
        c.sh("ceph-disk --verbose zap " + disk)
        c.sh("ceph-disk --verbose prepare --filestore --osd-uuid " + osd_uuid +
             " " + disk)
        c.wait_for_osd_up(osd_uuid)
        device = json.loads(c.sh("ceph-disk list --format json " + disk))[0]
        assert len(device['partitions']) == 2
        c.check_osd_status(osd_uuid, 'journal')
        data_partition = c.get_osd_partition(osd_uuid)
        c.sh("ceph-disk --verbose deactivate " + data_partition['path'])
        c.wait_for_osd_down(osd_uuid)
        c.sh("ceph-disk --verbose activate " + data_partition['path'] + " --reactivate")
        c.wait_for_osd_up(osd_uuid)
        device = json.loads(c.sh("ceph-disk list --format json " + disk))[0]
        assert len(device['partitions']) == 2
        c.check_osd_status(osd_uuid, 'journal')
        c.helper("pool_read_write")
        c.destroy_osd(osd_uuid)

    def test_destroy_osd_by_id(self):
        c = CephDisk()
        disk = c.unused_disks()[0]
        osd_uuid = str(uuid.uuid1())
        c.sh("ceph-disk --verbose prepare --filestore --osd-uuid " + osd_uuid + " " + disk)
        c.wait_for_osd_up(osd_uuid)
        c.check_osd_status(osd_uuid)
        c.destroy_osd(osd_uuid)

    def test_destroy_osd_by_dev_path(self):
        c = CephDisk()
        disk = c.unused_disks()[0]
        osd_uuid = str(uuid.uuid1())
        c.sh("ceph-disk --verbose prepare --filestore --osd-uuid " + osd_uuid + " " + disk)
        c.wait_for_osd_up(osd_uuid)
        partition = c.get_osd_partition(osd_uuid)
        assert partition['type'] == 'data'
        assert partition['state'] == 'active'
        c.sh("ceph-disk --verbose deactivate " + partition['path'])
        c.wait_for_osd_down(osd_uuid)
        c.sh("ceph-disk --verbose destroy " + partition['path'] + " --zap")

    def test_deactivate_reactivate_dmcrypt_plain(self):
        c = CephDisk()
        c.conf['global']['osd dmcrypt type'] = 'plain'
        c.save_conf()
        osd_uuid = self.activate_dmcrypt('ceph-disk-no-lockbox')
        data_partition = c.get_osd_partition(osd_uuid)
        c.sh("ceph-disk --verbose deactivate " + data_partition['path'])
        c.wait_for_osd_down(osd_uuid)
        c.sh("ceph-disk --verbose activate-journal " + data_partition['journal_dev'] +
             " --reactivate" + " --dmcrypt")
        c.wait_for_osd_up(osd_uuid)
        c.check_osd_status(osd_uuid, 'journal')
        c.destroy_osd(osd_uuid)

    def test_deactivate_reactivate_dmcrypt_luks(self):
        c = CephDisk()
        osd_uuid = self.activate_dmcrypt('ceph-disk')
        data_partition = c.get_osd_partition(osd_uuid)
        lockbox_partition = c.get_lockbox()
        c.sh("ceph-disk --verbose deactivate " + data_partition['path'])
        c.wait_for_osd_down(osd_uuid)
        c.sh("ceph-disk --verbose trigger --sync " + lockbox_partition['path'])
        c.sh("ceph-disk --verbose activate-journal " + data_partition['journal_dev'] +
             " --reactivate" + " --dmcrypt")
        c.wait_for_osd_up(osd_uuid)
        c.check_osd_status(osd_uuid, 'journal')
        c.destroy_osd(osd_uuid)

    def test_activate_dmcrypt_plain_no_lockbox(self):
        c = CephDisk()
        c.conf['global']['osd dmcrypt type'] = 'plain'
        c.save_conf()
        osd_uuid = self.activate_dmcrypt('ceph-disk-no-lockbox')
        c.destroy_osd(osd_uuid)

    def test_activate_dmcrypt_luks_no_lockbox(self):
        c = CephDisk()
        osd_uuid = self.activate_dmcrypt('ceph-disk-no-lockbox')
        c.destroy_osd(osd_uuid)

    def test_activate_dmcrypt_luks_with_lockbox(self):
        c = CephDisk()
        osd_uuid = self.activate_dmcrypt('ceph-disk')
        c.destroy_osd(osd_uuid)

    def test_activate_lockbox(self):
        c = CephDisk()
        osd_uuid = self.activate_dmcrypt('ceph-disk')
        lockbox = c.get_lockbox()
        assert lockbox['state'] == 'active'
        c.sh("umount " + lockbox['path'])
        lockbox = c.get_lockbox()
        assert lockbox['state'] == 'prepared'
        c.sh("ceph-disk --verbose trigger " + lockbox['path'])
        success = False
        for delay in (1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024):
            lockbox = c.get_lockbox()
            if lockbox['state'] == 'active':
                success = True
                break
            time.sleep(delay)
        if not success:
            raise Exception('timeout waiting for lockbox ' + lockbox['path'])
        c.destroy_osd(osd_uuid)

    def activate_dmcrypt(self, ceph_disk):
        c = CephDisk()
        disk = c.unused_disks()[0]
        osd_uuid = str(uuid.uuid1())
        journal_uuid = str(uuid.uuid1())
        c.sh("ceph-disk --verbose zap " + disk)
        c.sh(ceph_disk + " --verbose prepare --filestore " +
             " --osd-uuid " + osd_uuid +
             " --journal-uuid " + journal_uuid +
             " --dmcrypt" +
             " " + disk)
        c.wait_for_osd_up(osd_uuid)
        c.check_osd_status(osd_uuid, 'journal')
        return osd_uuid

    def test_trigger_dmcrypt_journal_lockbox(self):
        c = CephDisk()
        osd_uuid = self.activate_dmcrypt('ceph-disk')
        data_partition = c.get_osd_partition(osd_uuid)
        lockbox_partition = c.get_lockbox()
        c.deactivate_osd(osd_uuid)
        c.wait_for_osd_down(osd_uuid)
        with pytest.raises(subprocess.CalledProcessError):
            # fails because the lockbox is not mounted yet
            c.sh("ceph-disk --verbose trigger --sync " + data_partition['journal_dev'])
        c.sh("ceph-disk --verbose trigger --sync " + lockbox_partition['path'])
        c.wait_for_osd_up(osd_uuid)
        c.destroy_osd(osd_uuid)

    def test_trigger_dmcrypt_data_lockbox(self):
        c = CephDisk()
        osd_uuid = self.activate_dmcrypt('ceph-disk')
        data_partition = c.get_osd_partition(osd_uuid)
        lockbox_partition = c.get_lockbox()
        c.deactivate_osd(osd_uuid)
        c.wait_for_osd_down(osd_uuid)
        with pytest.raises(subprocess.CalledProcessError):
            # fails because the lockbox is not mounted yet
            c.sh("ceph-disk --verbose trigger --sync " + data_partition['path'])
        c.sh("ceph-disk --verbose trigger --sync " + lockbox_partition['path'])
        c.wait_for_osd_up(osd_uuid)
        c.destroy_osd(osd_uuid)

    def test_trigger_dmcrypt_lockbox(self):
        c = CephDisk()
        osd_uuid = self.activate_dmcrypt('ceph-disk')
        data_partition = c.get_osd_partition(osd_uuid)
        lockbox_partition = c.get_lockbox()
        c.deactivate_osd(osd_uuid)
        c.wait_for_osd_down(osd_uuid)
        c.sh("ceph-disk --verbose trigger --sync " + lockbox_partition['path'])
        c.wait_for_osd_up(osd_uuid)
        c.destroy_osd(osd_uuid)

    def test_activate_no_journal(self):
        c = CephDisk()
        disk = c.unused_disks()[0]
        osd_uuid = str(uuid.uuid1())
        c.sh("ceph-disk --verbose zap " + disk)
        c.conf['global']['osd objectstore'] = 'memstore'
        c.save_conf()
        c.sh("ceph-disk --verbose prepare --filestore --osd-uuid " + osd_uuid +
             " " + disk)
        c.wait_for_osd_up(osd_uuid)
        device = json.loads(c.sh("ceph-disk list --format json " + disk))[0]
        assert len(device['partitions']) == 1
        partition = device['partitions'][0]
        assert partition['type'] == 'data'
        assert partition['state'] == 'active'
        assert 'journal_dev' not in partition
        c.helper("pool_read_write")
        c.destroy_osd(osd_uuid)

    def test_activate_with_journal_dev_no_symlink(self):
        c = CephDisk()
        disk = c.unused_disks()[0]
        osd_uuid = str(uuid.uuid1())
        c.sh("ceph-disk --verbose zap " + disk)
        c.sh("ceph-disk --verbose prepare --filestore --osd-uuid " + osd_uuid +
             " " + disk)
        c.wait_for_osd_up(osd_uuid)
        device = json.loads(c.sh("ceph-disk list --format json " + disk))[0]
        assert len(device['partitions']) == 2
        c.check_osd_status(osd_uuid, 'journal')
        c.helper("pool_read_write")
        c.destroy_osd(osd_uuid)

    def test_activate_bluestore(self):
        c = CephDisk()
        disk = c.unused_disks()[0]
        osd_uuid = str(uuid.uuid1())
        c.sh("ceph-disk --verbose zap " + disk)
        c.conf['global']['osd objectstore'] = 'bluestore'
        c.save_conf()
        c.sh("ceph-disk --verbose prepare --bluestore --osd-uuid " + osd_uuid +
             " " + disk)
        c.wait_for_osd_up(osd_uuid)
        device = json.loads(c.sh("ceph-disk list --format json " + disk))[0]
        assert len(device['partitions']) == 2
        c.check_osd_status(osd_uuid, 'block')
        c.helper("pool_read_write")
        c.destroy_osd(osd_uuid)
        c.sh("ceph-disk --verbose zap " + disk)

    def test_activate_bluestore_seperated_block_db_wal(self):
        c = CephDisk()
        disk1 = c.unused_disks()[0]
        disk2 = c.unused_disks()[1]
        osd_uuid = str(uuid.uuid1())
        c.sh("ceph-disk --verbose zap " + disk1 + " " + disk2)
        c.conf['global']['osd objectstore'] = 'bluestore'
        c.save_conf()
        c.sh("ceph-disk --verbose prepare --bluestore --osd-uuid " + osd_uuid +
             " " + disk1 + " --block.db " + disk2 + " --block.wal " + disk2)
        c.wait_for_osd_up(osd_uuid)
        device = json.loads(c.sh("ceph-disk list --format json " + disk1))[0]
        assert len(device['partitions']) == 2
        device = json.loads(c.sh("ceph-disk list --format json " + disk2))[0]
        assert len(device['partitions']) == 2
        c.check_osd_status(osd_uuid, 'block')
        c.check_osd_status(osd_uuid, 'block.wal')
        c.check_osd_status(osd_uuid, 'block.db')
        c.helper("pool_read_write")
        c.destroy_osd(osd_uuid)
        c.sh("ceph-disk --verbose zap " + disk1 + " " + disk2)

    def test_activate_bluestore_reuse_db_wal_partition(self):
        c = CephDisk()
        disks = c.unused_disks()
        block_disk = disks[0]
        db_wal_disk = disks[1]
        #
        # Create an OSD with two disks (one for block,
        # the other for block.db and block.wal) and then destroy the OSD.
        #
        osd_uuid1 = str(uuid.uuid1())
        c.sh("ceph-disk --verbose zap " + block_disk + " " + db_wal_disk)
        c.conf['global']['osd objectstore'] = 'bluestore'
        c.save_conf()
        c.sh("ceph-disk --verbose prepare --bluestore --osd-uuid " +
             osd_uuid1 + " " + block_disk + " --block.db " + db_wal_disk +
             " --block.wal " + db_wal_disk)
        c.wait_for_osd_up(osd_uuid1)
        blockdb_partition = c.get_blockdb_partition(osd_uuid1)
        blockdb_path = blockdb_partition['path']
        blockwal_partition = c.get_blockwal_partition(osd_uuid1)
        blockwal_path = blockwal_partition['path']
        c.destroy_osd(osd_uuid1)
        c.sh("ceph-disk --verbose zap " + block_disk)
        #
        # Create another OSD with the block.db and block.wal partitions
        # of the previous OSD
        #
        osd_uuid2 = str(uuid.uuid1())
        c.sh("ceph-disk --verbose prepare --bluestore --osd-uuid " +
             osd_uuid2 + " " + block_disk + " --block.db " + blockdb_path +
             " --block.wal " + blockwal_path)
        c.wait_for_osd_up(osd_uuid2)
        device = json.loads(c.sh("ceph-disk list --format json " + block_disk))[0]
        assert len(device['partitions']) == 2
        device = json.loads(c.sh("ceph-disk list --format json " + db_wal_disk))[0]
        assert len(device['partitions']) == 2
        c.check_osd_status(osd_uuid2, 'block')
        c.check_osd_status(osd_uuid2, 'block.wal')
        c.check_osd_status(osd_uuid2, 'block.db')
        blockdb_partition = c.get_blockdb_partition(osd_uuid2)
        blockwal_partition = c.get_blockwal_partition(osd_uuid2)
        #
        # Verify the previous OSD partitions have been reused
        #
        assert blockdb_partition['path'] == blockdb_path
        assert blockwal_partition['path'] == blockwal_path
        c.destroy_osd(osd_uuid2)
        c.sh("ceph-disk --verbose zap " + block_disk + " " + db_wal_disk)

    def test_activate_with_journal_dev_is_symlink(self):
        c = CephDisk()
        disk = c.unused_disks()[0]
        osd_uuid = str(uuid.uuid1())
        tempdir = tempfile.mkdtemp()
        symlink = os.path.join(tempdir, 'osd')
        os.symlink(disk, symlink)
        c.sh("ceph-disk --verbose zap " + symlink)
        c.sh("ceph-disk --verbose prepare --filestore --osd-uuid " + osd_uuid +
             " " + symlink)
        c.wait_for_osd_up(osd_uuid)
        device = json.loads(c.sh("ceph-disk list --format json " + symlink))[0]
        assert len(device['partitions']) == 2
        data_partition = c.get_osd_partition(osd_uuid)
        assert data_partition['type'] == 'data'
        assert data_partition['state'] == 'active'
        journal_partition = c.get_journal_partition(osd_uuid)
        assert journal_partition
        c.helper("pool_read_write")
        c.destroy_osd(osd_uuid)
        c.sh("ceph-disk --verbose zap " + symlink)

    def test_activate_journal_file(self):
        c = CephDisk()
        disks = c.unused_disks()
        data_disk = disks[0]
        #
        # /var/lib/ceph/osd is required otherwise it may violate
        # restrictions enforced by systemd regarding the directories
        # which ceph-osd is allowed to read/write
        #
        tempdir = tempfile.mkdtemp(dir='/var/lib/ceph/osd')
        c.sh("chown ceph:ceph " + tempdir + " || true")
        journal_file = os.path.join(tempdir, 'journal')
        osd_uuid = str(uuid.uuid1())
        c.sh("ceph-disk --verbose prepare --filestore --osd-uuid " + osd_uuid +
             " " + data_disk + " " + journal_file)
        c.wait_for_osd_up(osd_uuid)
        device = json.loads(
            c.sh("ceph-disk list --format json " + data_disk))[0]
        assert len(device['partitions']) == 1
        partition = device['partitions'][0]
        assert journal_file == os.readlink(
            os.path.join(partition['mount'], 'journal'))
        c.check_osd_status(osd_uuid)
        c.helper("pool_read_write 1")  # 1 == pool size
        c.destroy_osd(osd_uuid)
        c.sh("ceph-disk --verbose zap " + data_disk)
        os.unlink(journal_file)

    def test_activate_separated_journal(self):
        c = CephDisk()
        disks = c.unused_disks()
        data_disk = disks[0]
        journal_disk = disks[1]
        osd_uuid = self.activate_separated_journal(data_disk, journal_disk)
        c.helper("pool_read_write 1")  # 1 == pool size
        c.destroy_osd(osd_uuid)
        c.sh("ceph-disk --verbose zap " + data_disk + " " + journal_disk)

    def test_activate_separated_journal_dev_is_symlink(self):
        c = CephDisk()
        disks = c.unused_disks()
        data_disk = disks[0]
        journal_disk = disks[1]
        tempdir = tempfile.mkdtemp()
        data_symlink = os.path.join(tempdir, 'osd')
        os.symlink(data_disk, data_symlink)
        journal_symlink = os.path.join(tempdir, 'journal')
        os.symlink(journal_disk, journal_symlink)
        osd_uuid = self.activate_separated_journal(
            data_symlink, journal_symlink)
        c.helper("pool_read_write 1")  # 1 == pool size
        c.destroy_osd(osd_uuid)
        c.sh("ceph-disk --verbose zap " + data_symlink + " " + journal_symlink)
        os.unlink(data_symlink)
        os.unlink(journal_symlink)

    def activate_separated_journal(self, data_disk, journal_disk):
        c = CephDisk()
        osd_uuid = str(uuid.uuid1())
        c.sh("ceph-disk --verbose prepare --filestore --osd-uuid " + osd_uuid +
             " " + data_disk + " " + journal_disk)
        c.wait_for_osd_up(osd_uuid)
        device = json.loads(
            c.sh("ceph-disk list --format json " + data_disk))[0]
        assert len(device['partitions']) == 1
        c.check_osd_status(osd_uuid, 'journal')
        return osd_uuid

    #
    # Create an OSD and get a journal partition from a disk that
    # already contains a journal partition which is in use. Updates of
    # the kernel partition table may behave differently when a
    # partition is in use. See http://tracker.ceph.com/issues/7334 for
    # more information.
    #
    def test_activate_two_separated_journal(self):
        c = CephDisk()
        disks = c.unused_disks()
        data_disk = disks[0]
        other_data_disk = disks[1]
        journal_disk = disks[2]
        osd_uuid = self.activate_separated_journal(data_disk, journal_disk)
        other_osd_uuid = self.activate_separated_journal(
            other_data_disk, journal_disk)
        #
        # read/write can only succeed if the two OSDs are up because
        # the pool needs two OSDs
        #
        c.helper("pool_read_write 2")  # 2 == pool size
        c.destroy_osd(osd_uuid)
        c.destroy_osd(other_osd_uuid)
        c.sh("ceph-disk --verbose zap " + data_disk + " " +
             journal_disk + " " + other_data_disk)

    #
    # Create an OSD and reuse an existing journal partition
    #
    def test_activate_reuse_journal(self):
        c = CephDisk()
        disks = c.unused_disks()
        data_disk = disks[0]
        journal_disk = disks[1]
        #
        # Create an OSD with a separated journal and destroy it.
        #
        osd_uuid = self.activate_separated_journal(data_disk, journal_disk)
        journal_partition = c.get_journal_partition(osd_uuid)
        journal_path = journal_partition['path']
        c.destroy_osd(osd_uuid)
        c.sh("ceph-disk --verbose zap " + data_disk)
        osd_uuid = str(uuid.uuid1())
        #
        # Create another OSD with the journal partition of the previous OSD
        #
        c.sh("ceph-disk --verbose prepare --filestore --osd-uuid " + osd_uuid +
             " " + data_disk + " " + journal_path)
        c.helper("pool_read_write 1")  # 1 == pool size
        c.wait_for_osd_up(osd_uuid)
        device = json.loads(
            c.sh("ceph-disk list --format json " + data_disk))[0]
        assert len(device['partitions']) == 1
        c.check_osd_status(osd_uuid)
        journal_partition = c.get_journal_partition(osd_uuid)
        #
        # Verify the previous OSD partition has been reused
        #
        assert journal_partition['path'] == journal_path
        c.destroy_osd(osd_uuid)
        c.sh("ceph-disk --verbose zap " + data_disk + " " + journal_disk)

    def test_activate_multipath(self):
        c = CephDisk()
        if c.sh("lsb_release -si").strip() != 'CentOS':
            pytest.skip(
                "see issue https://bugs.launchpad.net/ubuntu/+source/multipath-tools/+bug/1488688")
        c.ensure_sd()
        #
        # Figure out the name of the multipath device
        #
        disk = c.unused_disks('sd.')[0]
        c.sh("mpathconf --enable || true")
        c.sh("multipath " + disk)
        holders = os.listdir(
            "/sys/block/" + os.path.basename(disk) + "/holders")
        assert 1 == len(holders)
        name = open("/sys/block/" + holders[0] + "/dm/name").read().strip()
        multipath = "/dev/mapper/" + name
        #
        # Prepare the multipath device
        #
        osd_uuid = str(uuid.uuid1())
        c.sh("ceph-disk --verbose zap " + multipath)
        c.sh("ceph-disk --verbose prepare --filestore --osd-uuid " + osd_uuid +
             " " + multipath)
        c.wait_for_osd_up(osd_uuid)
        device = json.loads(
            c.sh("ceph-disk list --format json " + multipath))[0]
        assert len(device['partitions']) == 2
        data_partition = c.get_osd_partition(osd_uuid)
        assert data_partition['type'] == 'data'
        assert data_partition['state'] == 'active'
        journal_partition = c.get_journal_partition(osd_uuid)
        assert journal_partition
        c.helper("pool_read_write")
        c.destroy_osd(osd_uuid)
        c.sh("udevadm settle")
        c.unload_scsi_debug()


class CephDiskTest(CephDisk):

    def main(self, argv):
        parser = argparse.ArgumentParser(
            'ceph-disk-test',
        )
        parser.add_argument(
            '-v', '--verbose',
            action='store_true', default=None,
            help='be more verbose',
        )
        parser.add_argument(
            '--destroy-osd',
            help='stop, umount and destroy',
        )
        args = parser.parse_args(argv)

        if args.verbose:
            logging.basicConfig(level=logging.DEBUG)

        if args.destroy_osd:
            dump = json.loads(CephDisk.sh("ceph osd dump -f json"))
            osd_uuid = None
            for osd in dump['osds']:
                if str(osd['osd']) == args.destroy_osd:
                    osd_uuid = osd['uuid']
            if osd_uuid:
                self.destroy_osd(osd_uuid)
            else:
                raise Exception("cannot find OSD " + args.destroy_osd +
                                " ceph osd dump -f json")
            return


if __name__ == '__main__':
    sys.exit(CephDiskTest().main(sys.argv[1:]))