#!/usr/bin/env python
#
# Copyright (C) 2015, 2016 Red Hat <contact@redhat.com>
#
# Author: Loic Dachary <loic@dachary.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Library Public License for more details.
#
# When debugging these tests (must be root), here are a few useful commands:
#
#   export PATH=.:..:$PATH
#   ceph-disk.sh # run once to prepare the environment as it would be by teuthology
#   ln -sf /home/ubuntu/ceph/src/ceph-disk/ceph_disk/main.py $(which ceph-disk)
#   ln -sf /home/ubuntu/ceph/udev/95-ceph-osd.rules /lib/udev/rules.d/95-ceph-osd.rules
#   ln -sf /home/ubuntu/ceph/systemd/ceph-disk@.service /usr/lib/systemd/system/ceph-disk@.service
#   ceph-disk.conf will be silently ignored if it is a symbolic link or a hard link
#   /var/log/upstart for logs
#   cp /home/ubuntu/ceph/src/upstart/ceph-disk.conf /etc/init/ceph-disk.conf
#   id=3 ; ceph-disk deactivate --deactivate-by-id $id ; ceph-disk destroy --zap --destroy-by-id $id
#   py.test -s -v -k test_activate_dmcrypt_luks ceph-disk-test.py
#
#   udevadm monitor --property & tail -f /var/log/messages
#   udev rules messages are logged in /var/log/messages
#   systemctl stop ceph-osd@2
#   systemctl start ceph-osd@2
#
#   udevadm monitor --property & tail -f /var/log/syslog /var/log/upstart/*  # on Ubuntu 14.04
#   udevadm test --action=add /block/vdb/vdb1 # verify the udev rule is run as expected
#   udevadm control --reload # when changing the udev rules
#   sudo /usr/sbin/ceph-disk -v trigger /dev/vdb1 # activates if vdb1 is data
#
# integration tests coverage
#   pip install coverage
#   perl -pi -e 's|"ceph-disk |"coverage run --source=/usr/sbin/ceph-disk --append /usr/sbin/ceph-disk |' ceph-disk-test.py
#   rm -f .coverage ; py.test -s -v ceph-disk-test.py
#   coverage report --show-missing
#
import argparse
import json
import logging
import os
import re
import subprocess
import sys
import tempfile
import time
import uuid

import configobj
import pytest

LOG = logging.getLogger('CephDisk')


class CephDisk:

    def __init__(self):
        self.conf = configobj.ConfigObj('/etc/ceph/ceph.conf')

    def save_conf(self):
        self.conf.write(open('/etc/ceph/ceph.conf', 'wb'))
    @staticmethod
    def helper(command):
        command = "ceph-helpers-root.sh " + command
        return CephDisk.sh(command)
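    # Note: helper() shells its argument out through ceph-helpers-root.sh; the
    # tests below rely on it for the "install <packages>" and
    # "pool_read_write [size]" commands.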
    @staticmethod
    def sh(command):
        LOG.debug(":sh: " + command)
        proc = subprocess.Popen(
            args=command,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            shell=True)
        output, _ = proc.communicate()
        if proc.returncode:
            LOG.warning(output.decode('utf-8'))
            raise subprocess.CalledProcessError(
                returncode=proc.returncode,
                cmd=command,
                output=output,
            )
        lines = []
        for line in output.decode('utf-8').split('\n'):
            if 'dangerous and experimental' in line:
                # filter out warnings that would confuse callers parsing the output
                LOG.debug('SKIP dangerous and experimental')
                continue
            lines.append(line)
            LOG.debug(line.strip().encode('ascii', 'ignore'))
        return "\n".join(lines)
    def unused_disks(self, pattern='[vs]d.'):
        names = [x for x in os.listdir("/sys/block") if re.match(pattern, x)]
        if not names:
            return []
        disks = json.loads(
            self.sh("ceph-disk list --format json " + " ".join(names)))
        unused = []
        for disk in disks:
            if 'partitions' not in disk:
                unused.append(disk['path'])
        return unused
    def ensure_sd(self):
        LOG.debug(self.unused_disks('sd.'))
        if self.unused_disks('sd.'):
            return
        modprobe = "modprobe scsi_debug vpd_use_hostno=0 add_host=1 dev_size_mb=200 ; udevadm settle"
        try:
            self.sh(modprobe)
        except subprocess.CalledProcessError:
            self.helper("install linux-image-extra-3.13.0-61-generic")
            self.sh(modprobe)

    def unload_scsi_debug(self):
        self.sh("rmmod scsi_debug || true")
    def get_lockbox(self):
        disks = json.loads(self.sh("ceph-disk list --format json"))
        for disk in disks:
            if 'partitions' in disk:
                for partition in disk['partitions']:
                    if partition.get('type') == 'lockbox':
                        return partition
        raise Exception("no lockbox found " + str(disks))
    def get_osd_partition(self, uuid):
        disks = json.loads(self.sh("ceph-disk list --format json"))
        for disk in disks:
            if 'partitions' in disk:
                for partition in disk['partitions']:
                    if partition.get('uuid') == uuid:
                        return partition
        raise Exception("uuid = " + uuid + " not found in " + str(disks))
    def get_journal_partition(self, uuid):
        return self.get_space_partition('journal', uuid)

    def get_block_partition(self, uuid):
        return self.get_space_partition('block', uuid)

    def get_blockdb_partition(self, uuid):
        return self.get_space_partition('block.db', uuid)

    def get_blockwal_partition(self, uuid):
        return self.get_space_partition('block.wal', uuid)
    def get_space_partition(self, name, uuid):
        data_partition = self.get_osd_partition(uuid)
        space_dev = data_partition[name + '_dev']
        disks = json.loads(self.sh("ceph-disk list --format json"))
        for disk in disks:
            if 'partitions' in disk:
                for partition in disk['partitions']:
                    if partition['path'] == space_dev:
                        if name + '_for' in partition:
                            assert partition[
                                name + '_for'] == data_partition['path']
                        return partition
        raise Exception(
            name + " for uuid = " + uuid + " not found in " + str(disks))
    def destroy_osd(self, uuid):
        id = self.sh("ceph osd create " + uuid).strip()
        self.sh("""
        set -xe
        ceph-disk --verbose deactivate --deactivate-by-id {id}
        ceph-disk --verbose destroy --destroy-by-id {id} --zap
        """.format(id=id))

    def deactivate_osd(self, uuid):
        id = self.sh("ceph osd create " + uuid).strip()
        self.sh("""
        set -xe
        ceph-disk --verbose deactivate --once --deactivate-by-id {id}
        """.format(id=id))
    @staticmethod
    def osd_up_predicate(osds, uuid):
        for osd in osds:
            if osd['uuid'] == uuid and 'up' in osd['state']:
                return True
        return False

    @staticmethod
    def wait_for_osd_up(uuid):
        CephDisk.wait_for_osd(uuid, CephDisk.osd_up_predicate, 'up')

    @staticmethod
    def osd_down_predicate(osds, uuid):
        found = False
        for osd in osds:
            if osd['uuid'] == uuid:
                found = True
                if 'down' in osd['state'] or ['exists'] == osd['state']:
                    return True
        return not found

    @staticmethod
    def wait_for_osd_down(uuid):
        CephDisk.wait_for_osd(uuid, CephDisk.osd_down_predicate, 'down')
    @staticmethod
    def wait_for_osd(uuid, predicate, info):
        LOG.info("wait_for_osd " + info + " " + uuid)
        # exponential backoff while polling the osd map
        for delay in (1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024):
            dump = json.loads(CephDisk.sh("ceph osd dump -f json"))
            if predicate(dump['osds'], uuid):
                return True
            time.sleep(delay)
        raise Exception('timeout waiting for osd ' + uuid + ' to be ' + info)
    def check_osd_status(self, uuid, space_name=None):
        data_partition = self.get_osd_partition(uuid)
        assert data_partition['type'] == 'data'
        assert data_partition['state'] == 'active'
        if space_name is not None:
            space_partition = self.get_space_partition(space_name, uuid)
            assert space_partition
class TestCephDisk(object):

    def setup_class(self):
        logging.basicConfig(level=logging.DEBUG)
        c = CephDisk()
        if c.sh("lsb_release -si").strip() == 'CentOS':
            c.helper("install multipath-tools device-mapper-multipath")
        c.conf['global']['pid file'] = '/var/run/ceph/$cluster-$name.pid'
        #
        # Avoid json parsing interference
        #
        c.conf['global']['debug monc'] = 0
        c.conf['global']['osd journal size'] = 100
        c.conf['global']['bluestore fsck on mount'] = 'true'
        c.save_conf()

    def setup(self):
        c = CephDisk()
        for key in ('osd objectstore', 'osd dmcrypt type'):
            if key in c.conf['global']:
                del c.conf['global'][key]
        c.save_conf()
    def test_deactivate_reactivate_osd(self):
        c = CephDisk()
        disk = c.unused_disks()[0]
        osd_uuid = str(uuid.uuid1())
        c.sh("ceph-disk --verbose zap " + disk)
        c.sh("ceph-disk --verbose prepare --filestore --osd-uuid " + osd_uuid +
             " " + disk)
        c.wait_for_osd_up(osd_uuid)
        device = json.loads(c.sh("ceph-disk list --format json " + disk))[0]
        assert len(device['partitions']) == 2
        c.check_osd_status(osd_uuid, 'journal')
        data_partition = c.get_osd_partition(osd_uuid)
        c.sh("ceph-disk --verbose deactivate " + data_partition['path'])
        c.wait_for_osd_down(osd_uuid)
        c.sh("ceph-disk --verbose activate " + data_partition['path'] + " --reactivate")
        # check again after reactivation
        c.wait_for_osd_up(osd_uuid)
        device = json.loads(c.sh("ceph-disk list --format json " + disk))[0]
        assert len(device['partitions']) == 2
        c.check_osd_status(osd_uuid, 'journal')
        c.helper("pool_read_write")
        c.destroy_osd(osd_uuid)
    def test_destroy_osd_by_id(self):
        c = CephDisk()
        disk = c.unused_disks()[0]
        osd_uuid = str(uuid.uuid1())
        c.sh("ceph-disk --verbose prepare --filestore --osd-uuid " + osd_uuid + " " + disk)
        c.wait_for_osd_up(osd_uuid)
        c.check_osd_status(osd_uuid)
        c.destroy_osd(osd_uuid)
    def test_destroy_osd_by_dev_path(self):
        c = CephDisk()
        disk = c.unused_disks()[0]
        osd_uuid = str(uuid.uuid1())
        c.sh("ceph-disk --verbose prepare --filestore --osd-uuid " + osd_uuid + " " + disk)
        c.wait_for_osd_up(osd_uuid)
        partition = c.get_osd_partition(osd_uuid)
        assert partition['type'] == 'data'
        assert partition['state'] == 'active'
        c.sh("ceph-disk --verbose deactivate " + partition['path'])
        c.wait_for_osd_down(osd_uuid)
        c.sh("ceph-disk --verbose destroy " + partition['path'] + " --zap")
    def test_deactivate_reactivate_dmcrypt_plain(self):
        c = CephDisk()
        c.conf['global']['osd dmcrypt type'] = 'plain'
        c.save_conf()
        osd_uuid = self.activate_dmcrypt('ceph-disk-no-lockbox')
        data_partition = c.get_osd_partition(osd_uuid)
        c.sh("ceph-disk --verbose deactivate " + data_partition['path'])
        c.wait_for_osd_down(osd_uuid)
        c.sh("ceph-disk --verbose activate-journal " + data_partition['journal_dev'] +
             " --reactivate" + " --dmcrypt")
        c.wait_for_osd_up(osd_uuid)
        c.check_osd_status(osd_uuid, 'journal')
        c.destroy_osd(osd_uuid)
    def test_deactivate_reactivate_dmcrypt_luks(self):
        c = CephDisk()
        osd_uuid = self.activate_dmcrypt('ceph-disk')
        data_partition = c.get_osd_partition(osd_uuid)
        lockbox_partition = c.get_lockbox()
        c.sh("ceph-disk --verbose deactivate " + data_partition['path'])
        c.wait_for_osd_down(osd_uuid)
        c.sh("ceph-disk --verbose trigger --sync " + lockbox_partition['path'])
        c.sh("ceph-disk --verbose activate-journal " + data_partition['journal_dev'] +
             " --reactivate" + " --dmcrypt")
        c.wait_for_osd_up(osd_uuid)
        c.check_osd_status(osd_uuid, 'journal')
        c.destroy_osd(osd_uuid)
    def test_activate_dmcrypt_plain_no_lockbox(self):
        c = CephDisk()
        c.conf['global']['osd dmcrypt type'] = 'plain'
        c.save_conf()
        osd_uuid = self.activate_dmcrypt('ceph-disk-no-lockbox')
        c.destroy_osd(osd_uuid)
    def test_activate_dmcrypt_luks_no_lockbox(self):
        c = CephDisk()
        osd_uuid = self.activate_dmcrypt('ceph-disk-no-lockbox')
        c.destroy_osd(osd_uuid)
    def test_activate_dmcrypt_luks_with_lockbox(self):
        c = CephDisk()
        osd_uuid = self.activate_dmcrypt('ceph-disk')
        c.destroy_osd(osd_uuid)
    def test_activate_lockbox(self):
        c = CephDisk()
        osd_uuid = self.activate_dmcrypt('ceph-disk')
        lockbox = c.get_lockbox()
        assert lockbox['state'] == 'active'
        c.sh("umount " + lockbox['path'])
        lockbox = c.get_lockbox()
        assert lockbox['state'] == 'prepared'
        c.sh("ceph-disk --verbose trigger " + lockbox['path'])
        for delay in (1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024):
            lockbox = c.get_lockbox()
            if lockbox['state'] == 'active':
                break
            time.sleep(delay)
        else:
            raise Exception('timeout waiting for lockbox ' + lockbox['path'])
        c.destroy_osd(osd_uuid)
    def activate_dmcrypt(self, ceph_disk):
        # ceph_disk is the name of the executable used to prepare the device,
        # either 'ceph-disk' or 'ceph-disk-no-lockbox'
        c = CephDisk()
        disk = c.unused_disks()[0]
        osd_uuid = str(uuid.uuid1())
        journal_uuid = str(uuid.uuid1())
        c.sh("ceph-disk --verbose zap " + disk)
        c.sh(ceph_disk + " --verbose prepare --filestore " +
             " --osd-uuid " + osd_uuid +
             " --journal-uuid " + journal_uuid +
             " --dmcrypt " +
             " " + disk)
        c.wait_for_osd_up(osd_uuid)
        c.check_osd_status(osd_uuid, 'journal')
        return osd_uuid
    def test_trigger_dmcrypt_journal_lockbox(self):
        c = CephDisk()
        osd_uuid = self.activate_dmcrypt('ceph-disk')
        data_partition = c.get_osd_partition(osd_uuid)
        lockbox_partition = c.get_lockbox()
        c.deactivate_osd(osd_uuid)
        c.wait_for_osd_down(osd_uuid)
        with pytest.raises(subprocess.CalledProcessError):
            # fails because the lockbox is not mounted yet
            c.sh("ceph-disk --verbose trigger --sync " + data_partition['journal_dev'])
        c.sh("ceph-disk --verbose trigger --sync " + lockbox_partition['path'])
        c.wait_for_osd_up(osd_uuid)
        c.destroy_osd(osd_uuid)
    def test_trigger_dmcrypt_data_lockbox(self):
        c = CephDisk()
        osd_uuid = self.activate_dmcrypt('ceph-disk')
        data_partition = c.get_osd_partition(osd_uuid)
        lockbox_partition = c.get_lockbox()
        c.deactivate_osd(osd_uuid)
        c.wait_for_osd_down(osd_uuid)
        with pytest.raises(subprocess.CalledProcessError):
            # fails because the lockbox is not mounted yet
            c.sh("ceph-disk --verbose trigger --sync " + data_partition['path'])
        c.sh("ceph-disk --verbose trigger --sync " + lockbox_partition['path'])
        c.wait_for_osd_up(osd_uuid)
        c.destroy_osd(osd_uuid)
    def test_trigger_dmcrypt_lockbox(self):
        c = CephDisk()
        osd_uuid = self.activate_dmcrypt('ceph-disk')
        data_partition = c.get_osd_partition(osd_uuid)
        lockbox_partition = c.get_lockbox()
        c.deactivate_osd(osd_uuid)
        c.wait_for_osd_down(osd_uuid)
        c.sh("ceph-disk --verbose trigger --sync " + lockbox_partition['path'])
        c.wait_for_osd_up(osd_uuid)
        c.destroy_osd(osd_uuid)
    def test_activate_no_journal(self):
        c = CephDisk()
        disk = c.unused_disks()[0]
        osd_uuid = str(uuid.uuid1())
        c.sh("ceph-disk --verbose zap " + disk)
        c.conf['global']['osd objectstore'] = 'memstore'
        c.save_conf()
        c.sh("ceph-disk --verbose prepare --filestore --osd-uuid " + osd_uuid +
             " " + disk)
        c.wait_for_osd_up(osd_uuid)
        device = json.loads(c.sh("ceph-disk list --format json " + disk))[0]
        assert len(device['partitions']) == 1
        partition = device['partitions'][0]
        assert partition['type'] == 'data'
        assert partition['state'] == 'active'
        assert 'journal_dev' not in partition
        c.helper("pool_read_write")
        c.destroy_osd(osd_uuid)
    def test_activate_with_journal_dev_no_symlink(self):
        c = CephDisk()
        disk = c.unused_disks()[0]
        osd_uuid = str(uuid.uuid1())
        c.sh("ceph-disk --verbose zap " + disk)
        c.sh("ceph-disk --verbose prepare --filestore --osd-uuid " + osd_uuid +
             " " + disk)
        c.wait_for_osd_up(osd_uuid)
        device = json.loads(c.sh("ceph-disk list --format json " + disk))[0]
        assert len(device['partitions']) == 2
        c.check_osd_status(osd_uuid, 'journal')
        c.helper("pool_read_write")
        c.destroy_osd(osd_uuid)
    def test_activate_bluestore(self):
        c = CephDisk()
        disk = c.unused_disks()[0]
        osd_uuid = str(uuid.uuid1())
        c.sh("ceph-disk --verbose zap " + disk)
        c.conf['global']['osd objectstore'] = 'bluestore'
        c.save_conf()
        c.sh("ceph-disk --verbose prepare --bluestore --osd-uuid " + osd_uuid +
             " " + disk)
        c.wait_for_osd_up(osd_uuid)
        device = json.loads(c.sh("ceph-disk list --format json " + disk))[0]
        assert len(device['partitions']) == 2
        c.check_osd_status(osd_uuid, 'block')
        c.helper("pool_read_write")
        c.destroy_osd(osd_uuid)
        c.sh("ceph-disk --verbose zap " + disk)
    def test_activate_bluestore_seperated_block_db_wal(self):
        c = CephDisk()
        disk1 = c.unused_disks()[0]
        disk2 = c.unused_disks()[1]
        osd_uuid = str(uuid.uuid1())
        c.sh("ceph-disk --verbose zap " + disk1 + " " + disk2)
        c.conf['global']['osd objectstore'] = 'bluestore'
        c.save_conf()
        c.sh("ceph-disk --verbose prepare --bluestore --osd-uuid " + osd_uuid +
             " " + disk1 + " --block.db " + disk2 + " --block.wal " + disk2)
        c.wait_for_osd_up(osd_uuid)
        device = json.loads(c.sh("ceph-disk list --format json " + disk1))[0]
        assert len(device['partitions']) == 2
        device = json.loads(c.sh("ceph-disk list --format json " + disk2))[0]
        assert len(device['partitions']) == 2
        c.check_osd_status(osd_uuid, 'block')
        c.check_osd_status(osd_uuid, 'block.wal')
        c.check_osd_status(osd_uuid, 'block.db')
        c.helper("pool_read_write")
        c.destroy_osd(osd_uuid)
        c.sh("ceph-disk --verbose zap " + disk1 + " " + disk2)
    def test_activate_bluestore_reuse_db_wal_partition(self):
        c = CephDisk()
        disks = c.unused_disks()
        block_disk = disks[0]
        db_wal_disk = disks[1]
        #
        # Create an OSD with two disks (one for block,
        # the other for block.db and block.wal) and then destroy the osd.
        #
        osd_uuid1 = str(uuid.uuid1())
        c.sh("ceph-disk --verbose zap " + block_disk + " " + db_wal_disk)
        c.conf['global']['osd objectstore'] = 'bluestore'
        c.save_conf()
        c.sh("ceph-disk --verbose prepare --bluestore --osd-uuid " +
             osd_uuid1 + " " + block_disk + " --block.db " + db_wal_disk +
             " --block.wal " + db_wal_disk)
        c.wait_for_osd_up(osd_uuid1)
        blockdb_partition = c.get_blockdb_partition(osd_uuid1)
        blockdb_path = blockdb_partition['path']
        blockwal_partition = c.get_blockwal_partition(osd_uuid1)
        blockwal_path = blockwal_partition['path']
        c.destroy_osd(osd_uuid1)
        c.sh("ceph-disk --verbose zap " + block_disk)
        #
        # Create another OSD with the block.db and block.wal partitions
        # of the previous OSD
        #
        osd_uuid2 = str(uuid.uuid1())
        c.sh("ceph-disk --verbose prepare --bluestore --osd-uuid " +
             osd_uuid2 + " " + block_disk + " --block.db " + blockdb_path +
             " --block.wal " + blockwal_path)
        c.wait_for_osd_up(osd_uuid2)
        device = json.loads(c.sh("ceph-disk list --format json " + block_disk))[0]
        assert len(device['partitions']) == 2
        device = json.loads(c.sh("ceph-disk list --format json " + db_wal_disk))[0]
        assert len(device['partitions']) == 2
        c.check_osd_status(osd_uuid2, 'block')
        c.check_osd_status(osd_uuid2, 'block.wal')
        c.check_osd_status(osd_uuid2, 'block.db')
        blockdb_partition = c.get_blockdb_partition(osd_uuid2)
        blockwal_partition = c.get_blockwal_partition(osd_uuid2)
        #
        # Verify the previous OSD partitions have been reused
        #
        assert blockdb_partition['path'] == blockdb_path
        assert blockwal_partition['path'] == blockwal_path
        c.destroy_osd(osd_uuid2)
        c.sh("ceph-disk --verbose zap " + block_disk + " " + db_wal_disk)
    def test_activate_with_journal_dev_is_symlink(self):
        c = CephDisk()
        disk = c.unused_disks()[0]
        osd_uuid = str(uuid.uuid1())
        tempdir = tempfile.mkdtemp()
        symlink = os.path.join(tempdir, 'osd')
        os.symlink(disk, symlink)
        c.sh("ceph-disk --verbose zap " + symlink)
        c.sh("ceph-disk --verbose prepare --filestore --osd-uuid " + osd_uuid +
             " " + symlink)
        c.wait_for_osd_up(osd_uuid)
        device = json.loads(c.sh("ceph-disk list --format json " + symlink))[0]
        assert len(device['partitions']) == 2
        data_partition = c.get_osd_partition(osd_uuid)
        assert data_partition['type'] == 'data'
        assert data_partition['state'] == 'active'
        journal_partition = c.get_journal_partition(osd_uuid)
        assert journal_partition
        c.helper("pool_read_write")
        c.destroy_osd(osd_uuid)
        c.sh("ceph-disk --verbose zap " + symlink)
        os.unlink(symlink)
        os.rmdir(tempdir)
    def test_activate_journal_file(self):
        c = CephDisk()
        disks = c.unused_disks()
        data_disk = disks[0]
        #
        # /var/lib/ceph/osd is required otherwise it may violate
        # restrictions enforced by systemd regarding the directories
        # which ceph-osd is allowed to read/write
        #
        tempdir = tempfile.mkdtemp(dir='/var/lib/ceph/osd')
        c.sh("chown ceph:ceph " + tempdir + " || true")
        journal_file = os.path.join(tempdir, 'journal')
        osd_uuid = str(uuid.uuid1())
        c.sh("ceph-disk --verbose prepare --filestore --osd-uuid " + osd_uuid +
             " " + data_disk + " " + journal_file)
        c.wait_for_osd_up(osd_uuid)
        device = json.loads(
            c.sh("ceph-disk list --format json " + data_disk))[0]
        assert len(device['partitions']) == 1
        partition = device['partitions'][0]
        assert journal_file == os.readlink(
            os.path.join(partition['mount'], 'journal'))
        c.check_osd_status(osd_uuid)
        c.helper("pool_read_write 1")  # 1 == pool size
        c.destroy_osd(osd_uuid)
        c.sh("ceph-disk --verbose zap " + data_disk)
        os.unlink(journal_file)
        os.rmdir(tempdir)
    def test_activate_separated_journal(self):
        c = CephDisk()
        disks = c.unused_disks()
        data_disk = disks[0]
        journal_disk = disks[1]
        osd_uuid = self.activate_separated_journal(data_disk, journal_disk)
        c.helper("pool_read_write 1")  # 1 == pool size
        c.destroy_osd(osd_uuid)
        c.sh("ceph-disk --verbose zap " + data_disk + " " + journal_disk)
    def test_activate_separated_journal_dev_is_symlink(self):
        c = CephDisk()
        disks = c.unused_disks()
        data_disk = disks[0]
        journal_disk = disks[1]
        tempdir = tempfile.mkdtemp()
        data_symlink = os.path.join(tempdir, 'osd')
        os.symlink(data_disk, data_symlink)
        journal_symlink = os.path.join(tempdir, 'journal')
        os.symlink(journal_disk, journal_symlink)
        osd_uuid = self.activate_separated_journal(
            data_symlink, journal_symlink)
        c.helper("pool_read_write 1")  # 1 == pool size
        c.destroy_osd(osd_uuid)
        c.sh("ceph-disk --verbose zap " + data_symlink + " " + journal_symlink)
        os.unlink(data_symlink)
        os.unlink(journal_symlink)
        os.rmdir(tempdir)
    def activate_separated_journal(self, data_disk, journal_disk):
        c = CephDisk()
        osd_uuid = str(uuid.uuid1())
        c.sh("ceph-disk --verbose prepare --filestore --osd-uuid " + osd_uuid +
             " " + data_disk + " " + journal_disk)
        c.wait_for_osd_up(osd_uuid)
        device = json.loads(
            c.sh("ceph-disk list --format json " + data_disk))[0]
        assert len(device['partitions']) == 1
        c.check_osd_status(osd_uuid, 'journal')
        return osd_uuid
    #
    # Create an OSD and get a journal partition from a disk that
    # already contains a journal partition which is in use. Updates of
    # the kernel partition table may behave differently when a
    # partition is in use. See http://tracker.ceph.com/issues/7334 for
    # more information.
    #
    def test_activate_two_separated_journal(self):
        c = CephDisk()
        disks = c.unused_disks()
        data_disk = disks[0]
        other_data_disk = disks[1]
        journal_disk = disks[2]
        osd_uuid = self.activate_separated_journal(data_disk, journal_disk)
        other_osd_uuid = self.activate_separated_journal(
            other_data_disk, journal_disk)
        #
        # read/write can only succeed if the two osds are up because
        # the pool needs two OSDs
        #
        c.helper("pool_read_write 2")  # 2 == pool size
        c.destroy_osd(osd_uuid)
        c.destroy_osd(other_osd_uuid)
        c.sh("ceph-disk --verbose zap " + data_disk + " " +
             journal_disk + " " + other_data_disk)
    #
    # Create an OSD and reuse an existing journal partition
    #
    def test_activate_reuse_journal(self):
        c = CephDisk()
        disks = c.unused_disks()
        data_disk = disks[0]
        journal_disk = disks[1]
        #
        # Create an OSD with a separated journal and destroy it.
        #
        osd_uuid = self.activate_separated_journal(data_disk, journal_disk)
        journal_partition = c.get_journal_partition(osd_uuid)
        journal_path = journal_partition['path']
        c.destroy_osd(osd_uuid)
        c.sh("ceph-disk --verbose zap " + data_disk)
        osd_uuid = str(uuid.uuid1())
        #
        # Create another OSD with the journal partition of the previous OSD
        #
        c.sh("ceph-disk --verbose prepare --filestore --osd-uuid " + osd_uuid +
             " " + data_disk + " " + journal_path)
        c.helper("pool_read_write 1")  # 1 == pool size
        c.wait_for_osd_up(osd_uuid)
        device = json.loads(
            c.sh("ceph-disk list --format json " + data_disk))[0]
        assert len(device['partitions']) == 1
        c.check_osd_status(osd_uuid)
        journal_partition = c.get_journal_partition(osd_uuid)
        #
        # Verify the previous OSD journal partition has been reused
        #
        assert journal_partition['path'] == journal_path
        c.destroy_osd(osd_uuid)
        c.sh("ceph-disk --verbose zap " + data_disk + " " + journal_disk)
    def test_activate_multipath(self):
        c = CephDisk()
        if c.sh("lsb_release -si").strip() != 'CentOS':
            pytest.skip(
                "see issue https://bugs.launchpad.net/ubuntu/+source/multipath-tools/+bug/1488688")
        c.ensure_sd()
        #
        # Figure out the name of the multipath device
        #
        disk = c.unused_disks('sd.')[0]
        c.sh("mpathconf --enable || true")
        c.sh("multipath " + disk)
        holders = os.listdir(
            "/sys/block/" + os.path.basename(disk) + "/holders")
        assert 1 == len(holders)
        name = open("/sys/block/" + holders[0] + "/dm/name").read().strip()
        multipath = "/dev/mapper/" + name
        #
        # Prepare the multipath device
        #
        osd_uuid = str(uuid.uuid1())
        c.sh("ceph-disk --verbose zap " + multipath)
        c.sh("ceph-disk --verbose prepare --filestore --osd-uuid " + osd_uuid +
             " " + multipath)
        c.wait_for_osd_up(osd_uuid)
        device = json.loads(
            c.sh("ceph-disk list --format json " + multipath))[0]
        assert len(device['partitions']) == 2
        data_partition = c.get_osd_partition(osd_uuid)
        assert data_partition['type'] == 'data'
        assert data_partition['state'] == 'active'
        journal_partition = c.get_journal_partition(osd_uuid)
        assert journal_partition
        c.helper("pool_read_write")
        c.destroy_osd(osd_uuid)
        c.sh("udevadm settle")
        c.unload_scsi_debug()
class CephDiskTest(CephDisk):

    def main(self, argv):
        parser = argparse.ArgumentParser(
            'ceph-disk-test.py',
        )
        parser.add_argument(
            '-v', '--verbose',
            action='store_true', default=None,
            help='be more verbose',
        )
        parser.add_argument(
            '--destroy-osd',
            help='stop, umount and destroy',
        )
        args = parser.parse_args(argv)

        if args.verbose:
            logging.basicConfig(level=logging.DEBUG)

        if args.destroy_osd:
            dump = json.loads(CephDisk.sh("ceph osd dump -f json"))
            osd_uuid = None
            for osd in dump['osds']:
                if str(osd['osd']) == args.destroy_osd:
                    osd_uuid = osd['uuid']
            if osd_uuid:
                self.destroy_osd(osd_uuid)
            else:
                raise Exception("cannot find OSD " + args.destroy_osd +
                                " in ceph osd dump -f json")
            return
if __name__ == '__main__':
    sys.exit(CephDiskTest().main(sys.argv[1:]))
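# Example invocations (a sketch, assuming the environment was prepared by
# ceph-disk.sh as described in the header of this file):
#
#   py.test -s -v -k test_activate_separated_journal ceph-disk-test.py
#   python ceph-disk-test.py --verbose --destroy-osd 2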