# NOTE(review): this file arrived as a unified-diff fragment with the hunk
# headers and all indentation stripped. The lines below are the reconstructed
# POST-patch YAML (deleted `-` lines dropped, added `+` lines kept). The play
# header that owns this `tasks:` key (its `- hosts:` line) is above the
# visible fragment — merge against the full file before use.
  tasks:

    # Mark both OSDs as destroyed so their IDs can be reused by the
    # redeploy/prepare tasks in the following play. `--cluster {{ cluster }}`
    # keeps the command working for non-default cluster names.
    - name: destroy osd.2
      command: "ceph --cluster {{ cluster }} osd destroy osd.2 --yes-i-really-mean-it"

    - name: destroy osd.0
      command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it"
# Re-create the destroyed OSDs from scratch on the OSD hosts, then exercise
# `ceph-volume lvm zap` against a throwaway VG built on a loop device.
# NOTE(review): reconstructed from a mangled diff fragment; `tasks:` is
# structurally required and was outside the hunk context, as were any other
# play-level keys (e.g. `become:`) — confirm against the full file.
- hosts: osds
  tasks:

    # osd.2 device
    - name: zap /dev/sdd1
      command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdd1 --destroy"
      environment:
        CEPH_VOLUME_DEBUG: 1

    # osd.2 journal
    - name: zap /dev/sdd2
      command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdd2 --destroy"
      environment:
        CEPH_VOLUME_DEBUG: 1

    # Reuse the freed osd id 2 on the freshly zapped devices.
    - name: redeploy osd.2 using /dev/sdd1
      command: "ceph-volume --cluster {{ cluster }} lvm create --filestore --data /dev/sdd1 --journal /dev/sdd2 --osd-id 2"
      environment:
        CEPH_VOLUME_DEBUG: 1

    # osd.0 data lv
    # note: we don't use --destroy here to test this works without that flag.
    # --destroy is used in the bluestore tests
    - name: zap test_group/data-lv1
      command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1"
      environment:
        CEPH_VOLUME_DEBUG: 1

    # osd.0 journal device (zap without --destroy that removes the LV)
    - name: zap /dev/sdc1
      command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdc1"
      environment:
        CEPH_VOLUME_DEBUG: 1

    - name: prepare osd.0 again using test_group/data-lv1
      command: "ceph-volume --cluster {{ cluster }} lvm prepare --filestore --data test_group/data-lv1 --journal /dev/sdc1 --osd-id 0"
      environment:
        CEPH_VOLUME_DEBUG: 1

    # NOTE(review): this task's `name:` line fell outside the diff hunk
    # context; kept unnamed (valid in Ansible) — restore the original name
    # from the full file.
    - command: "ceph-volume lvm list"
      environment:
        CEPH_VOLUME_DEBUG: 1

    # --- zap regression test on a disposable VG backed by a loop device ---

    - name: create temporary directory
      tempfile:
        state: directory
        suffix: sparse
      register: tmpdir

    - name: create a 5GB sparse file
      command: fallocate -l 5G {{ tmpdir.path }}/sparse.file

    - name: find an empty loop device
      command: losetup -f
      register: losetup_list

    - name: setup loop device with sparse file
      command: losetup {{ losetup_list.stdout }} {{ tmpdir.path }}/sparse.file

    # failed_when: false on the LVM setup tasks makes reruns idempotent when
    # the VG/LVs already exist from a previous pass.
    - name: create volume group
      command: vgcreate test_zap {{ losetup_list.stdout }}
      failed_when: false

    - name: create logical volume 1
      command: lvcreate --yes -l 50%FREE -n data-lv1 test_zap
      failed_when: false

    - name: create logical volume 2
      command: lvcreate --yes -l 50%FREE -n data-lv2 test_zap
      failed_when: false

    # zapping the first lv shouldn't remove the vg, allowing the second zap to succeed
    - name: zap test_zap/data-lv1
      command: "ceph-volume --cluster {{ cluster }} lvm zap --destroy test_zap/data-lv1"
      environment:
        CEPH_VOLUME_DEBUG: 1

    - name: zap test_zap/data-lv2
      command: "ceph-volume --cluster {{ cluster }} lvm zap --destroy test_zap/data-lv2"
      environment:
        CEPH_VOLUME_DEBUG: 1