6 - name: stop ceph-osd@2 daemon
11 - name: stop ceph-osd@0 daemon
20 - name: mark osds down
21 command: "ceph --cluster {{ cluster }} osd down osd.{{ item }}"
27 command: "ceph --cluster {{ cluster }} osd destroy osd.2 --yes-i-really-mean-it"
31 until: result is succeeded
34 command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it"
38 until: result is succeeded
46 command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/vdd1 --destroy"
52 command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/vdd2 --destroy"
56 # partitions have been completely removed, so re-create them
57 - name: re-create partition /dev/vdd for lvm data usage
67 - name: re-create partition /dev/vdd lvm journals
77 - name: redeploy osd.2 using /dev/vdd1
78 command: "ceph-volume --cluster {{ cluster }} lvm create --filestore --data /dev/vdd1 --journal /dev/vdd2 --osd-id 2"
83 # note: we don't use --destroy here to test this works without that flag.
84 # --destroy is used in the bluestore tests
85 - name: zap test_group/data-lv1
86 command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1"
90 # osd.0 journal device (zap without the --destroy flag, which would remove the LV)
92 command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/vdc1"
96 - name: prepare osd.0 again using test_group/data-lv1
97 command: "ceph-volume --cluster {{ cluster }} lvm prepare --filestore --data test_group/data-lv1 --journal /dev/vdc1 --osd-id 0"
101 - name: find all OSD paths
103 paths: /var/lib/ceph/osd
108 # set all OSD paths to root:root to ensure that the OSD will be able to
109 # activate regardless
110 - name: mangle permissions to root
112 path: "{{ item.path }}"
117 - "{{ osd_paths.files }}"
119 - name: stop ceph-osd@2 daemon
124 - name: stop ceph-osd@1 daemon
129 - name: activate all to start the previously prepared osd.0
130 command: "ceph-volume lvm activate --filestore --all"
134 - name: node inventory
135 command: "ceph-volume inventory"
139 - name: list all OSDs
140 command: "ceph-volume lvm list"
144 - name: create temporary directory
150 - name: create a 1GB sparse file
151 command: fallocate -l 1G {{ tmpdir.path }}/sparse.file
153 - name: find an empty loop device
155 register: losetup_list
157 - name: setup loop device with sparse file
158 command: losetup {{ losetup_list.stdout }} {{ tmpdir.path }}/sparse.file
160 - name: create volume group
161 command: vgcreate test_zap {{ losetup_list.stdout }}
164 - name: create logical volume 1
165 command: lvcreate --yes -l 50%FREE -n data-lv1 test_zap
168 - name: create logical volume 2
169 command: lvcreate --yes -l 50%FREE -n data-lv2 test_zap
172 # zapping the first lv shouldn't remove the vg, allowing the second zap to succeed
173 - name: zap test_zap/data-lv1
174 command: "ceph-volume --cluster {{ cluster }} lvm zap --destroy test_zap/data-lv1"
178 - name: zap test_zap/data-lv2
179 command: "ceph-volume --cluster {{ cluster }} lvm zap --destroy test_zap/data-lv2"