- name: stop ceph-osd@2 daemon
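  # The module body is not in this extract; a minimal sketch, assuming the OSD
  # runs as the usual ceph-osd@<id> systemd unit created on activation.
  service:
    name: ceph-osd@2
    state: stopped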

- name: stop ceph-osd@0 daemon
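  # Same assumption as above, for osd.0.
  service:
    name: ceph-osd@0
    state: stopped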

- name: mark osds down
  command: "ceph --cluster {{ cluster }} osd down osd.{{ item }}"
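  # The loop is not shown in the extract; assuming it iterates over the two
  # OSD ids handled in this play (0 and 2).
  with_items:
    - 0
    - 2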

- name: destroy osd.2
  command: "ceph --cluster {{ cluster }} osd destroy osd.2 --yes-i-really-mean-it"
  until: result is succeeded
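  # "until" needs a registered result and a retry budget; those keys are not in
  # the extract, so the values below are assumptions.
  register: result
  retries: 30
  delay: 1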

- name: destroy osd.0
  command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it"
  until: result is succeeded
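  # Same assumed retry handling as for osd.2 above.
  register: result
  retries: 30
  delay: 1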

- name: zap /dev/vdd1
  command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/vdd1 --destroy"

- name: zap /dev/vdd2
  command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/vdd2 --destroy"

# partitions have been completely removed, so re-create them again
- name: re-create partition /dev/vdd for lvm data usage
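  # The parted body is not in the extract; a sketch assuming a GPT label with
  # the first half of /dev/vdd used for data. Adjust sizes to your layout.
  parted:
    device: /dev/vdd
    number: 1
    part_start: 0%
    part_end: 50%
    unit: '%'
    label: gpt
    state: present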

- name: re-create partition /dev/vdd lvm journals
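  # Companion sketch for the journal partition, assuming it takes the second
  # half of /dev/vdd.
  parted:
    device: /dev/vdd
    number: 2
    part_start: 50%
    part_end: 100%
    unit: '%'
    label: gpt
    state: present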

- name: redeploy osd.2 using /dev/vdd1
  command: "ceph-volume --cluster {{ cluster }} lvm create --filestore --data /dev/vdd1 --journal /dev/vdd2 --osd-id 2"

- name: zap test_group/data-lv1
  command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1"

- name: zap /dev/vdc1
  command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/vdc1 --destroy"

- name: re-create partition /dev/vdc1
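  # Sketch for re-creating the journal partition on /dev/vdc; the size is an
  # assumption, adjust to your layout.
  parted:
    device: /dev/vdc
    number: 1
    part_start: 0%
    part_end: 50%
    unit: '%'
    label: gpt
    state: present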

- name: prepare osd.0 again using test_group/data-lv1
  command: "ceph-volume --cluster {{ cluster }} lvm prepare --filestore --data test_group/data-lv1 --journal /dev/vdc1 --osd-id 0"

- name: activate all to start the previously prepared osd.0
  command: "ceph-volume lvm activate --filestore --all"

- name: node inventory
  command: "ceph-volume inventory"

- name: list all OSDs
  command: "ceph-volume lvm list"