- name: stop ceph-osd@2 daemon
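  # reconstructed body (a minimal sketch): stop the per-OSD systemd unit
  service:
    name: ceph-osd@2
    state: stopped
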
- name: stop ceph-osd@0 daemon
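  # same sketch for osd.0
  service:
    name: ceph-osd@0
    state: stopped
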
- name: destroy osd.2
  command: "ceph --cluster {{ cluster }} osd destroy osd.2 --yes-i-really-mean-it"

- name: destroy osd.0
  command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it"

- name: zap /dev/sdd1
  command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdd1 --destroy"

- name: zap /dev/sdd2
  command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdd2 --destroy"

# zap --destroy removed the partitions completely, so re-create them
- name: re-create partition /dev/sdd for lvm data usage
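  # reconstructed parted body; the 0%-50% data / 50%-100% journal split is an assumption
  parted:
    device: /dev/sdd
    number: 1
    part_start: 0%
    part_end: 50%
    unit: '%'
    label: gpt
    state: present
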
- name: re-create partition /dev/sdd lvm journals
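  # second half of /dev/sdd for the journal (same assumed split as above)
  parted:
    device: /dev/sdd
    number: 2
    part_start: 50%
    part_end: 100%
    unit: '%'
    label: gpt
    state: present
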
- name: redeploy osd.2 using /dev/sdd1
  command: "ceph-volume --cluster {{ cluster }} lvm create --filestore --data /dev/sdd1 --journal /dev/sdd2 --osd-id 2"

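# osd.0 path: zap the data LV in place (no --destroy, so the LV itself is kept)
# and fully destroy the journal partition before re-creating it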
- name: zap test_group/data-lv1
  command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1"

- name: zap /dev/sdc1
  command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdc1 --destroy"

- name: re-create partition /dev/sdc1
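  # reconstructed parted body; the journal partition size is an assumption
  parted:
    device: /dev/sdc
    number: 1
    part_start: 0%
    part_end: 50%
    unit: '%'
    label: gpt
    state: present
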
- name: prepare osd.0 again using test_group/data-lv1
  command: "ceph-volume --cluster {{ cluster }} lvm prepare --filestore --data test_group/data-lv1 --journal /dev/sdc1 --osd-id 0"

- name: activate all to start the previously prepared osd.0
  command: "ceph-volume lvm activate --filestore --all"

- name: node inventory
  command: "ceph-volume inventory"

- name: list all OSDs
  command: "ceph-volume lvm list"