- hosts: osds
  become: yes
  tasks:

    - name: stop ceph-osd@2 daemon
      service:
        name: ceph-osd@2
        state: stopped

    - name: stop ceph-osd@0 daemon
      service:
        name: ceph-osd@0
        state: stopped


- hosts: mons
  become: yes
  tasks:

    - name: mark osds down
      command: "ceph --cluster {{ cluster }} osd down osd.{{ item }}"
      with_items:
        - 0
        - 2

    - name: destroy osd.2
      command: "ceph --cluster {{ cluster }} osd destroy osd.2 --yes-i-really-mean-it"
      register: result
      retries: 30
      delay: 1
      until: result is succeeded

    - name: destroy osd.0
      command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it"
      register: result
      retries: 30
      delay: 1
      until: result is succeeded


- hosts: osds
  become: yes
  tasks:

    # osd.2 device
    - name: zap /dev/vdd1
      command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/vdd1 --destroy"
      environment:
        CEPH_VOLUME_DEBUG: 1

    # osd.2 journal
    - name: zap /dev/vdd2
      command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/vdd2 --destroy"
      environment:
        CEPH_VOLUME_DEBUG: 1

    # partitions have been completely removed, so re-create them again
    - name: re-create partition /dev/vdd for lvm data usage
      parted:
        device: /dev/vdd
        number: 1
        part_start: 0%
        part_end: 50%
        unit: '%'
        label: gpt
        state: present

    - name: re-create partition /dev/vdd for lvm journals
      parted:
        device: /dev/vdd
        number: 2
        part_start: 50%
        part_end: 100%
        unit: '%'
        state: present
        label: gpt

    - name: redeploy osd.2 using /dev/vdd1
      command: "ceph-volume --cluster {{ cluster }} lvm create --filestore --data /dev/vdd1 --journal /dev/vdd2 --osd-id 2"
      environment:
        CEPH_VOLUME_DEBUG: 1

    # osd.0 data lv
    # note: we don't use --destroy here to test that this works without that flag.
    # --destroy is used in the bluestore tests
    - name: zap test_group/data-lv1
      command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1"
      environment:
        CEPH_VOLUME_DEBUG: 1

    # osd.0 journal device
    - name: zap /dev/vdc1
      command: "ceph-volume --cluster {{ cluster }} lvm zap --destroy /dev/vdc1"
      environment:
        CEPH_VOLUME_DEBUG: 1

    - name: re-create partition /dev/vdc1
      parted:
        device: /dev/vdc
        number: 1
        part_start: 0%
        part_end: 50%
        unit: '%'
        state: present
        label: gpt

    - name: prepare osd.0 again using test_group/data-lv1
      command: "ceph-volume --cluster {{ cluster }} lvm prepare --filestore --data test_group/data-lv1 --journal /dev/vdc1 --osd-id 0"
      environment:
        CEPH_VOLUME_DEBUG: 1

    - name: find all OSD paths
      find:
        paths: /var/lib/ceph/osd
        recurse: no
        file_type: directory
      register: osd_paths

    # set all OSD paths to root:root to ensure that the OSD will be able to
    # activate regardless
    - name: mangle permissions to root
      file:
        path: "{{ item.path }}"
        owner: root
        group: root
        recurse: yes
      with_items:
        - "{{ osd_paths.files }}"

    - name: stop ceph-osd@2 daemon
      service:
        name: ceph-osd@2
        state: stopped

    - name: stop ceph-osd@1 daemon
      service:
        name: ceph-osd@1
        state: stopped

    - name: activate all to start the previously prepared osd.0
      command: "ceph-volume lvm activate --filestore --all"
      environment:
        CEPH_VOLUME_DEBUG: 1

    - name: node inventory
      command: "ceph-volume inventory"
      environment:
        CEPH_VOLUME_DEBUG: 1

    - name: list all OSDs
      command: "ceph-volume lvm list"
      environment:
        CEPH_VOLUME_DEBUG: 1

    - name: create temporary directory
      tempfile:
        state: directory
        suffix: sparse
      register: tmpdir

    - name: create a 1GB sparse file
      command: fallocate -l 1G {{ tmpdir.path }}/sparse.file

    - name: find an empty loop device
      command: losetup -f
      register: losetup_list

    - name: setup loop device with sparse file
      command: losetup {{ losetup_list.stdout }} {{ tmpdir.path }}/sparse.file

    - name: create volume group
      command: vgcreate test_zap {{ losetup_list.stdout }}
      failed_when: false

    - name: create logical volume 1
      command: lvcreate --yes -l 50%FREE -n data-lv1 test_zap
      failed_when: false

    - name: create logical volume 2
      command: lvcreate --yes -l 50%FREE -n data-lv2 test_zap
      failed_when: false

    # zapping the first lv shouldn't remove the vg, allowing the second zap to succeed
    - name: zap test_zap/data-lv1
      command: "ceph-volume --cluster {{ cluster }} lvm zap --destroy test_zap/data-lv1"
      environment:
        CEPH_VOLUME_DEBUG: 1

    - name: zap test_zap/data-lv2
      command: "ceph-volume --cluster {{ cluster }} lvm zap --destroy test_zap/data-lv2"
      environment:
        CEPH_VOLUME_DEBUG: 1