- hosts: osds
  become: yes
  tasks:

    - name: stop ceph-osd@2 daemon
      service:
        name: ceph-osd@2
        state: stopped

    - name: stop ceph-osd@0 daemon
      service:
        name: ceph-osd@0
        state: stopped


- hosts: mons
  become: yes
  tasks:

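    # `ceph osd destroy` marks the OSD as destroyed and revokes its cephx
    # key, but keeps the ID in the CRUSH map, so the redeploy steps further
    # down can reuse it via --osd-id.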
    - name: destroy osd.2
      command: "ceph --cluster {{ cluster }} osd destroy osd.2 --yes-i-really-mean-it"

    - name: destroy osd.0
      command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it"


- hosts: osds
  become: yes
  tasks:

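    # Wipe the devices that backed osd.2. With --destroy, `lvm zap` also
    # removes the underlying partitions, which is why /dev/vdd gets
    # re-partitioned further down. CEPH_VOLUME_DEBUG=1 enables
    # ceph-volume's debug mode for easier diagnosis of test failures.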
    # osd.2 device
    - name: zap /dev/vdd1
      command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/vdd1 --destroy"
      environment:
        CEPH_VOLUME_DEBUG: 1

    # osd.2 journal
    - name: zap /dev/vdd2
      command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/vdd2 --destroy"
      environment:
        CEPH_VOLUME_DEBUG: 1

    # the partitions have been completely removed, so re-create them
    - name: re-create partition /dev/vdd for lvm data usage
      parted:
        device: /dev/vdd
        number: 1
        part_start: 0%
        part_end: 50%
        unit: '%'
        label: gpt
        state: present

    - name: re-create partition /dev/vdd for lvm journals
      parted:
        device: /dev/vdd
        number: 2
        part_start: 50%
        part_end: 100%
        unit: '%'
        state: present
        label: gpt

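    # `lvm create` is prepare + activate in a single step; --osd-id 2
    # reuses the ID released by the `osd destroy` task above.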
    - name: redeploy osd.2 using /dev/vdd1
      command: "ceph-volume --cluster {{ cluster }} lvm create --filestore --data /dev/vdd1 --journal /dev/vdd2 --osd-id 2"
      environment:
        CEPH_VOLUME_DEBUG: 1

    # osd.0 data lv
    # note: we don't use --destroy here, to test that zapping works without
    # that flag. --destroy is exercised by the bluestore tests.
    - name: zap test_group/data-lv1
      command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1"
      environment:
        CEPH_VOLUME_DEBUG: 1

    # osd.0 journal device (again zapped without --destroy, which would
    # remove the partition itself)
    - name: zap /dev/vdc1
      command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/vdc1"
      environment:
        CEPH_VOLUME_DEBUG: 1

    - name: prepare osd.0 again using test_group/data-lv1
      command: "ceph-volume --cluster {{ cluster }} lvm prepare --filestore --data test_group/data-lv1 --journal /dev/vdc1 --osd-id 0"
      environment:
        CEPH_VOLUME_DEBUG: 1

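    # osd.0 is only prepared at this point; it is started later by the
    # "activate all" task below.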
    - name: find all OSD paths
      find:
        paths: /var/lib/ceph/osd
        recurse: no
        file_type: directory
      register: osd_paths

    # set all OSD paths to root:root to ensure that the OSD will be able to
    # activate regardless
    - name: mangle permissions to root
      file:
        path: "{{ item.path }}"
        owner: root
        group: root
        recurse: yes
      with_items:
        - "{{ osd_paths.files }}"

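    # stop the running OSDs so that `lvm activate --all` below must bring
    # them back up along with the freshly prepared osd.0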
    - name: stop ceph-osd@2 daemon
      service:
        name: ceph-osd@2
        state: stopped

    - name: stop ceph-osd@1 daemon
      service:
        name: ceph-osd@1
        state: stopped

    - name: activate all to start the previously prepared osd.0
      command: "ceph-volume lvm activate --filestore --all"
      environment:
        CEPH_VOLUME_DEBUG: 1

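    # quick sanity checks that the reporting commands still succeed after
    # the redeploys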
    - name: node inventory
      command: "ceph-volume inventory"
      environment:
        CEPH_VOLUME_DEBUG: 1

    - name: list all OSDs
      command: "ceph-volume lvm list"
      environment:
        CEPH_VOLUME_DEBUG: 1

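    # The remaining tasks build a throwaway VG on a loop device to verify
    # that zapping one LV with --destroy leaves the VG and its other LV
    # intact for a second zap.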
    - name: create temporary directory
      tempfile:
        state: directory
        suffix: sparse
      register: tmpdir

    - name: create a 1GB sparse file
      command: fallocate -l 1G {{ tmpdir.path }}/sparse.file

    - name: find an empty loop device
      command: losetup -f
      register: losetup_list

    - name: setup loop device with sparse file
      command: losetup {{ losetup_list.stdout }} {{ tmpdir.path }}/sparse.file

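    # vgcreate/lvcreate exit non-zero if the VG or LV already exists;
    # failed_when: false is presumably there so replays of this playbook
    # don't abort on these tasks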
    - name: create volume group
      command: vgcreate test_zap {{ losetup_list.stdout }}
      failed_when: false

    - name: create logical volume 1
      command: lvcreate --yes -l 50%FREE -n data-lv1 test_zap
      failed_when: false

    - name: create logical volume 2
      command: lvcreate --yes -l 50%FREE -n data-lv2 test_zap
      failed_when: false

    # zapping the first lv shouldn't remove the vg, allowing the second zap to succeed
    - name: zap test_zap/data-lv1
      command: "ceph-volume --cluster {{ cluster }} lvm zap --destroy test_zap/data-lv1"
      environment:
        CEPH_VOLUME_DEBUG: 1

    - name: zap test_zap/data-lv2
      command: "ceph-volume --cluster {{ cluster }} lvm zap --destroy test_zap/data-lv2"
      environment:
        CEPH_VOLUME_DEBUG: 1