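# Functional test playbook for the ceph-volume lvm filestore workflow (purpose
# inferred from the tasks below): stop and destroy osd.0 and osd.2, zap their
# backing devices, redeploy both OSDs under their original IDs, and finally
# exercise `lvm zap --destroy` against logical volumes built on a loop device.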
- hosts: osds
  become: yes
  tasks:

    - name: stop ceph-osd@2 daemon
      service:
        name: ceph-osd@2
        state: stopped

    - name: stop ceph-osd@0 daemon
      service:
        name: ceph-osd@0
        state: stopped

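# From the monitors: flag both OSDs down and destroy them. `ceph osd destroy`
# keeps the OSD IDs in place, which is what allows the redeploy tasks below to
# reuse --osd-id 0 and --osd-id 2.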
- hosts: mons
  become: yes
  tasks:
    - name: mark osds down
      command: "ceph --cluster {{ cluster }} osd down osd.{{ item }}"
      with_items:
        - 0
        - 2

    - name: destroy osd.2
      command: "ceph --cluster {{ cluster }} osd destroy osd.2 --yes-i-really-mean-it"
      register: result
      retries: 30
      delay: 1
      until: result is succeeded

    - name: destroy osd.0
      command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it"
      register: result
      retries: 30
      delay: 1
      until: result is succeeded

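# Back on the OSD hosts: wipe the devices and volumes that backed osd.2 and
# osd.0, then redeploy both OSDs under their original IDs.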
- hosts: osds
  become: yes
  tasks:

    # osd.2 device
    - name: zap /dev/vdd1
      command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/vdd1 --destroy"
      environment:
        CEPH_VOLUME_DEBUG: 1

    # osd.2 journal
    - name: zap /dev/vdd2
      command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/vdd2 --destroy"
      environment:
        CEPH_VOLUME_DEBUG: 1

    # partitions have been completely removed, so re-create them again
    - name: re-create partition /dev/vdd for lvm data usage
      parted:
        device: /dev/vdd
        number: 1
        part_start: 0%
        part_end: 50%
        unit: '%'
        label: gpt
        state: present

    - name: re-create partition /dev/vdd lvm journals
      parted:
        device: /dev/vdd
        number: 2
        part_start: 50%
        part_end: 100%
        unit: '%'
        state: present
        label: gpt

    - name: redeploy osd.2 using /dev/vdd1
      command: "ceph-volume --cluster {{ cluster }} lvm create --filestore --data /dev/vdd1 --journal /dev/vdd2 --osd-id 2"
      environment:
        CEPH_VOLUME_DEBUG: 1

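    # `lvm create` is prepare + activate in one step, so osd.2 is back up at
    # this point. osd.0 below is only prepared and gets started later by the
    # `lvm activate --all` task.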
    # osd.0 data lv
    # note: we don't use --destroy here to test this works without that flag.
    # --destroy is used in the bluestore tests
    - name: zap test_group/data-lv1
      command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1"
      environment:
        CEPH_VOLUME_DEBUG: 1

    # osd.0 journal device
    - name: zap /dev/vdc1
      command: "ceph-volume --cluster {{ cluster }} lvm zap --destroy /dev/vdc1"
      environment:
        CEPH_VOLUME_DEBUG: 1

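    # zapping /dev/vdc1 with --destroy removed the partition as well, so put it
    # back before preparing osd.0 against it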
    - name: re-create partition /dev/vdc1
      parted:
        device: /dev/vdc
        number: 1
        part_start: 0%
        part_end: 50%
        unit: '%'
        state: present
        label: gpt

    - name: prepare osd.0 again using test_group/data-lv1
      command: "ceph-volume --cluster {{ cluster }} lvm prepare --filestore --data test_group/data-lv1 --journal /dev/vdc1 --osd-id 0"
      environment:
        CEPH_VOLUME_DEBUG: 1

    - name: find all OSD paths
      find:
        paths: /var/lib/ceph/osd
        recurse: no
        file_type: directory
      register: osd_paths

    # set all OSD paths to root:root to ensure that the OSD will be able to
    # activate regardless
    - name: mangle permissions to root
      file:
        path: "{{ item.path }}"
        owner: root
        group: root
        recurse: yes
      with_items:
        - "{{ osd_paths.files }}"

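    # ceph-volume's activate step normally chowns these paths back to ceph:ceph,
    # so the daemons should come up cleanly despite the root:root ownership above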
    - name: stop ceph-osd@2 daemon
      service:
        name: ceph-osd@2
        state: stopped

    - name: stop ceph-osd@1 daemon
      service:
        name: ceph-osd@1
        state: stopped

    - name: activate all to start the previously prepared osd.0
      command: "ceph-volume lvm activate --filestore --all"
      environment:
        CEPH_VOLUME_DEBUG: 1

    - name: node inventory
      command: "ceph-volume inventory"
      environment:
        CEPH_VOLUME_DEBUG: 1

    - name: list all OSDs
      command: "ceph-volume lvm list"
      environment:
        CEPH_VOLUME_DEBUG: 1

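    # The remaining tasks build a disposable volume group (test_zap) on a loop
    # device, purely to exercise `lvm zap --destroy` against LVs that are not
    # part of the running cluster.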
    - name: create temporary directory
      tempfile:
        state: directory
        suffix: sparse
      register: tmpdir

    - name: create a 1GB sparse file
      command: fallocate -l 1G {{ tmpdir.path }}/sparse.file

    - name: find an empty loop device
      command: losetup -f
      register: losetup_list

    - name: setup loop device with sparse file
      command: losetup {{ losetup_list.stdout }} {{ tmpdir.path }}/sparse.file

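    # Equivalent shell sequence for the sparse-file/loop-device tasks above
    # (illustrative sketch, the tmpdir path is a placeholder):
    #   fallocate -l 1G <tmpdir>/sparse.file
    #   losetup -f                                  # prints first free loop device
    #   losetup <free-loop-device> <tmpdir>/sparse.file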
    - name: create volume group
      command: vgcreate test_zap {{ losetup_list.stdout }}
      failed_when: false

    - name: create logical volume 1
      command: lvcreate --yes -l 50%FREE -n data-lv1 test_zap
      failed_when: false

    - name: create logical volume 2
      command: lvcreate --yes -l 50%FREE -n data-lv2 test_zap
      failed_when: false

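    # failed_when: false on the vgcreate/lvcreate tasks presumably keeps reruns
    # from aborting when test_zap and its LVs already exist; the zap --destroy
    # tasks below are what is actually under test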
    # zapping the first lv shouldn't remove the vg, allowing the second zap to succeed
    - name: zap test_zap/data-lv1
      command: "ceph-volume --cluster {{ cluster }} lvm zap --destroy test_zap/data-lv1"
      environment:
        CEPH_VOLUME_DEBUG: 1

    - name: zap test_zap/data-lv2
      command: "ceph-volume --cluster {{ cluster }} lvm zap --destroy test_zap/data-lv2"
      environment:
        CEPH_VOLUME_DEBUG: 1