---
# Play 1: stop the two OSD daemons under test (osd.2 and osd.0) on the
# OSD hosts so their backing devices can be destroyed and redeployed by
# the plays that follow.
- hosts: osds
  become: true
  tasks:

    - name: stop ceph-osd@2 daemon
      service:
        name: ceph-osd@2
        state: stopped

    - name: stop ceph-osd@0 daemon
      service:
        name: ceph-osd@0
        state: stopped
17 | - hosts: mons | |
18 | become: yes | |
19 | tasks: | |
20 | ||
94b18763 | 21 | - name: destroy osd.2 |
91327a77 | 22 | command: "ceph --cluster {{ cluster }} osd destroy osd.2 --yes-i-really-mean-it" |
3a9019d9 | 23 | |
28e407b8 | 24 | - name: destroy osd.0 |
91327a77 | 25 | command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it" |
28e407b8 AA |
26 | |
27 | ||
# Play 3: wipe the devices backing osd.2 (raw partitions /dev/vdd1-2)
# and osd.0 (lv test_group/data-lv1 + journal /dev/vdc1), re-create the
# partitions, then redeploy both OSDs with ceph-volume (filestore).
- hosts: osds
  become: true
  tasks:

    # osd.2 device
    - name: zap /dev/vdd1
      command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/vdd1 --destroy"
      environment:
        # environment values must be strings, so quote the flag
        CEPH_VOLUME_DEBUG: "1"

    - name: zap /dev/vdd2
      command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/vdd2 --destroy"
      environment:
        CEPH_VOLUME_DEBUG: "1"

    # partitions have been completely removed, so re-create them again
    - name: re-create partition /dev/vdd for lvm data usage
      parted:
        device: /dev/vdd
        number: 1
        part_start: '0%'
        part_end: '50%'
        unit: '%'
        state: present
        label: gpt

    - name: re-create partition /dev/vdd lvm journals
      parted:
        device: /dev/vdd
        number: 2
        part_start: '50%'
        part_end: '100%'
        unit: '%'
        state: present
        label: gpt

    # lvm create both prepares and activates, so osd.2 is running after this
    - name: redeploy osd.2 using /dev/vdd1
      command: "ceph-volume --cluster {{ cluster }} lvm create --filestore --data /dev/vdd1 --journal /dev/vdd2 --osd-id 2"
      environment:
        CEPH_VOLUME_DEBUG: "1"

    # osd.0 lv
    # no --destroy here: the lv itself is kept, only its contents are wiped
    - name: zap test_group/data-lv1
      command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1"
      environment:
        CEPH_VOLUME_DEBUG: "1"

    - name: zap /dev/vdc1
      command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/vdc1 --destroy"
      environment:
        CEPH_VOLUME_DEBUG: "1"

    - name: re-create partition /dev/vdc1
      parted:
        device: /dev/vdc
        number: 1
        part_start: '0%'
        part_end: '50%'
        unit: '%'
        state: present
        label: gpt

    # prepare only — activation is done for all prepared OSDs in the next task
    - name: prepare osd.0 again using test_group/data-lv1
      command: "ceph-volume --cluster {{ cluster }} lvm prepare --filestore --data test_group/data-lv1 --journal /dev/vdc1 --osd-id 0"
      environment:
        CEPH_VOLUME_DEBUG: "1"

    - name: activate all to start the previously prepared osd.0
      command: "ceph-volume lvm activate --filestore --all"
      environment:
        CEPH_VOLUME_DEBUG: "1"

    # final sanity listings for the test log
    - name: node inventory
      command: "ceph-volume inventory"
      environment:
        CEPH_VOLUME_DEBUG: "1"

    - name: list all OSDs
      command: "ceph-volume lvm list"
      environment:
        CEPH_VOLUME_DEBUG: "1"