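# Functional test for ceph-volume filestore deployments: tear down two running
# OSDs (osd.0 and osd.2), zap their devices, redeploy them with the same IDs,
# and then exercise `lvm zap` against LVs on a loop-device backed volume group.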
- hosts: osds
  become: yes
  tasks:

    - name: stop ceph-osd@2 daemon
      service:
        name: ceph-osd@2
        state: stopped

    - name: stop ceph-osd@0 daemon
      service:
        name: ceph-osd@0
        state: stopped


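# mark the stopped OSDs down and destroy them from a mon node so that their
# IDs (0 and 2) can be reused when they are redeployed below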
- hosts: mons
  become: yes
  tasks:
    - name: mark osds down
      command: "ceph --cluster {{ cluster }} osd down osd.{{ item }}"
      with_items:
        - 0
        - 2

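    # destroy may not succeed until the cluster has registered the OSDs as
    # down, hence the retry loop on each task below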
    - name: destroy osd.2
      command: "ceph --cluster {{ cluster }} osd destroy osd.2 --yes-i-really-mean-it"
      register: result
      retries: 30
      delay: 1
      until: result is succeeded

    - name: destroy osd.0
      command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it"
      register: result
      retries: 30
      delay: 1
      until: result is succeeded

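# with both OSDs destroyed, wipe their devices and deploy filestore OSDs on
# them again, reusing the original IDs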
- hosts: osds
  become: yes
  tasks:

    # osd.2 device
    - name: zap /dev/vdd1
      command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/vdd1 --destroy"
      environment:
        CEPH_VOLUME_DEBUG: 1

    # osd.2 journal
    - name: zap /dev/vdd2
      command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/vdd2 --destroy"
      environment:
        CEPH_VOLUME_DEBUG: 1

    # the partitions have been completely removed, so re-create them
    - name: re-create partition /dev/vdd for lvm data usage
      parted:
        device: /dev/vdd
        number: 1
        part_start: 0%
        part_end: 50%
        unit: '%'
        label: gpt
        state: present

    - name: re-create partition /dev/vdd for lvm journal usage
      parted:
        device: /dev/vdd
        number: 2
        part_start: 50%
        part_end: 100%
        unit: '%'
        state: present
        label: gpt

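    # both partitions exist again, so osd.2 can be brought back in one step:
    # `lvm create` both prepares and activates the OSD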
    - name: redeploy osd.2 using /dev/vdd1
      command: "ceph-volume --cluster {{ cluster }} lvm create --filestore --data /dev/vdd1 --journal /dev/vdd2 --osd-id 2"
      environment:
        CEPH_VOLUME_DEBUG: 1

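    # now repeat the redeploy for osd.0, whose data sits on an lvm LV
    # (test_group/data-lv1) rather than on a raw partition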
    # osd.0 data lv
    # note: we don't use --destroy here in order to test that zapping an LV
    # works without that flag. --destroy is exercised in the bluestore tests
    - name: zap test_group/data-lv1
      command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1"
      environment:
        CEPH_VOLUME_DEBUG: 1

    # osd.0 journal device
    - name: zap /dev/vdc1
      command: "ceph-volume --cluster {{ cluster }} lvm zap --destroy /dev/vdc1"
      environment:
        CEPH_VOLUME_DEBUG: 1

    - name: re-create partition /dev/vdc1
      parted:
        device: /dev/vdc
        number: 1
        part_start: 0%
        part_end: 50%
        unit: '%'
        state: present
        label: gpt

    - name: prepare osd.0 again using test_group/data-lv1
      command: "ceph-volume --cluster {{ cluster }} lvm prepare --filestore --data test_group/data-lv1 --journal /dev/vdc1 --osd-id 0"
      environment:
        CEPH_VOLUME_DEBUG: 1

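    # unlike `lvm create`, `lvm prepare` does not start the OSD; osd.0 stays
    # down until the `lvm activate --all` call further below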
    - name: find all OSD paths
      find:
        paths: /var/lib/ceph/osd
        recurse: no
        file_type: directory
      register: osd_paths

    # set all OSD paths to root:root to ensure that the OSD will be able to
    # activate regardless
    - name: mangle permissions to root
      file:
        path: "{{ item.path }}"
        owner: root
        group: root
        recurse: yes
      with_items:
        - "{{ osd_paths.files }}"

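    # stop the OSDs that are still running so that `activate --all` below has
    # to bring every OSD up, including the freshly prepared osd.0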
    - name: stop ceph-osd@2 daemon
      service:
        name: ceph-osd@2
        state: stopped

    - name: stop ceph-osd@1 daemon
      service:
        name: ceph-osd@1
        state: stopped

    - name: activate all to start the previously prepared osd.0
      command: "ceph-volume lvm activate --filestore --all"
      environment:
        CEPH_VOLUME_DEBUG: 1

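    # reporting commands, run here mainly as a smoke test that they exit
    # cleanly with the redeployed OSDs in place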
    - name: node inventory
      command: "ceph-volume inventory"
      environment:
        CEPH_VOLUME_DEBUG: 1

    - name: list all OSDs
      command: "ceph-volume lvm list"
      environment:
        CEPH_VOLUME_DEBUG: 1

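    # the remaining tasks build a throwaway volume group (test_zap) on a loop
    # device so that `lvm zap --destroy` behaviour on LVs can be verified
    # without touching the OSD devices above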
    - name: create temporary directory
      tempfile:
        state: directory
        suffix: sparse
      register: tmpdir

    - name: create a 1GB sparse file
      command: fallocate -l 1G {{ tmpdir.path }}/sparse.file

    - name: find an empty loop device
      command: losetup -f
      register: losetup_list

    - name: set up loop device with sparse file
      command: losetup {{ losetup_list.stdout }} {{ tmpdir.path }}/sparse.file

    - name: create volume group
      command: vgcreate test_zap {{ losetup_list.stdout }}
      failed_when: false

    - name: create logical volume 1
      command: lvcreate --yes -l 50%FREE -n data-lv1 test_zap
      failed_when: false

    - name: create logical volume 2
      command: lvcreate --yes -l 50%FREE -n data-lv2 test_zap
      failed_when: false

    # zapping the first lv shouldn't remove the vg, allowing the second zap to succeed
    - name: zap test_zap/data-lv1
      command: "ceph-volume --cluster {{ cluster }} lvm zap --destroy test_zap/data-lv1"
      environment:
        CEPH_VOLUME_DEBUG: 1

    - name: zap test_zap/data-lv2
      command: "ceph-volume --cluster {{ cluster }} lvm zap --destroy test_zap/data-lv2"
      environment:
        CEPH_VOLUME_DEBUG: 1