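# Functional test for ceph-volume lvm with filestore OSDs: stop and destroy
# osd.0 and osd.2, zap their devices, redeploy them on the same IDs, then
# exercise `lvm zap --destroy` against logical volumes backed by a loop device.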
- hosts: osds
  become: yes
  tasks:

    - name: stop ceph-osd@2 daemon
      service:
        name: ceph-osd@2
        state: stopped

    - name: stop ceph-osd@0 daemon
      service:
        name: ceph-osd@0
        state: stopped

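# The monitors mark the stopped OSDs down and destroy them so that their IDs
# (0 and 2) can be reused when the OSDs are redeployed below.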
- hosts: mons
  become: yes
  tasks:
    - name: mark osds down
      command: "ceph --cluster {{ cluster }} osd down osd.{{ item }}"
      with_items:
        - 0
        - 2

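    # `osd destroy` only succeeds once the cluster sees the OSD as down, hence the retry loop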
    - name: destroy osd.2
      command: "ceph --cluster {{ cluster }} osd destroy osd.2 --yes-i-really-mean-it"
      register: result
      retries: 30
      delay: 1
      until: result is succeeded

    - name: destroy osd.0
      command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it"
      register: result
      retries: 30
      delay: 1
      until: result is succeeded

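# Back on the OSD hosts: zap the data and journal devices of both OSDs and
# redeploy them onto the IDs that were just destroyed.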
- hosts: osds
  become: yes
  tasks:

    # osd.2 device
    - name: zap /dev/vdd1
      command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/vdd1 --destroy"
      environment:
        CEPH_VOLUME_DEBUG: 1

    # osd.2 journal
    - name: zap /dev/vdd2
      command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/vdd2 --destroy"
      environment:
        CEPH_VOLUME_DEBUG: 1

    # the zaps above removed the partitions completely, so re-create them
    - name: re-create partition /dev/vdd for lvm data usage
      parted:
        device: /dev/vdd
        number: 1
        part_start: 0%
        part_end: 50%
        unit: '%'
        label: gpt
        state: present

    - name: re-create partition /dev/vdd for lvm journal usage
      parted:
        device: /dev/vdd
        number: 2
        part_start: 50%
        part_end: 100%
        unit: '%'
        label: gpt
        state: present

    - name: redeploy osd.2 using /dev/vdd1
      command: "ceph-volume --cluster {{ cluster }} lvm create --filestore --data /dev/vdd1 --journal /dev/vdd2 --osd-id 2"
      environment:
        CEPH_VOLUME_DEBUG: 1

    # osd.0 data lv
    # note: --destroy is deliberately omitted here to test that zapping works
    # without the flag; --destroy is exercised in the bluestore tests
    - name: zap test_group/data-lv1
      command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1"
      environment:
        CEPH_VOLUME_DEBUG: 1

    # osd.0 journal device (also zapped without --destroy, which would remove the partition)
    - name: zap /dev/vdc1
      command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/vdc1"
      environment:
        CEPH_VOLUME_DEBUG: 1

    - name: prepare osd.0 again using test_group/data-lv1
      command: "ceph-volume --cluster {{ cluster }} lvm prepare --filestore --data test_group/data-lv1 --journal /dev/vdc1 --osd-id 0"
      environment:
        CEPH_VOLUME_DEBUG: 1

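    # osd.0 is only prepared at this point; it is started by the
    # `lvm activate --filestore --all` task further down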
    - name: find all OSD paths
      find:
        paths: /var/lib/ceph/osd
        recurse: no
        file_type: directory
      register: osd_paths

    # set all OSD paths to root:root to ensure that the OSD will be able to
    # activate regardless of ownership
    - name: mangle permissions to root
      file:
        path: "{{ item.path }}"
        owner: root
        group: root
        recurse: yes
      with_items:
        - "{{ osd_paths.files }}"

    - name: stop ceph-osd@2 daemon
      service:
        name: ceph-osd@2
        state: stopped

    - name: stop ceph-osd@1 daemon
      service:
        name: ceph-osd@1
        state: stopped

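    # activation is expected to restore the correct ownership that the
    # "mangle permissions to root" task above deliberately broke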
    - name: activate all to start the previously prepared osd.0
      command: "ceph-volume lvm activate --filestore --all"
      environment:
        CEPH_VOLUME_DEBUG: 1

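    # smoke-test the reporting commands against the redeployed OSDs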
    - name: node inventory
      command: "ceph-volume inventory"
      environment:
        CEPH_VOLUME_DEBUG: 1

    - name: list all OSDs
      command: "ceph-volume lvm list"
      environment:
        CEPH_VOLUME_DEBUG: 1

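    # The remaining tasks build a throwaway volume group (test_zap) on a
    # loop-device-backed sparse file so that `lvm zap --destroy` can be
    # exercised against plain logical volumes.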
    - name: create temporary directory
      tempfile:
        state: directory
        suffix: sparse
      register: tmpdir

    - name: create a 1GB sparse file
      command: fallocate -l 1G {{ tmpdir.path }}/sparse.file

    - name: find an empty loop device
      command: losetup -f
      register: losetup_list

    - name: set up loop device with sparse file
      command: losetup {{ losetup_list.stdout }} {{ tmpdir.path }}/sparse.file

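    # VG/LV creation is made non-fatal (failed_when: false), e.g. for re-runs
    # where a leftover test_zap volume group already exists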
    - name: create volume group
      command: vgcreate test_zap {{ losetup_list.stdout }}
      failed_when: false

    - name: create logical volume 1
      command: lvcreate --yes -l 50%FREE -n data-lv1 test_zap
      failed_when: false

    - name: create logical volume 2
      command: lvcreate --yes -l 50%FREE -n data-lv2 test_zap
      failed_when: false

    # zapping the first lv shouldn't remove the vg, allowing the second zap to succeed
    - name: zap test_zap/data-lv1
      command: "ceph-volume --cluster {{ cluster }} lvm zap --destroy test_zap/data-lv1"
      environment:
        CEPH_VOLUME_DEBUG: 1

    - name: zap test_zap/data-lv2
      command: "ceph-volume --cluster {{ cluster }} lvm zap --destroy test_zap/data-lv2"
      environment:
        CEPH_VOLUME_DEBUG: 1