- hosts: osds
  become: yes
  tasks:

    - name: stop ceph-osd@2 daemon
      service:
        name: ceph-osd@2
        state: stopped

    - name: stop ceph-osd@0 daemon
      service:
        name: ceph-osd@0
        state: stopped


- hosts: mons
  become: yes
  tasks:

    - name: mark osds down
      command: "ceph --cluster {{ cluster }} osd down osd.{{ item }}"
      with_items:
        - 0
        - 2

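    # destroy the OSDs so that ids 0 and 2 are freed up and can be reused by
    # the redeploy tasks below, which pass --osd-id explicitly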
    - name: destroy osd.2
      command: "ceph --cluster {{ cluster }} osd destroy osd.2 --yes-i-really-mean-it"
      register: result
      retries: 30
      delay: 1
      until: result is succeeded

    - name: destroy osd.0
      command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it"
      register: result
      retries: 30
      delay: 1
      until: result is succeeded

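# back on the OSD hosts: zap the backing devices and redeploy osd.0 and osd.2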
- hosts: osds
  become: yes
  tasks:

    # osd.2 device
    - name: zap /dev/vdd1
      command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/vdd1 --destroy"
      environment:
        CEPH_VOLUME_DEBUG: 1

    # osd.2 journal
    - name: zap /dev/vdd2
      command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/vdd2 --destroy"
      environment:
        CEPH_VOLUME_DEBUG: 1

    # partitions have been completely removed, so re-create them again
    - name: re-create partition /dev/vdd for lvm data usage
      parted:
        device: /dev/vdd
        number: 1
        part_start: 0%
        part_end: 50%
        unit: '%'
        label: gpt
        state: present

    - name: re-create partition /dev/vdd lvm journals
      parted:
        device: /dev/vdd
        number: 2
        part_start: 50%
        part_end: 100%
        unit: '%'
        state: present
        label: gpt

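    # `lvm create` prepares and activates in one step, so osd.2 comes back up
    # right away; osd.0 below is only prepared and gets activated later on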
    - name: redeploy osd.2 using /dev/vdd1
      command: "ceph-volume --cluster {{ cluster }} lvm create --filestore --data /dev/vdd1 --journal /dev/vdd2 --osd-id 2"
      environment:
        CEPH_VOLUME_DEBUG: 1

    # osd.0 data lv
    # note: we don't use --destroy here, to test that zapping works without that flag.
    # --destroy is used in the bluestore tests
    - name: zap test_group/data-lv1
      command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1"
      environment:
        CEPH_VOLUME_DEBUG: 1

    # osd.0 journal device (zap without --destroy, which would remove the partition)
    - name: zap /dev/vdc1
      command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/vdc1"
      environment:
        CEPH_VOLUME_DEBUG: 1

    - name: prepare osd.0 again using test_group/data-lv1
      command: "ceph-volume --cluster {{ cluster }} lvm prepare --filestore --data test_group/data-lv1 --journal /dev/vdc1 --osd-id 0"
      environment:
        CEPH_VOLUME_DEBUG: 1

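    # collect the OSD data directories under /var/lib/ceph/osd so their
    # ownership can be changed in the next task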
    - name: find all OSD paths
      find:
        paths: /var/lib/ceph/osd
        recurse: no
        file_type: directory
      register: osd_paths

    # set all OSD paths to root:root to ensure that the OSD will be able to
    # activate regardless
    - name: mangle permissions to root
      file:
        path: "{{ item.path }}"
        owner: root
        group: root
        recurse: yes
      with_items:
        - "{{ osd_paths.files }}"

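    # stop the daemons that are still running so that the `lvm activate --all`
    # call below is what brings the OSDs back up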
    - name: stop ceph-osd@2 daemon
      service:
        name: ceph-osd@2
        state: stopped

    - name: stop ceph-osd@1 daemon
      service:
        name: ceph-osd@1
        state: stopped

    - name: activate all to start the previously prepared osd.0
      command: "ceph-volume lvm activate --filestore --all"
      environment:
        CEPH_VOLUME_DEBUG: 1

    - name: node inventory
      command: "ceph-volume inventory"
      environment:
        CEPH_VOLUME_DEBUG: 1

    - name: list all OSDs
      command: "ceph-volume lvm list"
      environment:
        CEPH_VOLUME_DEBUG: 1

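    # the remaining tasks build a throwaway volume group (test_zap) on a loop
    # device backed by a sparse file, to exercise `lvm zap --destroy` on plain
    # logical volumes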
    - name: create temporary directory
      tempfile:
        state: directory
        suffix: sparse
      register: tmpdir

    - name: create a 1GB sparse file
      command: fallocate -l 1G {{ tmpdir.path }}/sparse.file

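    # `losetup -f` prints the first unused loop device without attaching it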
    - name: find an empty loop device
      command: losetup -f
      register: losetup_list

    - name: setup loop device with sparse file
      command: losetup {{ losetup_list.stdout }} {{ tmpdir.path }}/sparse.file

    - name: create volume group
      command: vgcreate test_zap {{ losetup_list.stdout }}
      failed_when: false

    - name: create logical volume 1
      command: lvcreate --yes -l 50%FREE -n data-lv1 test_zap
      failed_when: false

    - name: create logical volume 2
      command: lvcreate --yes -l 50%FREE -n data-lv2 test_zap
      failed_when: false

    # zapping the first lv shouldn't remove the vg, allowing the second zap to succeed
    - name: zap test_zap/data-lv1
      command: "ceph-volume --cluster {{ cluster }} lvm zap --destroy test_zap/data-lv1"
      environment:
        CEPH_VOLUME_DEBUG: 1

    - name: zap test_zap/data-lv2
      command: "ceph-volume --cluster {{ cluster }} lvm zap --destroy test_zap/data-lv2"
      environment:
        CEPH_VOLUME_DEBUG: 1