- hosts: osds
  become: yes
  tasks:

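    # stop the OSDs that are redeployed further down, so that their devices
    # can be zapped and re-used safely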
    - name: stop ceph-osd@2 daemon
      service:
        name: ceph-osd@2
        state: stopped

    - name: stop ceph-osd@0 daemon
      service:
        name: ceph-osd@0
        state: stopped


- hosts: mons
  become: yes
  tasks:
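    # "osd destroy" removes the cephx key and marks the OSD as destroyed while
    # keeping its ID in the CRUSH map, so the redeploy steps below can reuse
    # ids 2 and 0 via --osd-id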
    - name: destroy osd.2
      command: "ceph --cluster {{ cluster }} osd destroy osd.2 --yes-i-really-mean-it"

    - name: destroy osd.0
      command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it"

- hosts: osds
  become: yes
  tasks:

    # osd.2 device
    - name: zap /dev/sdd1
      command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdd1 --destroy"
      environment:
        CEPH_VOLUME_DEBUG: 1

    # osd.2 journal
    - name: zap /dev/sdd2
      command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdd2 --destroy"
      environment:
        CEPH_VOLUME_DEBUG: 1

    # the partitions have been completely removed, so re-create them
    - name: re-create partition /dev/sdd for lvm data usage
      parted:
        device: /dev/sdd
        number: 1
        part_start: 0%
        part_end: 50%
        unit: '%'
        label: gpt
        state: present

    - name: re-create partition /dev/sdd for lvm journals
      parted:
        device: /dev/sdd
        number: 2
        part_start: 50%
        part_end: 100%
        unit: '%'
        state: present
        label: gpt

    - name: redeploy osd.2 using /dev/sdd1
      command: "ceph-volume --cluster {{ cluster }} lvm create --filestore --data /dev/sdd1 --journal /dev/sdd2 --osd-id 2"
      environment:
        CEPH_VOLUME_DEBUG: 1
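    # "lvm create" is prepare + activate in one step, so osd.2 comes back up
    # right away; osd.0 below is only prepared and is started later via
    # "lvm activate --all"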

    # osd.0 data lv
    # note: we don't use --destroy here, to test that zapping works without
    # that flag; --destroy is used in the bluestore tests
    - name: zap test_group/data-lv1
      command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1"
      environment:
        CEPH_VOLUME_DEBUG: 1

    # osd.0 journal device (zap without the --destroy flag, which would remove the device)
    - name: zap /dev/sdc1
      command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdc1"
      environment:
        CEPH_VOLUME_DEBUG: 1
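    # without --destroy both the data LV and the journal partition survive the
    # zap, so the prepare call below can consume them directly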

    - name: prepare osd.0 again using test_group/data-lv1
      command: "ceph-volume --cluster {{ cluster }} lvm prepare --filestore --data test_group/data-lv1 --journal /dev/sdc1 --osd-id 0"
      environment:
        CEPH_VOLUME_DEBUG: 1
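    # "lvm prepare" sets the OSD up but does not start it; activation is
    # exercised separately further down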

    - name: find all OSD paths
      find:
        paths: /var/lib/ceph/osd
        recurse: no
        file_type: directory
      register: osd_paths

    # set all OSD paths to root:root to ensure that the OSD will be able to
    # activate regardless of the current ownership
    - name: mangle permissions to root
      file:
        path: "{{ item.path }}"
        owner: root
        group: root
        recurse: yes
      with_items:
        - "{{ osd_paths.files }}"
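    # activation is expected to chown everything back to ceph:ceph, which is
    # what mangling the ownership to root:root above verifies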

    - name: stop ceph-osd@2 daemon
      service:
        name: ceph-osd@2
        state: stopped

    - name: stop ceph-osd@1 daemon
      service:
        name: ceph-osd@1
        state: stopped

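    # "activate --all" discovers prepared OSDs from their LVM tags and starts
    # the matching ceph-osd systemd units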
    - name: activate all to start the previously prepared osd.0
      command: "ceph-volume lvm activate --filestore --all"
      environment:
        CEPH_VOLUME_DEBUG: 1

    - name: list all OSDs
      command: "ceph-volume lvm list"
      environment:
        CEPH_VOLUME_DEBUG: 1
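    # note: the same listing is available in machine-readable form via
    # "ceph-volume lvm list --format json" if it ever needs to be parsed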

    - name: create temporary directory
      tempfile:
        state: directory
        suffix: sparse
      register: tmpdir

    - name: create a 5GB sparse file
      command: fallocate -l 5G {{ tmpdir.path }}/sparse.file

    - name: find an empty loop device
      command: losetup -f
      register: losetup_list

    - name: setup loop device with sparse file
      command: losetup {{ losetup_list.stdout }} {{ tmpdir.path }}/sparse.file
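    # the loop-backed sparse file provides a throwaway block device, so the
    # zap --destroy behaviour below can be tested on a vg/lv that belongs to
    # no real OSD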

    - name: create volume group
      command: vgcreate test_zap {{ losetup_list.stdout }}
      failed_when: false

    - name: create logical volume 1
      command: lvcreate --yes -l 50%FREE -n data-lv1 test_zap
      failed_when: false

    - name: create logical volume 2
      command: lvcreate --yes -l 50%FREE -n data-lv2 test_zap
      failed_when: false
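    # failed_when: false is presumably there so that leftover vg/lvs from a
    # previous run do not abort the play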

    # zapping the first lv shouldn't remove the vg, allowing the second zap to succeed
    - name: zap test_zap/data-lv1
      command: "ceph-volume --cluster {{ cluster }} lvm zap --destroy test_zap/data-lv1"
      environment:
        CEPH_VOLUME_DEBUG: 1

    - name: zap test_zap/data-lv2
      command: "ceph-volume --cluster {{ cluster }} lvm zap --destroy test_zap/data-lv2"
      environment:
        CEPH_VOLUME_DEBUG: 1