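# Functional test playbook for ceph-volume lvm with BlueStore
# (ceph/src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/test_bluestore.yml).
# Scenario: stop osd.0 and osd.2, destroy them from a monitor, redeploy them
# with `ceph-volume lvm`, and exercise `lvm zap` on partitions and LVs.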
- hosts: osds
  become: yes
  tasks:

    - name: stop ceph-osd@2 daemon
      service:
        name: ceph-osd@2
        state: stopped

    - name: stop ceph-osd@0 daemon
      service:
        name: ceph-osd@0
        state: stopped

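# from a monitor node, mark the stopped OSDs down and destroy them so their
# IDs can be reused when they are redeployed below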
- hosts: mons
  become: yes
  tasks:
    - name: mark osds down
      command: "ceph --cluster {{ cluster }} osd down osd.{{ item }}"
      with_items:
        - 0
        - 2

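    # `osd destroy` is retried because the cluster may not yet have processed
    # the preceding `osd down` for these OSDs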
    - name: destroy osd.2
      command: "ceph --cluster {{ cluster }} osd destroy osd.2 --yes-i-really-mean-it"
      register: result
      retries: 30
      delay: 1
      until: result is succeeded

    - name: destroy osd.0
      command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it"
      register: result
      retries: 30
      delay: 1
      until: result is succeeded

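# back on the OSD nodes: zap the old devices, redeploy osd.2 on a fresh
# partition and osd.0 on its existing LV, then run additional ceph-volume
# checks and zap tests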
- hosts: osds
  become: yes
  tasks:

    # osd.2 device
    - name: zap /dev/vdd1
      command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/vdd1 --destroy"
      environment:
        CEPH_VOLUME_DEBUG: 1

    # the partition has been completely removed by the zap, so re-create it
    - name: re-create partition /dev/vdd for lvm data usage
      parted:
        device: /dev/vdd
        number: 1
        part_start: 0%
        part_end: 50%
        unit: '%'
        label: gpt
        state: present

    - name: redeploy osd.2 using /dev/vdd1
      command: "ceph-volume --cluster {{ cluster }} lvm create --bluestore --data /dev/vdd1 --osd-id 2"
      environment:
        CEPH_VOLUME_DEBUG: 1

    # osd.0 device (zap without --destroy, which would remove the LV)
    - name: zap test_group/data-lv1
      command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1"
      environment:
        CEPH_VOLUME_DEBUG: 1

    - name: prepare osd.0 again using test_group/data-lv1
      command: "ceph-volume --cluster {{ cluster }} lvm prepare --bluestore --data test_group/data-lv1 --osd-id 0"
      environment:
        CEPH_VOLUME_DEBUG: 1

    - name: find all OSD directories
      find:
        paths: /var/lib/ceph/osd
        recurse: no
        file_type: directory
      register: osd_directories

    - name: find all OSD symlinks
      find:
        paths: /var/lib/ceph/osd
        recurse: yes
        depth: 2
        file_type: link
      register: osd_symlinks

    # set the OSD dirs and the block/block.db links to root:root permissions,
    # to ensure that the OSDs can still activate regardless of prior ownership
    - name: set OSD directory ownership to root
      file:
        path: "{{ item.path }}"
        owner: root
        group: root
      with_items:
        - "{{ osd_directories.files }}"

    - name: set OSD symlink ownership to root
      file:
        path: "{{ item.path }}"
        owner: root
        group: root
      with_items:
        - "{{ osd_symlinks.files }}"

    - name: activate all to start the previously prepared osd.0
      command: "ceph-volume lvm activate --all"
      environment:
        CEPH_VOLUME_DEBUG: 1

    - name: node inventory
      command: "ceph-volume inventory"
      environment:
        CEPH_VOLUME_DEBUG: 1

    - name: list all OSDs
      command: "ceph-volume lvm list"
      environment:
        CEPH_VOLUME_DEBUG: 1

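    # build a throwaway volume group on a loop device so that `lvm zap --destroy`
    # can be exercised without touching the devices backing the real OSDs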
    - name: create temporary directory
      tempfile:
        state: directory
        suffix: sparse
      register: tmpdir

    - name: create a 1GB sparse file
      command: fallocate -l 1G {{ tmpdir.path }}/sparse.file

    - name: find an empty loop device
      command: losetup -f
      register: losetup_list

    - name: set up loop device with sparse file
      command: losetup {{ losetup_list.stdout }} {{ tmpdir.path }}/sparse.file

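    # failed_when: false lets these setup steps pass even if the VG or LVs are
    # left over from a previous run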
    - name: create volume group
      command: vgcreate test_zap {{ losetup_list.stdout }}
      failed_when: false

    - name: create logical volume 1
      command: lvcreate --yes -l 50%FREE -n data-lv1 test_zap
      failed_when: false

    - name: create logical volume 2
      command: lvcreate --yes -l 50%FREE -n data-lv2 test_zap
      failed_when: false

    # zapping the first LV shouldn't remove the VG, allowing the second zap to succeed
    - name: zap test_zap/data-lv1
      command: "ceph-volume --cluster {{ cluster }} lvm zap --destroy test_zap/data-lv1"
      environment:
        CEPH_VOLUME_DEBUG: 1

    - name: zap test_zap/data-lv2
      command: "ceph-volume --cluster {{ cluster }} lvm zap --destroy test_zap/data-lv2"
      environment:
        CEPH_VOLUME_DEBUG: 1