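# test_bluestore.yml: functional test playbook for ceph-volume lvm with
# bluestore OSDs. The plays below stop two running OSDs, destroy them via the
# monitors, then exercise the zap/create/prepare/activate paths, ending with a
# loop-device based regression test for `lvm zap --destroy`.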

- hosts: osds
  become: yes
  tasks:

    - name: stop ceph-osd@2 daemon
      service:
        name: ceph-osd@2
        state: stopped

    - name: stop ceph-osd@0 daemon
      service:
        name: ceph-osd@0
        state: stopped

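# `ceph osd destroy` marks the OSD as destroyed and removes its cephx key, but
# the id stays in the CRUSH map so it can be reclaimed below with --osd-id.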
- hosts: mons
  become: yes
  tasks:

    - name: destroy osd.2
      command: "ceph --cluster {{ cluster }} osd destroy osd.2 --yes-i-really-mean-it"

    - name: destroy osd.0
      command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it"

- hosts: osds
  become: yes
  tasks:

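    # CEPH_VOLUME_DEBUG=1 makes ceph-volume emit full tracebacks instead of
    # short error messages, which helps when a functional test run fails.
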
    # osd.2 device
    - name: zap /dev/vdd1
      command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/vdd1 --destroy"
      environment:
        CEPH_VOLUME_DEBUG: 1

    # zap --destroy removed the partition entirely, so re-create it
    - name: re-create partition /dev/vdd for lvm data usage
      parted:
        device: /dev/vdd
        number: 1
        part_start: 0%
        part_end: 50%
        unit: '%'
        label: gpt
        state: present

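    # `lvm create` runs prepare and activate in a single step; --osd-id 2
    # reclaims the id that `osd destroy` released above.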
    - name: redeploy osd.2 using /dev/vdd1
      command: "ceph-volume --cluster {{ cluster }} lvm create --bluestore --data /dev/vdd1 --osd-id 2"
      environment:
        CEPH_VOLUME_DEBUG: 1

    # osd.0 device (zap without --destroy, which would also remove the LV)
    - name: zap test_group/data-lv1
      command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1"
      environment:
        CEPH_VOLUME_DEBUG: 1

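    # a plain zap only wipes the data on the LV and leaves the LV in place, so
    # it can be prepared again; activation is deferred until after the
    # ownership checks below.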
    - name: prepare osd.0 again using test_group/data-lv1
      command: "ceph-volume --cluster {{ cluster }} lvm prepare --bluestore --data test_group/data-lv1 --osd-id 0"
      environment:
        CEPH_VOLUME_DEBUG: 1

    - name: find all OSD directories
      find:
        paths: /var/lib/ceph/osd
        recurse: no
        file_type: directory
      register: osd_directories

    - name: find all OSD symlinks
      find:
        paths: /var/lib/ceph/osd
        recurse: yes
        depth: 2
        file_type: link
      register: osd_symlinks

    # set the OSD dir and the block/block.db links to root:root permissions, to
    # ensure that the OSD will be able to activate regardless
    - name: set OSD directories to root:root ownership
      file:
        path: "{{ item.path }}"
        owner: root
        group: root
      with_items:
        - "{{ osd_directories.files }}"

    - name: set OSD symlinks to root:root ownership
      file:
        path: "{{ item.path }}"
        owner: root
        group: root
      with_items:
        - "{{ osd_symlinks.files }}"

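    # activation is expected to chown everything back to ceph:ceph, so the
    # root:root ownership set above should not prevent the OSDs from starting.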
    - name: activate all to start the previously prepared osd.0
      command: "ceph-volume lvm activate --all"
      environment:
        CEPH_VOLUME_DEBUG: 1

    - name: node inventory
      command: "ceph-volume inventory"
      environment:
        CEPH_VOLUME_DEBUG: 1

    - name: list all OSDs
      command: "ceph-volume lvm list"
      environment:
        CEPH_VOLUME_DEBUG: 1

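    # the remaining tasks build a throwaway vg on a loop device so that
    # `lvm zap --destroy` can be exercised without touching the real OSD disks.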
    - name: create temporary directory
      tempfile:
        state: directory
        suffix: sparse
      register: tmpdir

    - name: create a 1GB sparse file
      command: fallocate -l 1G {{ tmpdir.path }}/sparse.file

    - name: find an empty loop device
      command: losetup -f
      register: losetup_list

    - name: setup loop device with sparse file
      command: losetup {{ losetup_list.stdout }} {{ tmpdir.path }}/sparse.file

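    # failed_when: false tolerates an already-existing vg/lv, so the playbook
    # can be re-run without manual cleanup.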
    - name: create volume group
      command: vgcreate test_zap {{ losetup_list.stdout }}
      failed_when: false

    - name: create logical volume 1
      command: lvcreate --yes -l 50%FREE -n data-lv1 test_zap
      failed_when: false

    - name: create logical volume 2
      command: lvcreate --yes -l 50%FREE -n data-lv2 test_zap
      failed_when: false

    # zapping the first lv shouldn't remove the vg, allowing the second zap to succeed
    - name: zap test_zap/data-lv1
      command: "ceph-volume --cluster {{ cluster }} lvm zap --destroy test_zap/data-lv1"
      environment:
        CEPH_VOLUME_DEBUG: 1

    - name: zap test_zap/data-lv2
      command: "ceph-volume --cluster {{ cluster }} lvm zap --destroy test_zap/data-lv2"
      environment:
        CEPH_VOLUME_DEBUG: 1