# ceph/src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/test_bluestore.yml

- hosts: osds
  become: yes
  tasks:

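    # stop both OSDs so they can be destroyed and redeployed by the plays below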
    - name: stop ceph-osd@2 daemon
      service:
        name: ceph-osd@2
        state: stopped

    - name: stop ceph-osd@0 daemon
      service:
        name: ceph-osd@0
        state: stopped


- hosts: mons
  become: yes
  tasks:

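    # mark the stopped OSDs as destroyed so their IDs can be reused when redeploying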
    - name: destroy osd.2
      command: "ceph --cluster {{ cluster }} osd destroy osd.2 --yes-i-really-mean-it"

    - name: destroy osd.0
      command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it"


- hosts: osds
  become: yes
  tasks:

    # osd.2 device
    - name: zap /dev/sdd1
      command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdd1 --destroy"
      environment:
        CEPH_VOLUME_DEBUG: 1

    # the partition has been completely removed by the zap above, so re-create it
    - name: re-create partition /dev/sdd for lvm data usage
      parted:
        device: /dev/sdd
        number: 1
        part_start: 0%
        part_end: 50%
        unit: '%'
        label: gpt
        state: present

    - name: redeploy osd.2 using /dev/sdd1
      command: "ceph-volume --cluster {{ cluster }} lvm create --bluestore --data /dev/sdd1 --osd-id 2"
      environment:
        CEPH_VOLUME_DEBUG: 1

    # osd.0 device (zap without --destroy, which would remove the LV)
    - name: zap test_group/data-lv1
      command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1"
      environment:
        CEPH_VOLUME_DEBUG: 1

    - name: prepare osd.0 again using test_group/data-lv1
      command: "ceph-volume --cluster {{ cluster }} lvm prepare --bluestore --data test_group/data-lv1 --osd-id 0"
      environment:
        CEPH_VOLUME_DEBUG: 1

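    # collect the OSD data directories and the block/block.db symlinks inside
    # them so their ownership can be changed below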
    - name: find all OSD directories
      find:
        paths: /var/lib/ceph/osd
        recurse: no
        file_type: directory
      register: osd_directories

    - name: find all OSD symlinks
      find:
        paths: /var/lib/ceph/osd
        recurse: yes
        depth: 2
        file_type: link
      register: osd_symlinks

    # set the OSD dir and the block/block.db links to root:root ownership, to
    # ensure that the OSD will be able to activate regardless
    - file:
        path: "{{ item.path }}"
        owner: root
        group: root
      with_items:
        - "{{ osd_directories.files }}"

    - file:
        path: "{{ item.path }}"
        owner: root
        group: root
      with_items:
        - "{{ osd_symlinks.files }}"

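    # activation should succeed even though the OSD dirs and symlinks are now owned by root:root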
    - name: activate all to start the previously prepared osd.0
      command: "ceph-volume lvm activate --all"
      environment:
        CEPH_VOLUME_DEBUG: 1

    - name: list all OSDs
      command: "ceph-volume lvm list"
      environment:
        CEPH_VOLUME_DEBUG: 1

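    # build a throwaway VG (test_zap) on a loop device with two LVs to check
    # that zapping one LV with --destroy does not remove the VG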
    - name: create temporary directory
      tempfile:
        state: directory
        suffix: sparse
      register: tmpdir

    - name: create a 5GB sparse file
      command: fallocate -l 5G {{ tmpdir.path }}/sparse.file

    - name: find an empty loop device
      command: losetup -f
      register: losetup_list

    - name: setup loop device with sparse file
      command: losetup {{ losetup_list.stdout }} {{ tmpdir.path }}/sparse.file

    - name: create volume group
      command: vgcreate test_zap {{ losetup_list.stdout }}
      failed_when: false

    - name: create logical volume 1
      command: lvcreate --yes -l 50%FREE -n data-lv1 test_zap
      failed_when: false

    - name: create logical volume 2
      command: lvcreate --yes -l 50%FREE -n data-lv2 test_zap
      failed_when: false

    # zapping the first lv shouldn't remove the vg, allowing the second zap to succeed
    - name: zap test_zap/data-lv1
      command: "ceph-volume --cluster {{ cluster }} lvm zap --destroy test_zap/data-lv1"
      environment:
        CEPH_VOLUME_DEBUG: 1

    - name: zap test_zap/data-lv2
      command: "ceph-volume --cluster {{ cluster }} lvm zap --destroy test_zap/data-lv2"
      environment:
        CEPH_VOLUME_DEBUG: 1