# ceph/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/dmcrypt/test.yml
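#
# This playbook tears down two running OSDs (osd.0 on an LVM logical volume,
# osd.2 on a raw partition) and redeploys them with ceph-volume lvm, reusing
# the original OSD ids. CEPH_VOLUME_DEBUG=1 enables ceph-volume debug output.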
- hosts: osds
  become: yes
  tasks:

    - name: stop ceph-osd@2 daemon
      service:
        name: ceph-osd@2
        state: stopped

    - name: stop ceph-osd@0 daemon
      service:
        name: ceph-osd@0
        state: stopped

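# On the monitors, mark both OSDs down and destroy them so their ids can be
# reused when they are redeployed below.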
- hosts: mons
  become: yes
  tasks:
    - name: mark osds down
      command: "ceph --cluster {{ cluster }} osd down osd.{{ item }}"
      with_items:
        - 0
        - 2

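    # "osd destroy" removes the OSD's cephx keys and marks it destroyed while
    # keeping its id, so ceph-volume can recreate it with --osd-id. The retry
    # loop gives the cluster up to 30 seconds to accept each command.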
    - name: destroy osd.2
      command: "ceph --cluster {{ cluster }} osd destroy osd.2 --yes-i-really-mean-it"
      register: result
      retries: 30
      delay: 1
      until: result is succeeded

    - name: destroy osd.0
      command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it"
      register: result
      retries: 30
      delay: 1
      until: result is succeeded

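# Back on the OSD nodes: wipe the backing devices and redeploy both OSDs.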
- hosts: osds
  become: yes
  tasks:

    # osd.2 device
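    # "lvm zap --destroy" wipes any LVM metadata and deletes the partition
    # itself, which is why it has to be re-created with parted below.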
    - name: zap /dev/vdd1
      command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/vdd1 --destroy"
      environment:
        CEPH_VOLUME_DEBUG: 1

    # partitions have been completely removed, so re-create them again
    - name: re-create partition /dev/vdd for lvm data usage
      parted:
        device: /dev/vdd
        number: 1
        part_start: 0%
        part_end: 50%
        unit: '%'
        label: gpt
        state: present

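    # "lvm create" runs prepare and activate in one shot; --osd-id reuses the
    # ids released by the destroy steps above.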
    - name: redeploy osd.2 using /dev/vdd1
      command: "ceph-volume --cluster {{ cluster }} lvm create --bluestore --data /dev/vdd1 --osd-id 2"
      environment:
        CEPH_VOLUME_DEBUG: 1

    # osd.0 lv
    - name: zap test_group/data-lv1
      command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1"
      environment:
        CEPH_VOLUME_DEBUG: 1

    - name: redeploy osd.0 using test_group/data-lv1
      command: "ceph-volume --cluster {{ cluster }} lvm create --bluestore --data test_group/data-lv1 --osd-id 0"
      environment:
        CEPH_VOLUME_DEBUG: 1

    - name: stop ceph-osd@0 daemon
      service:
        name: ceph-osd@0
        state: stopped

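# Destroy osd.0 a second time so the split prepare/activate flow can be
# exercised separately from "lvm create".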
- hosts: mons
  become: yes
  tasks:
    - name: mark osds down
      command: "ceph --cluster {{ cluster }} osd down osd.0"

    - name: destroy osd.0
      command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it"
      register: result
      retries: 30
      delay: 1
      until: result is succeeded

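# Redeploy osd.0 in two explicit steps this time: "lvm prepare" followed by
# "lvm activate --all".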
- hosts: osds
  become: yes
  tasks:

    - name: zap test_group/data-lv1
      command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1"
      environment:
        CEPH_VOLUME_DEBUG: 1

    - name: prepare osd.0 using test_group/data-lv1
      command: "ceph-volume --cluster {{ cluster }} lvm prepare --bluestore --data test_group/data-lv1 --osd-id 0"
      environment:
        CEPH_VOLUME_DEBUG: 1

    - name: activate all to start the previously prepared osd.0
      command: "ceph-volume lvm activate --all"
      environment:
        CEPH_VOLUME_DEBUG: 1

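    # Finally, run the reporting subcommands to check that ceph-volume can
    # inventory the node's devices and list the redeployed OSDs.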
    - name: node inventory
      command: "ceph-volume inventory"
      environment:
        CEPH_VOLUME_DEBUG: 1

    - name: list all OSDs
      command: "ceph-volume lvm list"
      environment:
        CEPH_VOLUME_DEBUG: 1