# Source: ceph/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/dmcrypt/test.yml
# (recovered from a gitweb blame view; blame annotations removed)
---
# Stop the running OSD daemons so their backing devices can be
# destroyed and re-deployed by the plays that follow.
- hosts: osds
  become: true
  tasks:

    - name: stop ceph-osd@2 daemon
      service:
        name: ceph-osd@2
        state: stopped

    - name: stop ceph-osd@0 daemon
      service:
        name: ceph-osd@0
        state: stopped
# Mark the stopped OSDs as destroyed in the cluster map so their
# ids (0 and 2) can be re-used when the devices are redeployed.
- hosts: mons
  become: true
  tasks:

    - name: destroy osd.2
      command: "ceph --cluster {{ cluster }} osd destroy osd.2 --yes-i-really-mean-it"

    - name: destroy osd.0
      command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it"
# Zap the old OSD backing storage and redeploy with `ceph-volume lvm create`,
# re-using the OSD ids released by the destroy play above.
- hosts: osds
  become: true
  tasks:

    # osd.2 device
    - name: zap /dev/sdd1
      command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdd1 --destroy"
      environment:
        # environment values must be strings, hence the quotes
        CEPH_VOLUME_DEBUG: "1"

    # --destroy removed the partition table, so re-create the partition
    - name: re-create partition /dev/sdd for lvm data usage
      parted:
        device: /dev/sdd
        number: 1
        part_start: "0%"
        part_end: "50%"
        unit: '%'
        label: gpt
        state: present

    - name: redeploy osd.2 using /dev/sdd1
      command: "ceph-volume --cluster {{ cluster }} lvm create --bluestore --data /dev/sdd1 --osd-id 2"
      environment:
        CEPH_VOLUME_DEBUG: "1"

    # osd.0 lv
    - name: zap test_group/data-lv1
      command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1"
      environment:
        CEPH_VOLUME_DEBUG: "1"

    - name: redeploy osd.0 using test_group/data-lv1
      command: "ceph-volume --cluster {{ cluster }} lvm create --bluestore --data test_group/data-lv1 --osd-id 0"
      environment:
        CEPH_VOLUME_DEBUG: "1"

    # stop osd.0 again so the prepare + activate workflow can be
    # exercised separately in the final play
    - name: stop ceph-osd@0 daemon
      service:
        name: ceph-osd@0
        state: stopped
# Release osd.0's id once more so the final play can re-prepare it
# with `ceph-volume lvm prepare` instead of `create`.
- hosts: mons
  become: true
  tasks:

    - name: destroy osd.0
      command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it"
# Exercise the two-step prepare + activate workflow for osd.0, then run
# the read-only inventory/list commands as a final smoke test.
- hosts: osds
  become: true
  tasks:

    - name: zap test_group/data-lv1
      command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1"
      environment:
        # environment values must be strings, hence the quotes
        CEPH_VOLUME_DEBUG: "1"

    - name: prepare osd.0 using test_group/data-lv1
      command: "ceph-volume --cluster {{ cluster }} lvm prepare --bluestore --data test_group/data-lv1 --osd-id 0"
      environment:
        CEPH_VOLUME_DEBUG: "1"

    - name: activate all to start the previously prepared osd.0
      command: "ceph-volume lvm activate --all"
      environment:
        CEPH_VOLUME_DEBUG: "1"

    - name: node inventory
      command: "ceph-volume inventory"
      environment:
        CEPH_VOLUME_DEBUG: "1"

    - name: list all OSDs
      command: "ceph-volume lvm list"
      environment:
        CEPH_VOLUME_DEBUG: "1"