# ceph/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/dmcrypt/test.yml
# (functional test playbook; originally viewed via git blame on git.proxmox.com ceph.git mirror)
---
# Stop the ceph-osd daemons for osd.2 and osd.0 so their backing devices
# can be destroyed, zapped, and redeployed by the plays below.
- hosts: osds
  become: true
  tasks:

    - name: stop ceph-osd@2 daemon
      service:
        name: ceph-osd@2
        state: stopped

    - name: stop ceph-osd@0 daemon
      service:
        name: ceph-osd@0
        state: stopped
# From a monitor node, mark both stopped OSDs as destroyed in the cluster
# map so their IDs (2 and 0) can be reused when the OSDs are recreated.
- hosts: mons
  become: true
  tasks:

    - name: destroy osd.2
      command: "ceph --cluster {{ cluster }} osd destroy osd.2 --yes-i-really-mean-it"

    - name: destroy osd.0
      command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it"
# Zap both OSDs' backing storage with ceph-volume and redeploy them,
# reusing their original OSD IDs via --osd-id. osd.0 is then stopped
# again so the separate prepare/activate workflow can be exercised below.
- hosts: osds
  become: true
  tasks:

    # osd.2 device
    - name: zap /dev/sdd1
      command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdd1 --destroy"
      environment:
        CEPH_VOLUME_DEBUG: "1"

    # partitions have been completely removed, so re-create them again
    - name: re-create partition /dev/sdd for lvm data usage
      parted:
        device: /dev/sdd
        number: 1
        part_start: '0%'
        part_end: '50%'
        unit: '%'
        label: gpt
        state: present

    - name: redeploy osd.2 using /dev/sdd1
      command: "ceph-volume --cluster {{ cluster }} lvm create --bluestore --data /dev/sdd1 --osd-id 2"
      environment:
        CEPH_VOLUME_DEBUG: "1"

    # osd.0 lv
    - name: zap test_group/data-lv1
      command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1"
      environment:
        CEPH_VOLUME_DEBUG: "1"

    - name: redeploy osd.0 using test_group/data-lv1
      command: "ceph-volume --cluster {{ cluster }} lvm create --bluestore --data test_group/data-lv1 --osd-id 0"
      environment:
        CEPH_VOLUME_DEBUG: "1"

    # stop osd.0 again so it can be destroyed and redeployed via
    # prepare + activate in the following plays
    - name: stop ceph-osd@0 daemon
      service:
        name: ceph-osd@0
        state: stopped
68
# Destroy osd.0 once more so the next play can redeploy it using the
# two-step 'lvm prepare' + 'lvm activate' workflow instead of 'lvm create'.
- hosts: mons
  become: true
  tasks:

    - name: destroy osd.0
      command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it"
# Redeploy osd.0 via the split prepare + activate workflow, then run the
# read-only reporting commands (inventory, list) as a smoke test.
- hosts: osds
  become: true
  tasks:

    - name: zap test_group/data-lv1
      command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1"
      environment:
        CEPH_VOLUME_DEBUG: "1"

    - name: prepare osd.0 using test_group/data-lv1
      command: "ceph-volume --cluster {{ cluster }} lvm prepare --bluestore --data test_group/data-lv1 --osd-id 0"
      environment:
        CEPH_VOLUME_DEBUG: "1"

    # NOTE(review): unlike the commands above, no --cluster {{ cluster }}
    # is passed here — presumably relies on the default cluster name;
    # confirm against the test harness before adding it.
    - name: activate all to start the previously prepared osd.0
      command: "ceph-volume lvm activate --all"
      environment:
        CEPH_VOLUME_DEBUG: "1"

    - name: node inventory
      command: "ceph-volume inventory"
      environment:
        CEPH_VOLUME_DEBUG: "1"

    - name: list all OSDs
      command: "ceph-volume lvm list"
      environment:
        CEPH_VOLUME_DEBUG: "1"