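# Functional test for `ceph-volume lvm` with bluestore: stop two running
# OSDs, mark them destroyed, zap their backing devices, and redeploy them
# under their original IDs, first with the all-in-one `lvm create`, then
# with the split `lvm prepare` + `lvm activate` workflow.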
- hosts: osds
  become: yes
  tasks:

    - name: stop ceph-osd@2 daemon
      service:
        name: ceph-osd@2
        state: stopped

    - name: stop ceph-osd@0 daemon
      service:
        name: ceph-osd@0
        state: stopped

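# `ceph osd destroy` is a cluster-wide operation, so it runs from a mon
# node; it marks each OSD as destroyed while keeping its ID free for reuse.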
- hosts: mons
  become: yes
  tasks:

    - name: destroy osd.2
      command: "ceph --cluster {{ cluster }} osd destroy osd.2 --yes-i-really-mean-it"

    - name: destroy osd.0
      command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it"


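# Back on the OSD nodes: wipe the old devices, then redeploy both OSDs
# with their original IDs via --osd-id.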
- hosts: osds
  become: yes
  tasks:

    # osd.2 device
    - name: zap /dev/sdd1
      command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdd1 --destroy"
      environment:
        CEPH_VOLUME_DEBUG: 1

    # the partition was completely removed by zap --destroy, so re-create it
    - name: re-create partition /dev/sdd for lvm data usage
      parted:
        device: /dev/sdd
        number: 1
        part_start: 0%
        part_end: 50%
        unit: '%'
        label: gpt
        state: present

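    # `lvm create` performs prepare and activate in a single step; --osd-id 2
    # picks up the ID freed by `osd destroy` above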
    - name: redeploy osd.2 using /dev/sdd1
      command: "ceph-volume --cluster {{ cluster }} lvm create --bluestore --data /dev/sdd1 --osd-id 2"
      environment:
        CEPH_VOLUME_DEBUG: 1

    # osd.0 lv
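    # without --destroy, zap wipes the LV's contents but keeps the LV itself,
    # so it can be handed straight back to the new OSD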
    - name: zap test_group/data-lv1
      command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1"
      environment:
        CEPH_VOLUME_DEBUG: 1

    - name: redeploy osd.0 using test_group/data-lv1
      command: "ceph-volume --cluster {{ cluster }} lvm create --bluestore --data test_group/data-lv1 --osd-id 0"
      environment:
        CEPH_VOLUME_DEBUG: 1

    - name: stop ceph-osd@0 daemon
      service:
        name: ceph-osd@0
        state: stopped


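# osd.0 was stopped again above; destroying it once more lets the split
# prepare/activate workflow be exercised next.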
- hosts: mons
  become: yes
  tasks:

    - name: destroy osd.0
      command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it"


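# Redeploy osd.0 via `lvm prepare` followed by `lvm activate` rather than
# the all-in-one `lvm create` used earlier.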
- hosts: osds
  become: yes
  tasks:

    - name: zap test_group/data-lv1
      command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1"
      environment:
        CEPH_VOLUME_DEBUG: 1

    - name: prepare osd.0 using test_group/data-lv1
      command: "ceph-volume --cluster {{ cluster }} lvm prepare --bluestore --data test_group/data-lv1 --osd-id 0"
      environment:
        CEPH_VOLUME_DEBUG: 1

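    # `activate --all` scans the node's LVM metadata and starts every OSD it
    # finds, including the freshly prepared osd.0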
    - name: activate all to start the previously prepared osd.0
      command: "ceph-volume lvm activate --all"
      environment:
        CEPH_VOLUME_DEBUG: 1

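    # finish with the read-only reporting commands as a smoke test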
    - name: node inventory
      command: "ceph-volume inventory"
      environment:
        CEPH_VOLUME_DEBUG: 1

    - name: list all OSDs
      command: "ceph-volume lvm list"
      environment:
        CEPH_VOLUME_DEBUG: 1