# ceph/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/filestore/dmcrypt/test.yml
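#
# Functional test for the dmcrypt filestore scenario: stop two deployed OSDs
# (osd.2 on raw partitions, osd.0 on an LV), destroy them, zap and re-create
# the underlying devices, then redeploy both with their original OSD ids.
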
- hosts: osds
  become: yes
  tasks:

    - name: stop ceph-osd@2 daemon
      service:
        name: ceph-osd@2
        state: stopped

    - name: stop ceph-osd@0 daemon
      service:
        name: ceph-osd@0
        state: stopped

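# On the monitors: mark both OSDs down, then destroy them. "osd destroy"
# revokes the OSD's keys but keeps its id in the CRUSH map, which is what
# lets the redeploy steps below reuse ids 0 and 2.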
- hosts: mons
  become: yes
  tasks:
    - name: mark osds down
      command: "ceph --cluster {{ cluster }} osd down osd.{{ item }}"
      with_items:
        - 0
        - 2

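    # retried until the command succeeds, since the monitors may need a
    # moment to register the OSDs as down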
    - name: destroy osd.2
      command: "ceph --cluster {{ cluster }} osd destroy osd.2 --yes-i-really-mean-it"
      register: result
      retries: 30
      delay: 1
      until: result is succeeded

    - name: destroy osd.0
      command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it"
      register: result
      retries: 30
      delay: 1
      until: result is succeeded

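# Back on the OSD hosts: zap the old devices, then redeploy both OSDs.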
- hosts: osds
  become: yes
  tasks:

    # osd.2 device
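    # zap with --destroy removes the partitions themselves, so /dev/vdd must
    # be re-partitioned before osd.2 can be redeployed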
    - name: zap /dev/vdd1
      command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/vdd1 --destroy"
      environment:
        CEPH_VOLUME_DEBUG: 1

    - name: zap /dev/vdd2
      command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/vdd2 --destroy"
      environment:
        CEPH_VOLUME_DEBUG: 1

    # the partitions were removed entirely by zap --destroy, so re-create them
    - name: re-create partition /dev/vdd for lvm data usage
      parted:
        device: /dev/vdd
        number: 1
        part_start: 0%
        part_end: 50%
        unit: '%'
        label: gpt
        state: present

    - name: re-create partition /dev/vdd for lvm journals
      parted:
        device: /dev/vdd
        number: 2
        part_start: 50%
        part_end: 100%
        unit: '%'
        label: gpt
        state: present

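    # "lvm create" combines prepare and activate in a single step; --osd-id 2
    # reuses the id released by "osd destroy" above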
    - name: redeploy osd.2 using /dev/vdd1
      command: "ceph-volume --cluster {{ cluster }} lvm create --filestore --data /dev/vdd1 --journal /dev/vdd2 --osd-id 2"
      environment:
        CEPH_VOLUME_DEBUG: 1

    # osd.0 lv
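    # zapping the LV without --destroy wipes its contents but keeps the LV in
    # place for reuse, while the journal partition /dev/vdc1 is destroyed and
    # re-created below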
    - name: zap test_group/data-lv1
      command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1"
      environment:
        CEPH_VOLUME_DEBUG: 1

    - name: zap /dev/vdc1
      command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/vdc1 --destroy"
      environment:
        CEPH_VOLUME_DEBUG: 1

    - name: re-create partition /dev/vdc1
      parted:
        device: /dev/vdc
        number: 1
        part_start: 0%
        part_end: 50%
        unit: '%'
        label: gpt
        state: present

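    # unlike osd.2, osd.0 is only prepared here; the activate task below
    # exercises the two-step prepare/activate workflow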
    - name: prepare osd.0 again using test_group/data-lv1
      command: "ceph-volume --cluster {{ cluster }} lvm prepare --filestore --data test_group/data-lv1 --journal /dev/vdc1 --osd-id 0"
      environment:
        CEPH_VOLUME_DEBUG: 1

    - name: activate all to start the previously prepared osd.0
      command: "ceph-volume lvm activate --filestore --all"
      environment:
        CEPH_VOLUME_DEBUG: 1

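    # final smoke checks: inventory should report the node's devices, and
    # lvm list should report the redeployed OSDs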
    - name: node inventory
      command: "ceph-volume inventory"
      environment:
        CEPH_VOLUME_DEBUG: 1

    - name: list all OSDs
      command: "ceph-volume lvm list"
      environment:
        CEPH_VOLUME_DEBUG: 1