# ceph/src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/test_filestore.yml
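# Functional test for ceph-volume's filestore flow: stop two running OSDs,
# destroy them on the monitors, zap the devices and LVs that backed them, and
# redeploy them with "lvm create" / "lvm prepare" while reusing the original
# OSD ids. A throwaway volume group on a loop device is then created to
# exercise "lvm zap --destroy" on plain LVs.

# The OSD daemons have to be stopped before their backing devices can be
# zapped and reused below.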
- hosts: osds
  become: yes
  tasks:

    - name: stop ceph-osd@2 daemon
      service:
        name: ceph-osd@2
        state: stopped

    - name: stop ceph-osd@0 daemon
      service:
        name: ceph-osd@0
        state: stopped

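# "ceph osd destroy" removes the OSDs' cephx keys and marks them destroyed,
# but keeps the ids allocated so they can be reclaimed by the --osd-id
# arguments in the redeploy steps below.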
- hosts: mons
  become: yes
  tasks:

    - name: destroy osd.2
      command: "ceph --cluster {{ cluster }} osd destroy osd.2 --yes-i-really-mean-it"

    - name: destroy osd.0
      command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it"

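# Back on the OSD hosts: wipe the devices and LVs that backed osd.2 and osd.0,
# then recreate both OSDs on the same storage, reusing the ids that were just
# destroyed.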
- hosts: osds
  become: yes
  tasks:

    # osd.2 device
    - name: zap /dev/sdd1
      command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdd1 --destroy"
      environment:
        CEPH_VOLUME_DEBUG: 1

    # osd.2 journal
    - name: zap /dev/sdd2
      command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdd2 --destroy"
      environment:
        CEPH_VOLUME_DEBUG: 1

    - name: redeploy osd.2 using /dev/sdd1
      command: "ceph-volume --cluster {{ cluster }} lvm create --filestore --data /dev/sdd1 --journal /dev/sdd2 --osd-id 2"
      environment:
        CEPH_VOLUME_DEBUG: 1

    # osd.0 data lv
    # note: we don't use --destroy here, to test that zapping works without that flag.
    # --destroy is used in the bluestore tests
    - name: zap test_group/data-lv1
      command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1"
      environment:
        CEPH_VOLUME_DEBUG: 1

    # osd.0 journal device (zap without the --destroy flag that removes the LV)
    - name: zap /dev/sdc1
      command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdc1"
      environment:
        CEPH_VOLUME_DEBUG: 1

    - name: prepare osd.0 again using test_group/data-lv1
      command: "ceph-volume --cluster {{ cluster }} lvm prepare --filestore --data test_group/data-lv1 --journal /dev/sdc1 --osd-id 0"
      environment:
        CEPH_VOLUME_DEBUG: 1

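    # Unlike "lvm create" (used for osd.2 above), "lvm prepare" does not start
    # the daemon, so osd.0 needs an explicit activate.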
    - name: activate all to start the previously prepared osd.0
      command: "ceph-volume lvm activate --filestore --all"
      environment:
        CEPH_VOLUME_DEBUG: 1

    - name: list all OSDs
      command: "ceph-volume lvm list"
      environment:
        CEPH_VOLUME_DEBUG: 1

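    # The remaining tasks build a throwaway volume group (test_zap) on a loop
    # device backed by a sparse file, to exercise "lvm zap --destroy" against
    # LVs that were never part of an OSD.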
    - name: create temporary directory
      tempfile:
        state: directory
        suffix: sparse
      register: tmpdir

    - name: create a 5GB sparse file
      command: fallocate -l 5G {{ tmpdir.path }}/sparse.file

    - name: find an empty loop device
      command: losetup -f
      register: losetup_list

    - name: setup loop device with sparse file
      command: losetup {{ losetup_list.stdout }} {{ tmpdir.path }}/sparse.file

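    # failed_when: false keeps the play going even if vgcreate/lvcreate fail,
    # for example because the VG and LVs are left over from a previous run.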
    - name: create volume group
      command: vgcreate test_zap {{ losetup_list.stdout }}
      failed_when: false

    - name: create logical volume 1
      command: lvcreate --yes -l 50%FREE -n data-lv1 test_zap
      failed_when: false

    - name: create logical volume 2
      command: lvcreate --yes -l 50%FREE -n data-lv2 test_zap
      failed_when: false

    # zapping the first lv shouldn't remove the vg, allowing the second zap to succeed
    - name: zap test_zap/data-lv1
      command: "ceph-volume --cluster {{ cluster }} lvm zap --destroy test_zap/data-lv1"
      environment:
        CEPH_VOLUME_DEBUG: 1

    - name: zap test_zap/data-lv2
      command: "ceph-volume --cluster {{ cluster }} lvm zap --destroy test_zap/data-lv2"
      environment:
        CEPH_VOLUME_DEBUG: 1