update sources to 12.2.10
diff --git a/ceph/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/dmcrypt/test.yml b/ceph/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/dmcrypt/test.yml
index c1ade2fbcd34e1ab6815d2da0a3687d03cf4474f..c48e4becece7d8ace417acde3699b373c0e76d98 100644
--- a/ceph/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/dmcrypt/test.yml
+++ b/ceph/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/dmcrypt/test.yml
@@ -8,20 +8,73 @@
         name: ceph-osd@2
         state: stopped
 
-    - name: destroy osd.2 
-      command: "ceph osd destroy osd.2 --yes-i-really-mean-it"
+    - name: stop ceph-osd@0 daemon
+      service:
+        name: ceph-osd@0
+        state: stopped
+
+
+- hosts: mons
+  become: yes
+  tasks:
+
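+    # "osd destroy" removes the cephx key but keeps the id in the cluster map,
+    # so ids 2 and 0 can be reused by the redeploy tasks below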
+    - name: destroy osd.2
+      command: "ceph --cluster {{ cluster }} osd destroy osd.2 --yes-i-really-mean-it"
+
+    - name: destroy osd.0
+      command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it"
+
+
+- hosts: osds
+  become: yes
+  tasks:
+
+    # osd.2 device
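+    # zap with --destroy wipes the data and removes the partition itself,
+    # leaving the device free to be set up again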
+    - name: zap /dev/sdd1
+      command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdd1 --destroy"
+      environment:
+        CEPH_VOLUME_DEBUG: 1
+
+    - name: zap /dev/sdd2
+      command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdd2 --destroy"
+      environment:
+        CEPH_VOLUME_DEBUG: 1
+
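+    # "lvm create" runs prepare + activate in one step; --osd-id 2 reuses the
+    # id released by "osd destroy" above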
+    - name: redeploy osd.2 using /dev/sdd1
+      command: "ceph-volume --cluster {{ cluster }} lvm create --filestore --data /dev/sdd1 --journal /dev/sdd2 --osd-id 2"
+      environment:
+        CEPH_VOLUME_DEBUG: 1
+
+    # osd.0 lv
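+    # without --destroy the lv is only wiped, not removed, so it can be reused
+    # for the new osd.0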
+    - name: zap test_group/data-lv1
+      command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1"
+      environment:
+        CEPH_VOLUME_DEBUG: 1
+
+    - name: zap /dev/sdc1
+      command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdc1 --destroy"
+      environment:
+        CEPH_VOLUME_DEBUG: 1
 
-    - name: zap /dev/sdd1 
-      command: "ceph-volume lvm zap /dev/sdd1 --destroy"
+    - name: prepare osd.0 again using test_group/data-lv1
+      command: "ceph-volume --cluster {{ cluster }} lvm prepare --filestore --data test_group/data-lv1 --journal /dev/sdc1 --osd-id 0"
       environment:
         CEPH_VOLUME_DEBUG: 1
 
-    - name: zap /dev/sdd2 
-      command: "ceph-volume lvm zap /dev/sdd2 --destroy"
+    - name: activate all to start the previously prepared osd.0
+      command: "ceph-volume lvm activate --filestore --all"
       environment:
         CEPH_VOLUME_DEBUG: 1
 
-    - name: redeploy osd.2 using /dev/sdd1 
-      command: "ceph-volume lvm create --filestore --data /dev/sdd1 --journal /dev/sdd2 --osd-id 2"
+    - name: list all OSDs
+      command: "ceph-volume lvm list"
       environment:
         CEPH_VOLUME_DEBUG: 1