git.proxmox.com Git - ceph.git/blobdiff - ceph/qa/suites/rados/singleton/all/recovery-preemption.yaml
update sources to ceph Nautilus 14.2.1
index 7507bf635ec2d76c51e62701d8e890abc2609f42..27338d4429ff0bbd0031528d3368123fcdfc73fe 100644
--- a/ceph/qa/suites/rados/singleton/all/recovery-preemption.yaml
+++ b/ceph/qa/suites/rados/singleton/all/recovery-preemption.yaml
@@ -17,19 +17,25 @@ tasks:
     conf:
       osd:
         osd recovery sleep: .1
-        osd min pg log entries: 100
+        osd min pg log entries: 10
         osd max pg log entries: 1000
+        osd pg log trim min: 10
     log-whitelist:
       - \(POOL_APP_NOT_ENABLED\)
       - \(OSDMAP_FLAGS\)
       - \(OSD_
       - \(OBJECT_
       - \(PG_
+      - \(SLOW_OPS\)
       - overall HEALTH
 - exec:
     osd.0:
       - ceph osd pool create foo 128
       - ceph osd pool application enable foo foo
+      - sleep 5
+- ceph.healthy:
+- exec:
+    osd.0:
       - rados -p foo bench 30 write -b 4096 --no-cleanup
       - ceph osd out 0
       - sleep 5
@@ -43,8 +49,7 @@ tasks:
       - rados -p foo bench 3 write -b 4096 --no-cleanup
       - ceph osd unset noup
       - sleep 10
-      - ceph tell osd.* config set osd_recovery_sleep 0
-      - ceph tell osd.* config set osd_recovery_max_active 20
+      - for f in 0 1 2 3 ; do sudo ceph daemon osd.$f config set osd_recovery_sleep 0 ; sudo ceph daemon osd.$f config set osd_recovery_max_active 20 ; done
 - ceph.healthy:
 - exec:
     osd.0:
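
For reference (not part of the commit): the options touched above can be checked on a running test cluster through the same per-OSD admin sockets that the rewritten exec step uses. A minimal sketch, assuming OSD ids 0-3 as used in the loop above:

# Sketch only: confirm the lowered pg-log settings and the recovery tuning
# actually reached each OSD, via the same admin-socket interface as the exec step.
for f in 0 1 2 3 ; do
    sudo ceph daemon osd.$f config get osd_min_pg_log_entries   # expect 10
    sudo ceph daemon osd.$f config get osd_pg_log_trim_min      # expect 10
    sudo ceph daemon osd.$f config get osd_recovery_sleep       # 0 once the exec step has run
done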