X-Git-Url: https://git.proxmox.com/?a=blobdiff_plain;f=ceph%2Fqa%2Fstandalone%2Fosd%2Fosd-recovery-prio.sh;h=02b65f67a27050d19fe7db874bbee9e1c1690ba5;hb=20effc670b57271cb089376d6d0800990e5218d5;hp=fb386e265ab3525dce0b5cb0748f1a1a4c840964;hpb=eafe8130898c3d7229e1c84c100c2e62e32be0d0;p=ceph.git

diff --git a/ceph/qa/standalone/osd/osd-recovery-prio.sh b/ceph/qa/standalone/osd/osd-recovery-prio.sh
index fb386e265..02b65f67a 100755
--- a/ceph/qa/standalone/osd/osd-recovery-prio.sh
+++ b/ceph/qa/standalone/osd/osd-recovery-prio.sh
@@ -25,7 +25,10 @@ function run() {
     export CEPH_MON="127.0.0.1:7114" # git grep '\<7114\>' : there must be only one
     export CEPH_ARGS
     CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
-    CEPH_ARGS+="--mon-host=$CEPH_MON --osd_max_backfills=1 --debug_reserver=20"
+    CEPH_ARGS+="--mon-host=$CEPH_MON --osd_max_backfills=1 --debug_reserver=20 "
+    # Set osd op queue = wpq for the tests. Recovery priority is not
+    # considered by mclock_scheduler leading to unexpected results.
+    CEPH_ARGS+="--osd-op-queue=wpq "
     export objects=200
     export poolprefix=test
     export FORCE_PRIO="255"    # See OSD_RECOVERY_PRIORITY_FORCED
@@ -125,8 +128,8 @@ function TEST_recovery_priority() {
      fi
    done

-    ceph osd pool set $pool2 size 1
-    ceph osd pool set $pool3 size 1
+    ceph osd pool set $pool2 size 1 --yes-i-really-mean-it
+    ceph osd pool set $pool3 size 1 --yes-i-really-mean-it
    wait_for_clean || return 1

    dd if=/dev/urandom of=$dir/data bs=1M count=10
@@ -401,9 +404,9 @@ function TEST_recovery_pool_priority() {
    pool1_prio=$(expr $NORMAL_PRIO + $pool1_extra_prio)
    pool2_prio=$(expr $NORMAL_PRIO + $pool2_extra_prio)

-    ceph osd pool set $pool1 size 1
+    ceph osd pool set $pool1 size 1 --yes-i-really-mean-it
    ceph osd pool set $pool1 recovery_priority $pool1_extra_prio
-    ceph osd pool set $pool2 size 1
+    ceph osd pool set $pool2 size 1 --yes-i-really-mean-it
    ceph osd pool set $pool2 recovery_priority $pool2_extra_prio
    wait_for_clean || return 1

@@ -425,7 +428,31 @@ function TEST_recovery_pool_priority() {

    ceph osd pool set $pool1 size 2
    ceph osd pool set $pool2 size 2
-    sleep 10
+
+    # Wait for both PGs to be in recovering state
+    ceph pg dump pgs
+
+    # Wait for recovery to start
+    set -o pipefail
+    count=0
+    while(true)
+    do
+      if test $(ceph --format json pg dump pgs |
+                jq '.pg_stats | .[] | .state | contains("recovering")' | grep -c true) == "2"
+      then
+        break
+      fi
+      sleep 2
+      if test "$count" -eq "10"
+      then
+        echo "Recovery never started on both PGs"
+        return 1
+      fi
+      count=$(expr $count + 1)
+    done
+    set +o pipefail
+    ceph pg dump pgs
+
    CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${chk_osd1_1}) dump_recovery_reservations > $dir/dump.${chk_osd1_1}.out
    echo osd.${chk_osd1_1}
    cat $dir/dump.${chk_osd1_1}.out
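
Note (editorial, not part of the patch above): the comment added in the first hunk is the key rationale -- the mclock_scheduler does not take recovery priority into account, so the tests pin the OSDs to the wpq scheduler via CEPH_ARGS when the daemons are started; osd_op_queue is read at OSD start-up, which is why it is passed on the command line rather than changed at runtime. As a minimal sketch of how to confirm which scheduler a running OSD is actually using (osd.0 is just an example ID, not taken from the patch):

    # Query the active op queue scheduler through the cluster (mgr/mon must be reachable)
    ceph tell osd.0 config get osd_op_queue      # e.g. {"osd_op_queue": "wpq"}
    # Or locally on the OSD host, via the admin socket
    ceph daemon osd.0 config get osd_op_queue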