# Copyright (C) 2015 Red Hat <contact@redhat.com>
#
# Author: David Zafman <dzafman@redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Library Public License for more details.
# Pull in the standalone-test helper functions (run_mon, run_osd, pg_scrub,
# create_pool, multidiff, kill_daemons, ...).
source $CEPH_ROOT/qa/standalone/ceph-helpers.sh

# Test development and debugging
# Set to "yes" in order to ignore diff errors and save results to update test
# NOTE(review): this assignment was lost in the garbled copy; the later
# `test $getjson = "yes"` checks require it to be set — confirm against upstream.
getjson="no"

# jq filter used to extract the inconsistency records from the
# list-inconsistent-* JSON output.
jqfilter='.inconsistents'
# Python one-liner that canonicalizes JSON (sorted keys, fixed indent) so two
# documents can be compared textually with multidiff.
sortkeys='import json; import sys ; JSON=sys.stdin.read() ; ud = json.loads(JSON) ; print ( json.dumps(ud, sort_keys=True, indent=2) )'
# NOTE(review): extraction artifact — the leading integers below are
# original-file line numbers fused into the text, and the enclosing
# "function run() {" header (the standard ceph-helpers test driver) appears to
# have been dropped along with its `local dir=$1` prologue and closing lines.
# TODO: restore this function from upstream before running.
30 export CEPH_MON
="127.0.0.1:7121" # git grep '\<7121\>' : there must be only one
# Cluster-wide arguments: a fresh fsid per run, authentication disabled, and
# the fixed monitor address above.
32 CEPH_ARGS
+="--fsid=$(uuidgen) --auth-supported=none "
33 CEPH_ARGS
+="--mon-host=$CEPH_MON "
# Un-export so the CLI duplicate-command check does not interfere.
35 export -n CEPH_CLI_TEST_DUP_COMMAND
# Run either the functions named on the command line, or every TEST_*
# function defined in this file.
36 local funcs
=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
# Each test gets a fresh test directory: setup / run / teardown.
37 for func
in $funcs ; do
38 setup
$dir ||
return 1
39 $func $dir ||
return 1
40 teardown
$dir ||
return 1
44 function create_scenario
() {
# create_scenario: populate a pool with snapshotted objects, stop the OSD,
# then use ceph-objectstore-tool / ceph-kvstore-tool directly on the stopped
# OSD's store to inject a catalogue of snapshot inconsistencies (missing
# clones, wrong on-disk sizes, removed/corrupt snapset attrs, snapmapper
# damage) for the scrub tests to detect.
# NOTE(review): extraction garbling — leading integers are original-file line
# numbers fused into the text; the parameter prologue (presumably
# dir/poolname/TESTDATA/osd), loop headers (`for`/`do`/`done`), the SNAP
# counter updates and the closing brace were dropped.  TODO: restore from
# upstream before executing.
# --- Interleave snapshots with rewrites of obj1/obj3/obj5 so the clones of
# --- each object diverge across snapshots.
51 rados
-p $poolname mksnap snap
${SNAP}
52 dd if=/dev
/urandom of
=$TESTDATA bs
=256 count
=${SNAP}
53 rados
-p $poolname put obj1
$TESTDATA
54 rados
-p $poolname put obj5
$TESTDATA
55 rados
-p $poolname put obj3
$TESTDATA
# Loop body (header lost): rewrite every obj${i} with the current TESTDATA.
57 do rados
-p $poolname put obj
${i} $TESTDATA
61 rados
-p $poolname mksnap snap
${SNAP}
62 dd if=/dev
/urandom of
=$TESTDATA bs
=256 count
=${SNAP}
63 rados
-p $poolname put obj5
$TESTDATA
66 rados
-p $poolname mksnap snap
${SNAP}
67 dd if=/dev
/urandom of
=$TESTDATA bs
=256 count
=${SNAP}
68 rados
-p $poolname put obj3
$TESTDATA
71 rados
-p $poolname mksnap snap
${SNAP}
72 dd if=/dev
/urandom of
=$TESTDATA bs
=256 count
=${SNAP}
73 rados
-p $poolname put obj5
$TESTDATA
74 rados
-p $poolname put obj2
$TESTDATA
77 rados
-p $poolname mksnap snap
${SNAP}
79 rados
-p $poolname mksnap snap
${SNAP}
80 dd if=/dev
/urandom of
=$TESTDATA bs
=256 count
=${SNAP}
81 rados
-p $poolname put obj5
$TESTDATA
84 rados
-p $poolname mksnap snap
${SNAP}
# Delete some heads so their clones become orphans in later injections.
86 rados
-p $poolname rm obj4
87 rados
-p $poolname rm obj16
88 rados
-p $poolname rm obj2
# Stop the OSD so the object store can be edited offline.
90 kill_daemons
$dir TERM osd ||
return 1
92 # Don't need to use ceph_objectstore_tool() function because osd stopped
# obj1: force-remove the head, leaving an unexpected clone behind.
94 JSON
="$(ceph-objectstore-tool --data-path $dir/${osd} --head --op list obj1)"
95 ceph-objectstore-tool
--data-path $dir/${osd} "$JSON" --force remove ||
return 1
# obj5: remove clone snapid 2 entirely.
97 JSON
="$(ceph-objectstore-tool --data-path $dir/${osd} --op list obj5 | grep \"snapid\":2)"
98 ceph-objectstore-tool
--data-path $dir/${osd} "$JSON" remove ||
return 1
100 JSON
="$(ceph-objectstore-tool --data-path $dir/${osd} --op list obj5 | grep \"snapid\":1)"
102 # Starts with a snapmap
103 ceph-kvstore-tool bluestore-kv
$dir/${osd} list
2> /dev
/null
> $dir/drk.log
104 grep SNA_
$dir/drk.log
105 grep "^[pm].*SNA_.*[.]1[.]obj5[.][.]$" $dir/drk.log ||
return 1
# obj5 snapid 1: remove the object but deliberately keep its snapmapper
# entry (--rmtype nosnapmap), creating a mapper/store mismatch.
106 ceph-objectstore-tool
--data-path $dir/${osd} --rmtype nosnapmap
"$JSON" remove ||
return 1
107 # Check that snapmap is still there
108 ceph-kvstore-tool bluestore-kv
$dir/${osd} list
2> /dev
/null
> $dir/drk.log
109 grep SNA_
$dir/drk.log
110 grep "^[pm].*SNA_.*[.]1[.]obj5[.][.]$" $dir/drk.log ||
return 1
# obj5 snapid 4: overwrite with 18*256 bytes so the on-disk size disagrees
# with the recorded object-info size.
113 JSON
="$(ceph-objectstore-tool --data-path $dir/${osd} --op list obj5 | grep \"snapid\":4)"
114 dd if=/dev
/urandom of
=$TESTDATA bs
=256 count
=18
115 ceph-objectstore-tool
--data-path $dir/${osd} "$JSON" set-bytes
$TESTDATA ||
return 1
# obj3 head: same size-mismatch injection (15*256 bytes).
117 JSON
="$(ceph-objectstore-tool --data-path $dir/${osd} --head --op list obj3)"
118 dd if=/dev
/urandom of
=$TESTDATA bs
=256 count
=15
119 ceph-objectstore-tool
--data-path $dir/${osd} "$JSON" set-bytes
$TESTDATA ||
return 1
# obj4: remove clone snapid 7 so the head expects a clone that is missing.
121 JSON
="$(ceph-objectstore-tool --data-path $dir/${osd} --op list obj4 | grep \"snapid\":7)"
122 ceph-objectstore-tool
--data-path $dir/${osd} "$JSON" remove ||
return 1
124 # Starts with a snapmap
125 ceph-kvstore-tool bluestore-kv
$dir/${osd} list
2> /dev
/null
> $dir/drk.log
126 grep SNA_
$dir/drk.log
127 grep "^[pm].*SNA_.*[.]7[.]obj16[.][.]$" $dir/drk.log ||
return 1
# obj16 snapid 7: remove ONLY the snapmapper entry (--rmtype snapmap),
# the inverse mismatch of the obj5 injection above.
128 JSON
="$(ceph-objectstore-tool --data-path $dir/${osd} --op list obj16 | grep \"snapid\":7)"
129 ceph-objectstore-tool
--data-path $dir/${osd} --rmtype snapmap
"$JSON" remove ||
return 1
130 # Check that snapmap is now removed
131 ceph-kvstore-tool bluestore-kv
$dir/${osd} list
2> /dev
/null
> $dir/drk.log
132 grep SNA_
$dir/drk.log
133 ! grep "^[pm].*SNA_.*[.]7[.]obj16[.][.]$" $dir/drk.log ||
return 1
# obj2: strip the snapset attribute from the head.
136 JSON
="$(ceph-objectstore-tool --data-path $dir/${osd} --head --op list obj2)"
137 ceph-objectstore-tool
--data-path $dir/${osd} "$JSON" rm-attr snapset ||
return 1
139 # Create a clone which isn't in snapset and doesn't have object info
# NOTE(review): $OBJ5SAVE is presumably a saved obj5 listing captured on a
# line lost in extraction — confirm against upstream.
140 JSON
="$(echo "$OBJ5SAVE" | sed s/snapid\":1/snapid\":7/)"
141 dd if=/dev
/urandom of
=$TESTDATA bs
=256 count
=7
142 ceph-objectstore-tool
--data-path $dir/${osd} "$JSON" set-bytes
$TESTDATA ||
return 1
# obj6..obj14: clear-snapset with various corruption sub-modes so each object
# exhibits a distinct snapset defect for the scrub to report.
144 JSON
="$(ceph-objectstore-tool --data-path $dir/${osd} --head --op list obj6)"
145 ceph-objectstore-tool
--data-path $dir/${osd} "$JSON" clear-snapset ||
return 1
146 JSON
="$(ceph-objectstore-tool --data-path $dir/${osd} --head --op list obj7)"
147 ceph-objectstore-tool
--data-path $dir/${osd} "$JSON" clear-snapset corrupt ||
return 1
148 JSON
="$(ceph-objectstore-tool --data-path $dir/${osd} --head --op list obj8)"
149 ceph-objectstore-tool
--data-path $dir/${osd} "$JSON" clear-snapset
seq ||
return 1
150 JSON
="$(ceph-objectstore-tool --data-path $dir/${osd} --head --op list obj9)"
151 ceph-objectstore-tool
--data-path $dir/${osd} "$JSON" clear-snapset clone_size ||
return 1
152 JSON
="$(ceph-objectstore-tool --data-path $dir/${osd} --head --op list obj10)"
153 ceph-objectstore-tool
--data-path $dir/${osd} "$JSON" clear-snapset clone_overlap ||
return 1
154 JSON
="$(ceph-objectstore-tool --data-path $dir/${osd} --head --op list obj11)"
155 ceph-objectstore-tool
--data-path $dir/${osd} "$JSON" clear-snapset clones ||
return 1
156 JSON
="$(ceph-objectstore-tool --data-path $dir/${osd} --head --op list obj12)"
157 ceph-objectstore-tool
--data-path $dir/${osd} "$JSON" clear-snapset
head ||
return 1
158 JSON
="$(ceph-objectstore-tool --data-path $dir/${osd} --head --op list obj13)"
159 ceph-objectstore-tool
--data-path $dir/${osd} "$JSON" clear-snapset snaps ||
return 1
160 JSON
="$(ceph-objectstore-tool --data-path $dir/${osd} --head --op list obj14)"
161 ceph-objectstore-tool
--data-path $dir/${osd} "$JSON" clear-snapset size ||
return 1
# obj15: set an undecodable snapset attribute (literal "garbage").
163 echo "garbage" > $dir/bad
164 JSON
="$(ceph-objectstore-tool --data-path $dir/${osd} --head --op list obj15)"
165 ceph-objectstore-tool
--data-path $dir/${osd} "$JSON" set-attr snapset
$dir/bad ||
return 1
170 function TEST_scrub_snaps
() {
# TEST_scrub_snaps: single-OSD (pool size 1) end-to-end test.  Builds the
# corrupt scenario via create_scenario, scrubs the one PG, then verifies the
# rados list-inconsistent-* JSON output and the expected cluster-log errors.
# NOTE(review): extraction garbling — leading integers are original-file line
# numbers fused into the text; heredoc bodies (the expected JSON between the
# "<< EOF" lines), loop/if terminators, local declarations (presumably
# poolname/OSDS/OBJS) and the closing brace were dropped.  TODO: restore from
# upstream before running.
176 TESTDATA
="testdata.$$"
# Bring up one mon, one mgr and the OSDs.
178 run_mon
$dir a
--osd_pool_default_size=$OSDS ||
return 1
179 run_mgr
$dir x ||
return 1
180 for osd
in $
(seq 0 $
(expr $OSDS - 1))
182 run_osd
$dir $osd ||
return 1
185 # All scrubs done manually. Don't want any unexpected scheduled scrubs.
186 ceph osd
set noscrub ||
return 1
187 ceph osd
set nodeep-scrub ||
return 1
189 # Create a pool with a single pg
190 create_pool
$poolname 1 1
191 wait_for_clean ||
return 1
# Extract the numeric pool id of the "test" pool from the osd dump.
192 poolid
=$
(ceph osd dump |
grep "^pool.*[']test[']" |
awk '{ print $2 }')
# Seed every obj${i} with 1032 bytes of random data.
194 dd if=/dev
/urandom of
=$TESTDATA bs
=1032 count
=1
195 for i
in `seq 1 $OBJS`
197 rados
-p $poolname put obj
${i} $TESTDATA
200 local primary
=$
(get_primary
$poolname obj1
)
# Inject the corruption on the (stopped) primary OSD.
202 create_scenario
$dir $poolname $TESTDATA $primary ||
return 1
# Restart the OSDs that create_scenario stopped.
206 for osd
in $
(seq 0 $
(expr $OSDS - 1))
208 activate_osd
$dir $osd ||
return 1
# Small scrub chunks force multiple _scan_snaps passes over the objects.
210 ceph tell osd.
* config
set osd_shallow_scrub_chunk_max
25
211 ceph tell osd.
* config
set osd_shallow_scrub_chunk_min
5
212 ceph tell osd.
* config
set osd_pg_stat_report_interval_max
1
215 wait_for_clean ||
return 1
# Log the effective scrub-related settings for debugging.
217 ceph tell osd.
* config get osd_shallow_scrub_chunk_max
218 ceph tell osd.
* config get osd_shallow_scrub_chunk_min
219 ceph tell osd.
* config get osd_pg_stat_report_interval_max
220 ceph tell osd.
* config get osd_scrub_chunk_max
221 ceph tell osd.
* config get osd_scrub_chunk_min
223 local pgid
="${poolid}.0"
224 if ! pg_scrub
"$pgid" ; then
# With the chunk sizes above the scrub should scan snaps in exactly 2 chunks.
228 test "$(grep "_scan_snaps start
" $dir/osd.${primary}.log | wc -l)" = "2" ||
return 1
# Exactly one PG must be reported inconsistent, and it must be ours.
230 rados list-inconsistent-pg
$poolname > $dir/json ||
return 1
232 test $
(jq
'. | length' $dir/json
) = "1" ||
return 1
234 test $
(jq
-r '.[0]' $dir/json
) = $pgid ||
return 1
236 rados list-inconsistent-obj
$pgid > $dir/json ||
return 1
238 # The injected snapshot errors with a single copy pool doesn't
239 # see object errors because all the issues are detected by
# Compare canonicalized actual output against the expected JSON (the heredoc
# body that followed "<< EOF" was lost in extraction).
241 jq
"$jqfilter" << EOF | python3 -c "$sortkeys" > $dir/checkcsjson
248 jq
"$jqfilter" $dir/json | python3
-c "$sortkeys" > $dir/csjson
249 multidiff
$dir/checkcsjson
$dir/csjson ||
test $getjson = "yes" ||
return 1
251 rados list-inconsistent-snapset
$pgid > $dir/json ||
return 1
253 jq
"$jqfilter" << EOF | python3 -c "$sortkeys" > $dir/checkcsjson
632 jq
"$jqfilter" $dir/json | python3
-c "$sortkeys" > $dir/csjson
633 multidiff
$dir/checkcsjson
$dir/csjson ||
test $getjson = "yes" ||
return 1
# In development mode ($getjson = yes) save the output for updating the test.
634 if test $getjson = "yes"
636 jq
'.' $dir/json
> save1.json
# Optionally validate the output against the documented JSON schema.
639 if test "$LOCALRUN" = "yes" && which jsonschema
> /dev
/null
;
641 jsonschema
-i $dir/json
$CEPH_ROOT/doc
/rados
/command
/list-inconsistent-snap.json ||
return 1
# Record OSD pids so a crash (pid gone) can be detected later.
644 pidfiles
=$
(find $dir 2>/dev
/null |
grep 'osd[^/]*\.pid')
646 for pidfile
in ${pidfiles}
648 pids
+="$(cat $pidfile) "
# Remove the snapshots and wait for snaptrim to finish without errors.
655 rados
-p $poolname rmsnap snap
$i
659 while ceph pg dump pgs |
grep -q snaptrim
;
661 if ceph pg dump pgs |
grep -q snaptrim_error
;
667 if (( $loop >= 10 )) ; then
668 ERRORS
=$
(expr $ERRORS + 1)
678 echo "OSD Crash occurred"
679 ERRORS
=$
(expr $ERRORS + 1)
683 kill_daemons
$dir ||
return 1
# Every injected inconsistency must have produced its cluster-log message.
685 declare -a err_strings
686 err_strings
[0]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*::obj10:.* : is missing in clone_overlap"
687 err_strings
[1]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*::obj5:7 : no '_' attr"
688 err_strings
[2]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*::obj5:7 : is an unexpected clone"
689 err_strings
[3]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*::obj5:4 : on disk size [(]4608[)] does not match object info size [(]512[)] adjusted for ondisk to [(]512[)]"
690 err_strings
[4]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj5:head : expected clone .*:::obj5:2"
691 err_strings
[5]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj5:head : expected clone .*:::obj5:1"
692 err_strings
[6]="log_channel[(]cluster[)] log [[]INF[]] : scrub [0-9]*[.]0 .*:::obj5:head : 2 missing clone[(]s[)]"
693 err_strings
[7]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj8:head : snaps.seq not set"
694 err_strings
[8]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj7:1 : is an unexpected clone"
695 err_strings
[9]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj3:head : on disk size [(]3840[)] does not match object info size [(]768[)] adjusted for ondisk to [(]768[)]"
696 err_strings
[10]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj6:1 : is an unexpected clone"
697 err_strings
[11]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj2:head : no 'snapset' attr"
698 err_strings
[12]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj2:7 : clone ignored due to missing snapset"
699 err_strings
[13]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj2:4 : clone ignored due to missing snapset"
700 err_strings
[14]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj4:head : expected clone .*:::obj4:7"
701 err_strings
[15]="log_channel[(]cluster[)] log [[]INF[]] : scrub [0-9]*[.]0 .*:::obj4:head : 1 missing clone[(]s[)]"
702 err_strings
[16]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj1:1 : is an unexpected clone"
703 err_strings
[17]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj9:1 : is missing in clone_size"
704 err_strings
[18]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj11:1 : is an unexpected clone"
705 err_strings
[19]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj14:1 : size 1032 != clone_size 1033"
706 err_strings
[20]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 scrub 20 errors"
707 err_strings
[21]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj15:head : can't decode 'snapset' attr "
708 err_strings
[22]="log_channel[(]cluster[)] log [[]ERR[]] : osd[.][0-9]* found snap mapper error on pg 1.0 oid 1:461f8b5e:::obj16:7 snaps missing in mapper, should be: {1, 2, 3, 4, 5, 6, 7} ...repaired"
# Count any expected message missing from the primary's log as an error.
710 for err_string
in "${err_strings[@]}"
712 if ! grep "$err_string" $dir/osd.
${primary}.log
> /dev
/null
;
714 echo "Missing log message '$err_string'"
715 ERRORS
=$
(expr $ERRORS + 1)
719 if [ $ERRORS != "0" ];
721 echo "TEST FAILED WITH $ERRORS ERRORS"
729 function _scrub_snaps_multi
() {
# _scrub_snaps_multi: shared body for the replica/primary variants.  Runs a
# multi-OSD cluster, injects the corruption on the OSD selected by $which
# ("primary" or "replica"), scrubs, and checks JSON output and log messages.
# NOTE(review): extraction garbling — leading integers are original-file line
# numbers fused into the text; heredoc bodies (expected JSON), loop/if
# terminators, local declarations (presumably dir/which/poolname/OSDS/OBJS)
# and the closing brace were dropped.  TODO: restore from upstream.
736 TESTDATA
="testdata.$$"
738 run_mon
$dir a
--osd_pool_default_size=$OSDS ||
return 1
739 run_mgr
$dir x ||
return 1
740 for osd
in $
(seq 0 $
(expr $OSDS - 1))
742 run_osd
$dir $osd ||
return 1
745 # All scrubs done manually. Don't want any unexpected scheduled scrubs.
746 ceph osd
set noscrub ||
return 1
747 ceph osd
set nodeep-scrub ||
return 1
749 # Create a pool with a single pg
750 create_pool
$poolname 1 1
751 wait_for_clean ||
return 1
752 poolid
=$
(ceph osd dump |
grep "^pool.*[']test[']" |
awk '{ print $2 }')
754 dd if=/dev
/urandom of
=$TESTDATA bs
=1032 count
=1
755 for i
in `seq 1 $OBJS`
757 rados
-p $poolname put obj
${i} $TESTDATA
760 local primary
=$
(get_primary
$poolname obj1
)
761 local replica
=$
(get_not_primary
$poolname obj1
)
# Inject corruption on whichever OSD $which names; the eval + \$ indirection
# expands $primary or $replica by name.
763 eval create_scenario
$dir $poolname $TESTDATA \$
$which ||
return 1
767 for osd
in $
(seq 0 $
(expr $OSDS - 1))
769 activate_osd
$dir $osd ||
return 1
# Tiny scrub chunks (3) force many _scan_snaps passes on both OSDs.
772 ceph tell osd.
* config
set osd_shallow_scrub_chunk_max
3
773 ceph tell osd.
* config
set osd_shallow_scrub_chunk_min
3
774 ceph tell osd.
* config
set osd_scrub_chunk_min
3
775 ceph tell osd.
* config
set osd_pg_stat_report_interval_max
1
776 wait_for_clean ||
return 1
778 local pgid
="${poolid}.0"
779 if ! pg_scrub
"$pgid" ; then
# Both primary and replica logs must show more than 3 scan chunks.
783 test "$(grep "_scan_snaps start
" $dir/osd.${primary}.log | wc -l)" -gt "3" ||
return 1
784 test "$(grep "_scan_snaps start
" $dir/osd.${replica}.log | wc -l)" -gt "3" ||
return 1
786 rados list-inconsistent-pg
$poolname > $dir/json ||
return 1
788 test $
(jq
'. | length' $dir/json
) = "1" ||
return 1
790 test $
(jq
-r '.[0]' $dir/json
) = $pgid ||
return 1
792 rados list-inconsistent-obj
$pgid --format=json-pretty
794 rados list-inconsistent-snapset
$pgid > $dir/json ||
return 1
796 # Since all of the snapshots on the primary are consistent there are no errors here
# Expected JSON differs by scenario; heredoc bodies were lost in extraction.
797 if [ $which = "replica" ];
800 jq
"$jqfilter" << EOF | python3 -c "$sortkeys" > $dir/checkcsjson
809 jq
"$jqfilter" << EOF | python3 -c "$sortkeys" > $dir/checkcsjson
1063 jq
"$jqfilter" $dir/json | python3
-c "$sortkeys" > $dir/csjson
1064 multidiff
$dir/checkcsjson
$dir/csjson ||
test $getjson = "yes" ||
return 1
1065 if test $getjson = "yes"
1067 jq
'.' $dir/json
> save1.json
1070 if test "$LOCALRUN" = "yes" && which jsonschema
> /dev
/null
;
1072 jsonschema
-i $dir/json
$CEPH_ROOT/doc
/rados
/command
/list-inconsistent-snap.json ||
return 1
1075 pidfiles
=$
(find $dir 2>/dev
/null |
grep 'osd[^/]*\.pid')
1077 for pidfile
in ${pidfiles}
1079 pids
+="$(cat $pidfile) "
1084 # When removing snapshots with a corrupt replica, it crashes.
1085 # See http://tracker.ceph.com/issues/23875
1086 if [ $which = "primary" ];
1090 rados
-p $poolname rmsnap snap
$i
1094 while ceph pg dump pgs |
grep -q snaptrim
;
1096 if ceph pg dump pgs |
grep -q snaptrim_error
;
1102 if (( $loop >= 10 )) ; then
1103 ERRORS
=$
(expr $ERRORS + 1)
1114 echo "OSD Crash occurred"
1115 ERRORS
=$
(expr $ERRORS + 1)
1119 kill_daemons
$dir ||
return 1
# Messages expected in the primary's log regardless of which OSD was damaged.
1121 declare -a err_strings
1122 err_strings
[0]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard [0-1] .*:::obj4:7 : missing"
1123 err_strings
[1]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard [0-1] soid .*:::obj3:head : size 3840 != size 768 from auth oi"
1124 err_strings
[2]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard [0-1] .*:::obj5:1 : missing"
1125 err_strings
[3]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard [0-1] .*:::obj5:2 : missing"
1126 err_strings
[4]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard [0-1] soid .*:::obj5:4 : size 4608 != size 512 from auth oi"
1127 err_strings
[5]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 soid .*:::obj5:7 : failed to pick suitable object info"
1128 err_strings
[6]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard [0-1] .*:::obj1:head : missing"
1129 err_strings
[7]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 scrub ${scruberrors} errors"
1131 for err_string
in "${err_strings[@]}"
1133 if ! grep "$err_string" $dir/osd.
${primary}.log
> /dev
/null
;
1135 echo "Missing log message '$err_string'"
1136 ERRORS
=$
(expr $ERRORS + 1)
1140 # Check replica specific messages
1141 declare -a rep_err_strings
# Resolve $which ("primary"/"replica") to the corresponding OSD id.
1142 osd
=$
(eval echo \$
$which)
1143 rep_err_strings
[0]="log_channel[(]cluster[)] log [[]ERR[]] : osd[.][0-9]* found snap mapper error on pg 1.0 oid 1:461f8b5e:::obj16:7 snaps missing in mapper, should be: {1, 2, 3, 4, 5, 6, 7} ...repaired"
1144 for err_string
in "${rep_err_strings[@]}"
1146 if ! grep "$err_string" $dir/osd.
${osd}.log
> /dev
/null
;
1148 echo "Missing log message '$err_string'"
1149 ERRORS
=$
(expr $ERRORS + 1)
1153 if [ $ERRORS != "0" ];
1155 echo "TEST FAILED WITH $ERRORS ERRORS"
# Run the multi-OSD snapshot-scrub scenario with the corruption injected on
# the replica OSD.  Scrub chunk sizes are pinned small so the scrub makes
# several passes; CEPH_ARGS is restored afterwards regardless of outcome.
# Arguments: $1 - test directory.
# NOTE(review): reconstructed — the garbled copy lost `local dir=$1`, the
# error propagation and the closing brace; confirm against upstream.
function TEST_scrub_snaps_replica() {
    local dir=$1
    ORIG_ARGS=$CEPH_ARGS
    CEPH_ARGS+=" --osd_scrub_chunk_min=3 --osd_scrub_chunk_max=20 --osd_shallow_scrub_chunk_min=3 --osd_shallow_scrub_chunk_max=3 --osd_pg_stat_report_interval_max=1"
    _scrub_snaps_multi $dir replica || return 1
    CEPH_ARGS=$ORIG_ARGS
    return 0
}
# Run the multi-OSD snapshot-scrub scenario with the corruption injected on
# the primary OSD.  Mirrors TEST_scrub_snaps_replica with which="primary";
# CEPH_ARGS is restored afterwards regardless of outcome.
# Arguments: $1 - test directory.
# NOTE(review): reconstructed — the garbled copy lost `local dir=$1`, the
# error propagation and the closing brace; confirm against upstream.
function TEST_scrub_snaps_primary() {
    local dir=$1
    ORIG_ARGS=$CEPH_ARGS
    CEPH_ARGS+=" --osd_scrub_chunk_min=3 --osd_scrub_chunk_max=20 --osd_shallow_scrub_chunk_min=3 --osd_shallow_scrub_chunk_max=3 --osd_pg_stat_report_interval_max=1"
    _scrub_snaps_multi $dir primary || return 1
    CEPH_ARGS=$ORIG_ARGS
    return 0
}
# Entry point: the ceph-helpers main() creates the test directory named after
# the script and dispatches to the TEST_* functions (or those given as args).
main osd-scrub-snaps "$@"

# compile-command: "cd build ; make -j4 && \
#    ../qa/run-standalone.sh osd-scrub-snaps.sh"