#!/usr/bin/env bash
#
# Copyright (C) 2018 Red Hat <contact@redhat.com>
#
# Author: David Zafman <dzafman@redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Public License for more details.
#
source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
source $CEPH_ROOT/qa/standalone/scrub/scrub-helpers.sh

function run() {
    local dir=$1
    shift

    export CEPH_MON="127.0.0.1:7138" # git grep '\<7138\>' : there must be only one
    export CEPH_ARGS
    CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
    CEPH_ARGS+="--mon-host=$CEPH_MON "

    export -n CEPH_CLI_TEST_DUP_COMMAND
    local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
    for func in $funcs ; do
        setup $dir || return 1
        $func $dir || return 1
        teardown $dir || return 1
    done
}

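# TEST_scrub_test: corrupt one replica of obj1 on an OSD that is neither the
# primary nor 'otherosd', deep-scrub, and check that the scrub error counts
# and the '+inconsistent' PG state behave as expected across a primary
# change and a subsequent repair.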
function TEST_scrub_test() {
    local dir=$1
    local poolname=test
    local OSDS=3
    local objects=15

    TESTDATA="testdata.$$"

    run_mon $dir a --osd_pool_default_size=3 || return 1
    run_mgr $dir x || return 1
    for osd in $(seq 0 $(expr $OSDS - 1))
    do
        run_osd $dir $osd || return 1
    done

    # Create a pool with a single pg
    create_pool $poolname 1 1
    wait_for_clean || return 1
    poolid=$(ceph osd dump | grep "^pool.*[']${poolname}[']" | awk '{ print $2 }')

    dd if=/dev/urandom of=$TESTDATA bs=1032 count=1
    for i in `seq 1 $objects`
    do
        rados -p $poolname put obj${i} $TESTDATA
    done
    rm -f $TESTDATA

    local primary=$(get_primary $poolname obj1)
    local otherosd=$(get_not_primary $poolname obj1)
    if [ "$otherosd" = "2" ];
    then
        local anotherosd="0"
    else
        local anotherosd="2"
    fi

    objectstore_tool $dir $anotherosd obj1 set-bytes /etc/fstab

    local pgid="${poolid}.0"
    pg_deep_scrub "$pgid" || return 1

    ceph pg dump pgs | grep ^${pgid} | grep -q -- +inconsistent || return 1
    test "$(ceph pg $pgid query | jq '.info.stats.stat_sum.num_scrub_errors')" = "2" || return 1

    ceph osd out $primary
    wait_for_clean || return 1

    pg_deep_scrub "$pgid" || return 1

    test "$(ceph pg $pgid query | jq '.info.stats.stat_sum.num_scrub_errors')" = "2" || return 1
    test "$(ceph pg $pgid query | jq '.peer_info[0].stats.stat_sum.num_scrub_errors')" = "2" || return 1
    ceph pg dump pgs | grep ^${pgid} | grep -q -- +inconsistent || return 1

    ceph osd in $primary
    wait_for_clean || return 1

    repair "$pgid" || return 1
    wait_for_clean || return 1

    # Set up the next check: after the repair, the previous primary still holds the old scrub-error count
    test "$(ceph pg $pgid query | jq '.peer_info[0].stats.stat_sum.num_scrub_errors')" = "2" || return 1
    ceph pg dump pgs | grep ^${pgid} | grep -vq -- +inconsistent || return 1

    ceph osd out $primary
    wait_for_clean || return 1

    test "$(ceph pg $pgid query | jq '.info.stats.stat_sum.num_scrub_errors')" = "0" || return 1
    test "$(ceph pg $pgid query | jq '.peer_info[0].stats.stat_sum.num_scrub_errors')" = "0" || return 1
    test "$(ceph pg $pgid query | jq '.peer_info[1].stats.stat_sum.num_scrub_errors')" = "0" || return 1
    ceph pg dump pgs | grep ^${pgid} | grep -vq -- +inconsistent || return 1
}

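# Helpers for the interval tests: DATESED/DATEFORMAT strip a timestamp down to
# its year-month-day part, and check_dump_scrubs() verifies that the first
# 'dump_scrubs' entry on the primary has a sched_time and deadline that fall on
# the expected dates (given as offsets from now, e.g. "2 days", "1 week").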
# Grab year-month-day
DATESED="s/\([0-9]*-[0-9]*-[0-9]*\).*/\1/"
DATEFORMAT="%Y-%m-%d"

function check_dump_scrubs() {
    local primary=$1
    local sched_time_check="$2"
    local deadline_check="$3"

    DS="$(CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${primary}) dump_scrubs)"
    # use eval to drop double-quotes
    eval SCHED_TIME=$(echo $DS | jq '.[0].sched_time')
    test $(echo $SCHED_TIME | sed $DATESED) = $(date +${DATEFORMAT} -d "now + $sched_time_check") || return 1
    # use eval to drop double-quotes
    eval DEADLINE=$(echo $DS | jq '.[0].deadline')
    test $(echo $DEADLINE | sed $DATESED) = $(date +${DATEFORMAT} -d "now + $deadline_check") || return 1
}

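# TEST_interval_changes: verify that changing the scrub min/max intervals,
# both globally via the admin socket and per-pool via 'ceph osd pool set',
# is reflected in the scheduled scrub time and deadline shown by 'dump_scrubs'.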
function TEST_interval_changes() {
    local dir=$1
    local poolname=test
    local OSDS=2
    local objects=10
    # Don't assume how internal defaults are set
    local day="$(expr 24 \* 60 \* 60)"
    local week="$(expr $day \* 7)"
    local min_interval=$day
    local max_interval=$week
    local WAIT_FOR_UPDATE=15

    TESTDATA="testdata.$$"

    # This min scrub interval results in 30 seconds backoff time
    run_mon $dir a --osd_pool_default_size=$OSDS || return 1
    run_mgr $dir x || return 1
    for osd in $(seq 0 $(expr $OSDS - 1))
    do
        run_osd $dir $osd --osd_scrub_min_interval=$min_interval --osd_scrub_max_interval=$max_interval --osd_scrub_interval_randomize_ratio=0 || return 1
    done

    # Create a pool with a single pg
    create_pool $poolname 1 1
    wait_for_clean || return 1
    local poolid=$(ceph osd dump | grep "^pool.*[']${poolname}[']" | awk '{ print $2 }')

    dd if=/dev/urandom of=$TESTDATA bs=1032 count=1
    for i in `seq 1 $objects`
    do
        rados -p $poolname put obj${i} $TESTDATA
    done
    rm -f $TESTDATA

    local primary=$(get_primary $poolname obj1)

    # Check initial settings from above (min 1 day, max 1 week)
    check_dump_scrubs $primary "1 day" "1 week" || return 1

    # Change global osd_scrub_min_interval to 2 days
    CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${primary}) config set osd_scrub_min_interval $(expr $day \* 2)
    sleep $WAIT_FOR_UPDATE
    check_dump_scrubs $primary "2 days" "1 week" || return 1

    # Change global osd_scrub_max_interval to 2 weeks
    CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${primary}) config set osd_scrub_max_interval $(expr $week \* 2)
    sleep $WAIT_FOR_UPDATE
    check_dump_scrubs $primary "2 days" "2 week" || return 1

    # Change pool osd_scrub_min_interval to 3 days
    ceph osd pool set $poolname scrub_min_interval $(expr $day \* 3)
    sleep $WAIT_FOR_UPDATE
    check_dump_scrubs $primary "3 days" "2 week" || return 1

    # Change pool osd_scrub_max_interval to 3 weeks
    ceph osd pool set $poolname scrub_max_interval $(expr $week \* 3)
    sleep $WAIT_FOR_UPDATE
    check_dump_scrubs $primary "3 days" "3 week" || return 1
}

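# TEST_scrub_extended_sleep: restrict scrubbing to a week-day window that is
# not currently open, request a scrub, and check from the OSD log that the
# scrubber enters extended sleep; then lift the day restriction and verify
# that the scrub neither runs too early nor fails to run at all.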
function TEST_scrub_extended_sleep() {
    local dir=$1
    local poolname=test
    local OSDS=3
    local objects=15

    TESTDATA="testdata.$$"

    DAY=$(date +%w)
    # Handle wrap
    if [ "$DAY" -ge "4" ];
    then
        DAY="0"
    fi
    # Start after 2 days in case we are near midnight
    DAY_START=$(expr $DAY + 2)
    DAY_END=$(expr $DAY + 3)

    run_mon $dir a --osd_pool_default_size=3 || return 1
    run_mgr $dir x || return 1
    for osd in $(seq 0 $(expr $OSDS - 1))
    do
        run_osd $dir $osd --osd_scrub_sleep=0 \
            --osd_scrub_extended_sleep=20 \
            --bluestore_cache_autotune=false \
            --osd_deep_scrub_randomize_ratio=0.0 \
            --osd_scrub_interval_randomize_ratio=0 \
            --osd_scrub_begin_week_day=$DAY_START \
            --osd_scrub_end_week_day=$DAY_END \
            || return 1
    done

    # Create a pool with a single pg
    create_pool $poolname 1 1
    wait_for_clean || return 1

    # Trigger a scrub on a PG
    local pgid=$(get_pg $poolname SOMETHING)
    local primary=$(get_primary $poolname SOMETHING)
    local last_scrub=$(get_last_scrub_stamp $pgid)
    ceph tell $pgid scrub || return 1

    # Allow scrub to start extended sleep
    PASSED="false"
    for ((i=0; i < 15; i++)); do
        if grep -q "scrub state.*, sleeping" $dir/osd.${primary}.log
        then
            PASSED="true"
            break
        fi
        sleep 1
    done

    # Check that extended sleep was triggered
    if [ $PASSED = "false" ];
    then
        return 1
    fi

    # release scrub to run after extended sleep finishes
    ceph tell osd.$primary config set osd_scrub_begin_week_day 0
    ceph tell osd.$primary config set osd_scrub_end_week_day 0

    # Due to extended sleep, the scrub should not be done within 20 seconds
    # but test up to 10 seconds and make sure it happens by 25 seconds.
    count=0
    PASSED="false"
    for ((i=0; i < 25; i++)); do
        count=$(expr $count + 1)
        if test "$(get_last_scrub_stamp $pgid)" '>' "$last_scrub" ; then
            # Did scrub run too soon?
            if [ $count -lt "10" ];
            then
                return 1
            fi
            PASSED="true"
            break
        fi
        sleep 1
    done

    # Make sure scrub eventually ran
    if [ $PASSED = "false" ];
    then
        return 1
    fi
}

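# _scrub_abort: start a (deep-)scrub on a PG with many objects, set the
# matching no(deep-)scrub flag while it is running, and verify from the
# primary's log that the scrub aborts; then unset the flag and wait for the
# scrub to complete.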
function _scrub_abort() {
    local dir=$1
    local poolname=test
    local OSDS=3
    local objects=1000
    local type=$2

    TESTDATA="testdata.$$"
    if test $type = "scrub";
    then
        stopscrub="noscrub"
        check="noscrub"
    else
        stopscrub="nodeep-scrub"
        check="nodeep_scrub"
    fi

    run_mon $dir a --osd_pool_default_size=3 || return 1
    run_mgr $dir x || return 1
    for osd in $(seq 0 $(expr $OSDS - 1))
    do
        # Set scheduler to "wpq" until there's a reliable way to query scrub
        # states with "--osd-scrub-sleep" set to 0. The "mclock_scheduler"
        # overrides the scrub sleep to 0 and as a result the checks in the
        # test fail.
        run_osd $dir $osd --osd_pool_default_pg_autoscale_mode=off \
            --osd_deep_scrub_randomize_ratio=0.0 \
            --osd_scrub_sleep=5.0 \
            --osd_scrub_interval_randomize_ratio=0 \
            --osd_op_queue=wpq || return 1
    done

    # Create a pool with a single pg
    create_pool $poolname 1 1
    wait_for_clean || return 1
    poolid=$(ceph osd dump | grep "^pool.*[']${poolname}[']" | awk '{ print $2 }')

    dd if=/dev/urandom of=$TESTDATA bs=1032 count=1
    for i in `seq 1 $objects`
    do
        rados -p $poolname put obj${i} $TESTDATA
    done
    rm -f $TESTDATA

    local primary=$(get_primary $poolname obj1)
    local pgid="${poolid}.0"

    ceph tell $pgid $type || return 1
    # deep-scrub won't start without scrub noticing
    if [ "$type" = "deep_scrub" ];
    then
        ceph tell $pgid scrub || return 1
    fi

    # Wait for scrubbing to start
    set -o pipefail
    found="no"
    for i in $(seq 0 200)
    do
        flush_pg_stats
        if ceph pg dump pgs | grep ^$pgid | grep -q "scrubbing"
        then
            found="yes"
            #ceph pg dump pgs
            break
        fi
    done
    set +o pipefail

    if test $found = "no";
    then
        echo "Scrubbing never started"
        return 1
    fi

    ceph osd set $stopscrub
    if [ "$type" = "deep_scrub" ];
    then
        ceph osd set noscrub
    fi

    # Wait for scrubbing to end
    set -o pipefail
    for i in $(seq 0 200)
    do
        flush_pg_stats
        if ceph pg dump pgs | grep ^$pgid | grep -q "scrubbing"
        then
            continue
        fi
        #ceph pg dump pgs
        break
    done
    set +o pipefail

    sleep 5

    if ! grep "$check set, aborting" $dir/osd.${primary}.log
    then
        echo "Abort not seen in log"
        return 1
    fi

    local last_scrub=$(get_last_scrub_stamp $pgid)
    ceph config set osd "osd_scrub_sleep" "0.1"

    ceph osd unset $stopscrub
    if [ "$type" = "deep_scrub" ];
    then
        ceph osd unset noscrub
    fi
    TIMEOUT=$(($objects / 2))
    wait_for_scrub $pgid "$last_scrub" || return 1
}

function TEST_scrub_abort() {
    local dir=$1
    _scrub_abort $dir scrub
}

function TEST_deep_scrub_abort() {
    local dir=$1
    _scrub_abort $dir deep_scrub
}

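# TEST_scrub_permit_time: configure a scrub-hour window that ended an hour ago
# and check that a requested scrub does not run outside the permitted hours.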
function TEST_scrub_permit_time() {
    local dir=$1
    local poolname=test
    local OSDS=3
    local objects=15

    TESTDATA="testdata.$$"

    run_mon $dir a --osd_pool_default_size=3 || return 1
    run_mgr $dir x || return 1
    local scrub_begin_hour=$(date -d '2 hour ago' +"%H" | sed 's/^0//')
    local scrub_end_hour=$(date -d '1 hour ago' +"%H" | sed 's/^0//')
    for osd in $(seq 0 $(expr $OSDS - 1))
    do
        run_osd $dir $osd --bluestore_cache_autotune=false \
            --osd_deep_scrub_randomize_ratio=0.0 \
            --osd_scrub_interval_randomize_ratio=0 \
            --osd_scrub_begin_hour=$scrub_begin_hour \
            --osd_scrub_end_hour=$scrub_end_hour || return 1
    done

    # Create a pool with a single pg
    create_pool $poolname 1 1
    wait_for_clean || return 1

    # Trigger a scrub on a PG
    local pgid=$(get_pg $poolname SOMETHING)
    local primary=$(get_primary $poolname SOMETHING)
    local last_scrub=$(get_last_scrub_stamp $pgid)
    # If we don't specify an amount of time to subtract from
    # current time to set last_scrub_stamp, it sets the deadline
    # back by osd_max_interval which would cause the time permit checking
    # to be skipped. Set back 1 day, the default scrub_min_interval.
    ceph tell $pgid scrub $(( 24 * 60 * 60 )) || return 1

    # Scrub should not run
    for ((i=0; i < 30; i++)); do
        if test "$(get_last_scrub_stamp $pgid)" '>' "$last_scrub" ; then
            return 1
        fi
        sleep 1
    done
}

# a test to recreate the problem described in bug #52901 - setting 'noscrub'
# without explicitly preventing deep scrubs made the PG 'unscrubbable'.
# Fixed by PR#43521
function TEST_just_deep_scrubs() {
    local dir=$1
    local -A cluster_conf=(
        ['osds_num']="3"
        ['pgs_in_pool']="4"
        ['pool_name']="test"
    )

    standard_scrub_cluster $dir cluster_conf
    local poolid=${cluster_conf['pool_id']}
    local poolname=${cluster_conf['pool_name']}
    echo "Pool: $poolname : $poolid"

    TESTDATA="testdata.$$"
    local objects=15
    dd if=/dev/urandom of=$TESTDATA bs=1032 count=1
    for i in `seq 1 $objects`
    do
        rados -p $poolname put obj${i} $TESTDATA
    done
    rm -f $TESTDATA

    # set both 'no scrub' & 'no deep-scrub', then request a deep-scrub.
    # we do not expect to see the scrub scheduled.

    ceph osd set noscrub || return 1
    ceph osd set nodeep-scrub || return 1
    sleep 6 # the 'noscrub' command takes a long time to reach the OSDs
    local now_is=`date -I"ns"`
    declare -A sched_data
    local pgid="${poolid}.2"

    # turn on the publishing of test data in the 'scrubber' section of 'pg query' output
    set_query_debug $pgid

    extract_published_sch $pgid $now_is $now_is sched_data
    local saved_last_stamp=${sched_data['query_last_stamp']}
    local dbg_counter_at_start=${sched_data['query_scrub_seq']}
    echo "test counter @ start: $dbg_counter_at_start"

    ceph pg $pgid deep_scrub

    sleep 5 # 5s is the 'pg dump' interval
    declare -A sc_data_2
    extract_published_sch $pgid $now_is $now_is sc_data_2
    echo "test counter @ should show no change: " ${sc_data_2['query_scrub_seq']}
    (( ${sc_data_2['dmp_last_duration']} == 0)) || return 1
    (( ${sc_data_2['query_scrub_seq']} == $dbg_counter_at_start)) || return 1

    # unset the 'no deep-scrub'. Deep scrubbing should start now.
    ceph osd unset nodeep-scrub || return 1
    sleep 5
    declare -A expct_qry_duration=( ['query_last_duration']="0" ['query_last_duration_neg']="not0" )
    sc_data_2=()
    wait_any_cond $pgid 10 $saved_last_stamp expct_qry_duration "WaitingAfterScrub " sc_data_2 || return 1
    echo "test counter @ should be higher than before the unset: " ${sc_data_2['query_scrub_seq']}
}

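# TEST_dump_scrub_schedule: follow the scrub scheduling information published
# via 'pg query' and 'pg dump' through three steps: before any scrub has run,
# with 'noscrub' set while a scrub is requested, and while a scrub is actually
# running.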
function TEST_dump_scrub_schedule() {
    local dir=$1
    local poolname=test
    local OSDS=3
    local objects=15

    TESTDATA="testdata.$$"

    run_mon $dir a --osd_pool_default_size=$OSDS || return 1
    run_mgr $dir x || return 1

    # Set scheduler to "wpq" until there's a reliable way to query scrub states
    # with "--osd-scrub-sleep" set to 0. The "mclock_scheduler" overrides the
    # scrub sleep to 0 and as a result the checks in the test fail.
    local ceph_osd_args="--osd_deep_scrub_randomize_ratio=0 \
            --osd_scrub_interval_randomize_ratio=0 \
            --osd_scrub_backoff_ratio=0.0 \
            --osd_op_queue=wpq \
            --osd_scrub_sleep=0.2"

    for osd in $(seq 0 $(expr $OSDS - 1))
    do
        run_osd $dir $osd $ceph_osd_args || return 1
    done

    # Create a pool with a single pg
    create_pool $poolname 1 1
    wait_for_clean || return 1
    poolid=$(ceph osd dump | grep "^pool.*[']${poolname}[']" | awk '{ print $2 }')

    dd if=/dev/urandom of=$TESTDATA bs=1032 count=1
    for i in `seq 1 $objects`
    do
        rados -p $poolname put obj${i} $TESTDATA
    done
    rm -f $TESTDATA

    local pgid="${poolid}.0"
    local now_is=`date -I"ns"`

    # before the scrubbing starts

    # last scrub duration should be 0. The scheduling data should show
    # a time in the future:
    # e.g. 'periodic scrub scheduled @ 2021-10-12T20:32:43.645168+0000'

    declare -A expct_starting=( ['query_active']="false" ['query_is_future']="true" ['query_schedule']="scrub scheduled" )
    declare -A sched_data
    extract_published_sch $pgid $now_is "2019-10-12T20:32:43.645168+0000" sched_data
    schedule_against_expected sched_data expct_starting "initial"
    (( ${sched_data['dmp_last_duration']} == 0)) || return 1
    echo "last-scrub --- " ${sched_data['query_last_scrub']}

    #
    # step 1: scrub once (mainly to ensure there is no urgency to scrub)
    #

    saved_last_stamp=${sched_data['query_last_stamp']}
    ceph tell osd.* config set osd_scrub_sleep "0"
    ceph pg deep-scrub $pgid
    ceph pg scrub $pgid

    # wait for the 'last duration' entries to change. Note that the 'dump' one will need
    # up to 5 seconds to sync

    sleep 5
    sched_data=()
    declare -A expct_qry_duration=( ['query_last_duration']="0" ['query_last_duration_neg']="not0" )
    wait_any_cond $pgid 10 $saved_last_stamp expct_qry_duration "WaitingAfterScrub " sched_data || return 1
    # verify that 'pg dump' also shows the change in last_scrub_duration
    sched_data=()
    declare -A expct_dmp_duration=( ['dmp_last_duration']="0" ['dmp_last_duration_neg']="not0" )
    wait_any_cond $pgid 10 $saved_last_stamp expct_dmp_duration "WaitingAfterScrub_dmp " sched_data || return 1

    sleep 2

    #
    # step 2: set noscrub and request a "periodic scrub". Watch for the change in the 'is the scrub
    #         scheduled for the future' value
    #

    ceph tell osd.* config set osd_scrub_chunk_max "3" || return 1
    ceph tell osd.* config set osd_scrub_sleep "1.0" || return 1
    ceph osd set noscrub || return 1
    sleep 2
    saved_last_stamp=${sched_data['query_last_stamp']}

    ceph pg $pgid scrub
    sleep 1
    sched_data=()
    declare -A expct_scrub_peri_sched=( ['query_is_future']="false" )
    wait_any_cond $pgid 10 $saved_last_stamp expct_scrub_peri_sched "waitingBeingScheduled" sched_data || return 1

    # note: the induced change in 'last_scrub_stamp' that we've caused above, is by itself not a publish-stats
    # trigger. Thus it might happen that the information in 'pg dump' will not get updated here. Do not expect
    # 'dmp_is_future' to follow 'query_is_future' without a good reason
    ## declare -A expct_scrub_peri_sched_dmp=( ['dmp_is_future']="false" )
    ## wait_any_cond $pgid 15 $saved_last_stamp expct_scrub_peri_sched_dmp "waitingBeingScheduled" sched_data || echo "must be fixed"

    #
    # step 3: allow scrubs. Watch for the conditions during the scrubbing
    #

    saved_last_stamp=${sched_data['query_last_stamp']}
    ceph osd unset noscrub

    declare -A cond_active=( ['query_active']="true" )
    sched_data=()
    wait_any_cond $pgid 10 $saved_last_stamp cond_active "WaitingActive " sched_data || return 1

    # check for pg-dump to show being active. But if we see 'query_active' being reset - we've just
    # missed it.
    declare -A cond_active_dmp=( ['dmp_state_has_scrubbing']="true" ['query_active']="false" )
    sched_data=()
    wait_any_cond $pgid 10 $saved_last_stamp cond_active_dmp "WaitingActive " sched_data || return 1
}

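# TEST_pg_dump_objects_scrubbed: scrub a PG and verify that the
# 'objects_scrubbed' counter reported by 'pg query' matches the number of
# objects written to the pool.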
function TEST_pg_dump_objects_scrubbed() {
    local dir=$1
    local poolname=test
    local OSDS=3
    local objects=15
    local timeout=10

    TESTDATA="testdata.$$"

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=$OSDS || return 1
    run_mgr $dir x || return 1
    for osd in $(seq 0 $(expr $OSDS - 1))
    do
        run_osd $dir $osd || return 1
    done

    # Create a pool with a single pg
    create_pool $poolname 1 1
    wait_for_clean || return 1
    poolid=$(ceph osd dump | grep "^pool.*[']${poolname}[']" | awk '{ print $2 }')

    dd if=/dev/urandom of=$TESTDATA bs=1032 count=1
    for i in `seq 1 $objects`
    do
        rados -p $poolname put obj${i} $TESTDATA
    done
    rm -f $TESTDATA

    local pgid="${poolid}.0"
    # Trigger a scrub on a PG
    pg_scrub $pgid || return 1
    test "$(ceph pg $pgid query | jq '.info.stats.objects_scrubbed')" '=' $objects || return 1

    teardown $dir || return 1
}

main osd-scrub-test "$@"

# Local Variables:
# compile-command: "cd build ; make -j4 && \
#    ../qa/run-standalone.sh osd-scrub-test.sh"
# End: