#! /bin/bash
#
# Copyright (C) 2015 Red Hat <contact@redhat.com>
#
# Author: David Zafman <dzafman@redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Public License for more details.
#
source $CEPH_ROOT/qa/standalone/ceph-helpers.sh

function run() {
    local dir=$1
    shift

    export CEPH_MON="127.0.0.1:7121" # git grep '\<7121\>' : there must be only one
    export CEPH_ARGS
    CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
    CEPH_ARGS+="--mon-host=$CEPH_MON "

    local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
    for func in $funcs ; do
        $func $dir || return 1
    done
}

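# Create a one-PG pool, write objects across several snapshots, then (with the
# osd stopped) use ceph-objectstore-tool to damage the on-disk snapshot
# metadata in various ways. A subsequent scrub must report every injected
# inconsistency, both via `rados list-inconsistent-snapset` and in the cluster
# log, and removing the snapshots afterwards must not crash the osd.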
function TEST_scrub_snaps() {
    local dir=$1
    local poolname=test

    TESTDATA="testdata.$$"

    setup $dir || return 1
    run_mon $dir a --osd_pool_default_size=1 || return 1
    run_mgr $dir x || return 1
    run_osd $dir 0 || return 1

    create_rbd_pool || return 1
    wait_for_clean || return 1

    # Create a pool with a single pg
    ceph osd pool create $poolname 1 1
    poolid=$(ceph osd dump | grep "^pool.*[']test[']" | awk '{ print $2 }')

    dd if=/dev/urandom of=$TESTDATA bs=1032 count=1
    for i in `seq 1 15`
    do
        rados -p $poolname put obj${i} $TESTDATA
    done

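    # Take snapshots snap1..snap7, overwriting a different subset of objects
    # after each one so the objects end up with different sets of clones.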
    SNAP=1
    rados -p $poolname mksnap snap${SNAP}
    dd if=/dev/urandom of=$TESTDATA bs=256 count=${SNAP}
    rados -p $poolname put obj1 $TESTDATA
    rados -p $poolname put obj5 $TESTDATA
    rados -p $poolname put obj3 $TESTDATA
    for i in `seq 6 14`
     do rados -p $poolname put obj${i} $TESTDATA
    done

    SNAP=2
    rados -p $poolname mksnap snap${SNAP}
    dd if=/dev/urandom of=$TESTDATA bs=256 count=${SNAP}
    rados -p $poolname put obj5 $TESTDATA

    SNAP=3
    rados -p $poolname mksnap snap${SNAP}
    dd if=/dev/urandom of=$TESTDATA bs=256 count=${SNAP}
    rados -p $poolname put obj3 $TESTDATA

    SNAP=4
    rados -p $poolname mksnap snap${SNAP}
    dd if=/dev/urandom of=$TESTDATA bs=256 count=${SNAP}
    rados -p $poolname put obj5 $TESTDATA
    rados -p $poolname put obj2 $TESTDATA

    SNAP=5
    rados -p $poolname mksnap snap${SNAP}
    SNAP=6
    rados -p $poolname mksnap snap${SNAP}
    dd if=/dev/urandom of=$TESTDATA bs=256 count=${SNAP}
    rados -p $poolname put obj5 $TESTDATA

    SNAP=7
    rados -p $poolname mksnap snap${SNAP}

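    # Remove the heads of obj4 and obj2; their snapshot clones stay behind.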
    rados -p $poolname rm obj4
    rados -p $poolname rm obj2

    kill_daemons $dir TERM osd || return 1

    # No need to use the ceph_objectstore_tool helper function because the osd is stopped

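    # With the osd down, manipulate the object store directly: remove heads and
    # clones, rewrite object data so sizes no longer match the object info,
    # strip obj2's snapset attribute, and fabricate an obj5 clone that is not
    # in the snapset.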
    JSON="$(ceph-objectstore-tool --data-path $dir/0 --journal-path $dir/0/journal --head --op list obj1)"
    ceph-objectstore-tool --data-path $dir/0 --journal-path $dir/0/journal "$JSON" --force remove

    JSON="$(ceph-objectstore-tool --data-path $dir/0 --journal-path $dir/0/journal --op list obj5 | grep \"snapid\":2)"
    ceph-objectstore-tool --data-path $dir/0 --journal-path $dir/0/journal "$JSON" remove

    JSON="$(ceph-objectstore-tool --data-path $dir/0 --journal-path $dir/0/journal --op list obj5 | grep \"snapid\":1)"
    OBJ5SAVE="$JSON"
    ceph-objectstore-tool --data-path $dir/0 --journal-path $dir/0/journal "$JSON" remove

    JSON="$(ceph-objectstore-tool --data-path $dir/0 --journal-path $dir/0/journal --op list obj5 | grep \"snapid\":4)"
    dd if=/dev/urandom of=$TESTDATA bs=256 count=18
    ceph-objectstore-tool --data-path $dir/0 --journal-path $dir/0/journal "$JSON" set-bytes $TESTDATA

    JSON="$(ceph-objectstore-tool --data-path $dir/0 --journal-path $dir/0/journal --head --op list obj3)"
    dd if=/dev/urandom of=$TESTDATA bs=256 count=15
    ceph-objectstore-tool --data-path $dir/0 --journal-path $dir/0/journal "$JSON" set-bytes $TESTDATA

    JSON="$(ceph-objectstore-tool --data-path $dir/0 --journal-path $dir/0/journal --op list obj4 | grep \"snapid\":7)"
    ceph-objectstore-tool --data-path $dir/0 --journal-path $dir/0/journal "$JSON" remove

    JSON="$(ceph-objectstore-tool --data-path $dir/0 --journal-path $dir/0/journal --head --op list obj2)"
    ceph-objectstore-tool --data-path $dir/0 --journal-path $dir/0/journal "$JSON" rm-attr snapset

    # Create a clone which isn't in snapset and doesn't have object info
    JSON="$(echo "$OBJ5SAVE" | sed s/snapid\":1/snapid\":7/)"
    dd if=/dev/urandom of=$TESTDATA bs=256 count=7
    ceph-objectstore-tool --data-path $dir/0 --journal-path $dir/0/journal "$JSON" set-bytes $TESTDATA

    rm -f $TESTDATA

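    # clear-snapset with different keywords corrupts a different field of the
    # snapset on each of obj6..obj14, and obj15 gets an undecodable snapset.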
    JSON="$(ceph-objectstore-tool --data-path $dir/0 --journal-path $dir/0/journal --head --op list obj6)"
    ceph-objectstore-tool --data-path $dir/0 --journal-path $dir/0/journal "$JSON" clear-snapset
    JSON="$(ceph-objectstore-tool --data-path $dir/0 --journal-path $dir/0/journal --head --op list obj7)"
    ceph-objectstore-tool --data-path $dir/0 --journal-path $dir/0/journal "$JSON" clear-snapset corrupt
    JSON="$(ceph-objectstore-tool --data-path $dir/0 --journal-path $dir/0/journal --head --op list obj8)"
    ceph-objectstore-tool --data-path $dir/0 --journal-path $dir/0/journal "$JSON" clear-snapset seq
    JSON="$(ceph-objectstore-tool --data-path $dir/0 --journal-path $dir/0/journal --head --op list obj9)"
    ceph-objectstore-tool --data-path $dir/0 --journal-path $dir/0/journal "$JSON" clear-snapset clone_size
    JSON="$(ceph-objectstore-tool --data-path $dir/0 --journal-path $dir/0/journal --head --op list obj10)"
    ceph-objectstore-tool --data-path $dir/0 --journal-path $dir/0/journal "$JSON" clear-snapset clone_overlap
    JSON="$(ceph-objectstore-tool --data-path $dir/0 --journal-path $dir/0/journal --head --op list obj11)"
    ceph-objectstore-tool --data-path $dir/0 --journal-path $dir/0/journal "$JSON" clear-snapset clones
    JSON="$(ceph-objectstore-tool --data-path $dir/0 --journal-path $dir/0/journal --head --op list obj12)"
    ceph-objectstore-tool --data-path $dir/0 --journal-path $dir/0/journal "$JSON" clear-snapset head
    JSON="$(ceph-objectstore-tool --data-path $dir/0 --journal-path $dir/0/journal --head --op list obj13)"
    ceph-objectstore-tool --data-path $dir/0 --journal-path $dir/0/journal "$JSON" clear-snapset snaps
    JSON="$(ceph-objectstore-tool --data-path $dir/0 --journal-path $dir/0/journal --head --op list obj14)"
    ceph-objectstore-tool --data-path $dir/0 --journal-path $dir/0/journal "$JSON" clear-snapset size

    echo "garbage" > $dir/bad
    JSON="$(ceph-objectstore-tool --data-path $dir/0 --journal-path $dir/0/journal --head --op list obj15)"
    ceph-objectstore-tool --data-path $dir/0 --journal-path $dir/0/journal "$JSON" set-attr snapset $dir/bad
    rm -f $dir/bad

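    # Restart the osd and scrub the pg; the scrub should detect all of the
    # inconsistencies injected above.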
    run_osd $dir 0 || return 1
    create_rbd_pool || return 1
    wait_for_clean || return 1

    local pgid="${poolid}.0"
    if ! pg_scrub "$pgid" ; then
        cat $dir/osd.0.log
        return 1
    fi
    grep 'log_channel' $dir/osd.0.log

    rados list-inconsistent-pg $poolname > $dir/json || return 1
    # Check pg count
    test $(jq '. | length' $dir/json) = "1" || return 1
    # Check pgid
    test $(jq -r '.[0]' $dir/json) = $pgid || return 1

    rados list-inconsistent-snapset $pgid > $dir/json || return 1
    test $(jq '.inconsistents | length' $dir/json) = "21" || return 1

    local jqfilter='.inconsistents'
    local sortkeys='import json; import sys ; JSON=sys.stdin.read() ; ud = json.loads(JSON) ; print json.dumps(ud, sort_keys=True, indent=2)'

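    # The heredoc below is the expected `rados list-inconsistent-snapset`
    # output; both expected and actual JSON are key-sorted before diffing.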
    jq "$jqfilter" << EOF | python -c "$sortkeys" > $dir/checkcsjson
{
  "inconsistents": [
    {
      "errors": [
        "headless"
      ],
      "snap": 1,
      "locator": "",
      "nspace": "",
      "name": "obj1"
    },
    {
      "errors": [
        "size_mismatch"
      ],
      "snap": 1,
      "locator": "",
      "nspace": "",
      "name": "obj10"
    },
    {
      "errors": [
        "headless"
      ],
      "snap": 1,
      "locator": "",
      "nspace": "",
      "name": "obj11"
    },
    {
      "errors": [
        "size_mismatch"
      ],
      "snap": 1,
      "locator": "",
      "nspace": "",
      "name": "obj14"
    },
    {
      "errors": [
        "headless"
      ],
      "snap": 1,
      "locator": "",
      "nspace": "",
      "name": "obj6"
    },
    {
      "errors": [
        "headless"
      ],
      "snap": 1,
      "locator": "",
      "nspace": "",
      "name": "obj7"
    },
    {
      "errors": [
        "size_mismatch"
      ],
      "snap": 1,
      "locator": "",
      "nspace": "",
      "name": "obj9"
    },
    {
      "errors": [
        "headless"
      ],
      "snap": 4,
      "locator": "",
      "nspace": "",
      "name": "obj2"
    },
    {
      "errors": [
        "size_mismatch"
      ],
      "snap": 4,
      "locator": "",
      "nspace": "",
      "name": "obj5"
    },
    {
      "errors": [
        "headless"
      ],
      "snap": 7,
      "locator": "",
      "nspace": "",
      "name": "obj2"
    },
    {
      "errors": [
        "oi_attr_missing",
        "headless"
      ],
      "snap": 7,
      "locator": "",
      "nspace": "",
      "name": "obj5"
    },
    {
      "extra clones": [
        1
      ],
      "errors": [
        "extra_clones"
      ],
      "snap": "head",
      "locator": "",
      "nspace": "",
      "name": "obj11"
    },
    {
      "errors": [
        "head_mismatch"
      ],
      "snap": "head",
      "locator": "",
      "nspace": "",
      "name": "obj12"
    },
    {
      "errors": [
        "ss_attr_corrupted"
      ],
      "snap": "head",
      "locator": "",
      "nspace": "",
      "name": "obj15"
    },
    {
      "extra clones": [
        7,
        4
      ],
      "errors": [
        "ss_attr_missing",
        "extra_clones"
      ],
      "snap": "head",
      "locator": "",
      "nspace": "",
      "name": "obj2"
    },
    {
      "errors": [
        "size_mismatch"
      ],
      "snap": "head",
      "locator": "",
      "nspace": "",
      "name": "obj3"
    },
    {
      "missing": [
        7
      ],
      "errors": [
        "clone_missing"
      ],
      "snap": "head",
      "locator": "",
      "nspace": "",
      "name": "obj4"
    },
    {
      "missing": [
        2,
        1
      ],
      "extra clones": [
        7
      ],
      "errors": [
        "extra_clones",
        "clone_missing"
      ],
      "snap": "head",
      "locator": "",
      "nspace": "",
      "name": "obj5"
    },
    {
      "extra clones": [
        1
      ],
      "errors": [
        "extra_clones"
      ],
      "snap": "head",
      "locator": "",
      "nspace": "",
      "name": "obj6"
    },
    {
      "extra clones": [
        1
      ],
      "errors": [
        "head_mismatch",
        "extra_clones"
      ],
      "snap": "head",
      "locator": "",
      "nspace": "",
      "name": "obj7"
    },
    {
      "errors": [
        "snapset_mismatch"
      ],
      "snap": "head",
      "locator": "",
      "nspace": "",
      "name": "obj8"
    }
  ],
  "epoch": 20
}
EOF

    jq "$jqfilter" $dir/json | python -c "$sortkeys" > $dir/csjson
    diff ${DIFFCOLOPTS} $dir/checkcsjson $dir/csjson || return 1

    if which jsonschema > /dev/null;
    then
      jsonschema -i $dir/json $CEPH_ROOT/doc/rados/command/list-inconsistent-snap.json || return 1
    fi

    for i in `seq 1 7`
    do
        rados -p $poolname rmsnap snap$i
    done

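    # Removing the snapshots triggers snap trimming on the damaged objects;
    # verify below that the osd did not crash.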
    ERRORS=0

    # osd.0 is the only OSD, so match its pid file
    pidfile=$(find $dir 2>/dev/null | grep 'osd[^/]*\.pid')
    pid=$(cat $pidfile)
    if ! kill -0 $pid
    then
        echo "OSD crash occurred"
        tail -100 $dir/osd.0.log
        ERRORS=$(expr $ERRORS + 1)
    fi

    kill_daemons $dir || return 1

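    # Each regex below must appear in the osd log: one (or more) per injected
    # corruption, plus the final per-pg error count.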
    declare -a err_strings
    err_strings[0]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*::obj10:.* is missing in clone_overlap"
    err_strings[1]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*::obj5:7 no '_' attr"
    err_strings[2]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*::obj5:7 is an unexpected clone"
    err_strings[3]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*::obj5:4 on disk size [(]4608[)] does not match object info size [(]512[)] adjusted for ondisk to [(]512[)]"
    err_strings[4]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj5:head expected clone .*:::obj5:2"
    err_strings[5]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj5:head expected clone .*:::obj5:1"
    err_strings[6]="log_channel[(]cluster[)] log [[]INF[]] : scrub [0-9]*[.]0 .*:::obj5:head 2 missing clone[(]s[)]"
    err_strings[7]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj12:head snapset.head_exists=false, but head exists"
    err_strings[8]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj8:head snaps.seq not set"
    err_strings[9]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj7:head snapset.head_exists=false, but head exists"
    err_strings[10]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj7:1 is an unexpected clone"
    err_strings[11]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj3:head on disk size [(]3840[)] does not match object info size [(]768[)] adjusted for ondisk to [(]768[)]"
    err_strings[12]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj6:1 is an unexpected clone"
    err_strings[13]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj2:head no 'snapset' attr"
    err_strings[14]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj2:7 clone ignored due to missing snapset"
    err_strings[15]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj2:4 clone ignored due to missing snapset"
    err_strings[16]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj4:head expected clone .*:::obj4:7"
    err_strings[17]="log_channel[(]cluster[)] log [[]INF[]] : scrub [0-9]*[.]0 .*:::obj4:head 1 missing clone[(]s[)]"
    err_strings[18]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj1:1 is an unexpected clone"
    err_strings[19]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj9:1 is missing in clone_size"
    err_strings[20]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj11:1 is an unexpected clone"
    err_strings[21]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj14:1 size 1032 != clone_size 1033"
    err_strings[22]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 scrub 23 errors"
    err_strings[23]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj15:head can't decode 'snapset' attr buffer"
    err_strings[24]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj12:1 has no oi or legacy_snaps; cannot convert 1=[[]1[]]:[[]1[]].stray_clone_snaps=[{]1=[[]1[]][}]"

    for i in `seq 0 $(expr ${#err_strings[@]} - 1)`
    do
        if ! grep "${err_strings[$i]}" $dir/osd.0.log > /dev/null;
        then
            echo "Missing log message '${err_strings[$i]}'"
            ERRORS=$(expr $ERRORS + 1)
        fi
    done

    teardown $dir || return 1

    if [ $ERRORS != "0" ];
    then
        echo "TEST FAILED WITH $ERRORS ERRORS"
        return 1
    fi

    echo "TEST PASSED"
    return 0
}

main osd-scrub-snaps "$@"

# Local Variables:
# compile-command: "cd ../.. ; make -j4 && \
#    test/osd/osd-scrub-snaps.sh"