]>
Commit | Line | Data |
---|---|---|
0ee8118b BB |
1 | #!/bin/bash |
2 | # | |
3 | # ZPOOL fault verification test script. | |
4 | # | |
# The current suite of fault tests should not be thought of as an exhaustive
# list of failure modes. Rather it is simply a starting point which tries
# to cover the bulk of the 'easy', and hopefully common, failure modes.
#
# Additional tests should be added to the current suite as new interesting
# failure modes are observed. Additional failure modes I'd like to see
# tests for include, but are not limited to:
12 | # | |
13 | # * Slow but successful IO. | |
14 | # * SCSI sense codes generated as zevents. | |
15 | # * 4k sectors | |
16 | # * noise | |
17 | # * medium error | |
18 | # * recovered error | |
19 | # | |
# The current infrastructure uses the 'mdadm' faulty device and the
21 | # 'scsi_debug' simulated scsi devices. The idea is to inject the error | |
22 | # below the zfs stack to validate all the error paths. More targeted | |
23 | # failure testing should be added using the 'zinject' command line util. | |
24 | # | |
25 | # Requires the following packages: | |
26 | # * mdadm | |
27 | # * lsscsi | |
28 | # * sg3-utils | |
29 | # | |
30 | ||
# Locate and source the shared helper library.  It is expected to provide
# die(), fail(), run_test(), the cleanup_* helpers, and the ${ZPOOL}/${ZFS}/
# ${AWK}/etc. command variables used throughout this script.
# NOTE: "$0" is now quoted so the script works from a path containing spaces.
basedir="$(dirname "$0")"

SCRIPT_COMMON=common.sh
if [ -f "${basedir}/${SCRIPT_COMMON}" ]; then
	. "${basedir}/${SCRIPT_COMMON}"
else
	echo "Missing helper script ${SCRIPT_COMMON}" && exit 1
fi

PROG=zfault.sh
41 | ||
# Print command line usage to stdout.  The heredoc text is user-visible
# output and is left untouched.
usage() {
	cat << EOF
USAGE:
$0 [hvcts]

DESCRIPTION:
	ZPOOL fault verification tests

OPTIONS:
	-h      Show this message
	-v      Verbose
	-c      Cleanup md+lo+file devices at start
	-t <#>  Run listed tests
	-s <#>  Skip listed tests

EOF
}
59 | ||
# Parse command line options; see usage() for their meaning.
while getopts 'hvct:s:?' OPTION; do
	case $OPTION in
	h)
		usage
		exit 1
		;;
	v)
		# Enable verbose output in the shared helpers.
		VERBOSE=1
		;;
	c)
		# Request pre-test cleanup of md/loop/file devices.
		CLEANUP=1
		;;
	t)
		# Whitespace-separated list of test numbers to run.
		TESTS_RUN=($OPTARG)
		;;
	s)
		# Whitespace-separated list of test numbers to skip.
		TESTS_SKIP=($OPTARG)
		;;
	?)
		usage
		exit
		;;
	esac
done
84 | ||
# Fault injection requires device and module manipulation; root only.
if [ $(id -u) != 0 ]; then
	die "Must run as root"
fi

# Perform pre-cleanup if requested: unload the module stack, tear down any
# md/loop devices left over from a previous run, and remove stale pool
# cache files.
if [ ${CLEANUP} ]; then
	${ZFS_SH} -u
	cleanup_md_devices
	cleanup_loop_devices
	rm -f /tmp/zpool.cache.*
fi
96 | ||
# Check if we need to skip all md based tests.
MD_PARTITIONABLE=0
check_md_partitionable && MD_PARTITIONABLE=1
if [ ${MD_PARTITIONABLE} -eq 0 ]; then
	echo "Skipping tests 1-7 which require partitionable md devices"
fi

# Check if we need to skip all the scsi_debug tests.
SCSI_DEBUG=0
${INFOMOD} scsi_debug &>/dev/null && SCSI_DEBUG=1
if [ ${SCSI_DEBUG} -eq 0 ]; then
	echo "Skipping tests 8-9 which require the scsi_debug module"
fi

# Blank line to separate any skip notices from the result table below.
if [ ${MD_PARTITIONABLE} -eq 0 ] || [ ${SCSI_DEBUG} -eq 0 ]; then
	echo
fi

# Header row for the per-configuration result table the tests fill in.
printf "%40s%s\t%s\t%s\t%s\t%s\n" "" "raid0" "raid10" "raidz" "raidz2" "raidz3"
116 | ||
# Emit a green "Pass" cell (tab-terminated, no newline) for the result table.
pass_nonewline() {
	printf '%bPass%b\t' "${COLOR_GREEN}" "${COLOR_RESET}"
}
120 | ||
# Emit a brown "Skip" cell (tab-terminated, no newline) for the result table.
skip_nonewline() {
	printf '%bSkip%b\t' "${COLOR_BROWN}" "${COLOR_RESET}"
}
124 | ||
# Print the name of the NTH vdev of the given type in a pool, as listed
# by 'zpool status'.
# $1  pool name
# $2  device type prefix to match (grep pattern, e.g. "md" or "sd")
# $3  1-based index of the matching device to return
#
# FIX: removed the stray "${TMP_STATUS}" argument to grep.  When that
# variable was set, grep read the named file instead of the piped
# 'zpool status' output, returning stale results.
nth_zpool_vdev() {
	local POOL_NAME=$1
	local DEVICE_TYPE=$2
	local DEVICE_NTH=$3

	${ZPOOL} status ${POOL_NAME} | grep ${DEVICE_TYPE} | \
	    head -n${DEVICE_NTH} | tail -n1 | ${AWK} "{ print \$1 }"
}
133 | ||
# Print the STATE column from 'zpool status' for every line matching the
# named vdev.
# $1  pool name
# $2  vdev name (awk regex)
vdev_status() {
	local pool=$1
	local vdev=$2
	local script="/${vdev}/ { print \$2 }"

	${ZPOOL} status ${pool} | ${AWK} "${script}"
}
140 | ||
141 | # Required format is x.yz[KMGTP] | |
142 | expand_numeric_suffix() { | |
143 | local VALUE=$1 | |
144 | ||
145 | VALUE=`echo "${VALUE/%K/*1000}"` | |
146 | VALUE=`echo "${VALUE/%M/*1000000}"` | |
147 | VALUE=`echo "${VALUE/%G/*1000000000}"` | |
148 | VALUE=`echo "${VALUE/%T/*1000000000000}"` | |
149 | VALUE=`echo "${VALUE/%P/*1000000000000000}"` | |
150 | VALUE=`echo "${VALUE}" | bc | cut -d'.' -f1` | |
151 | ||
152 | echo "${VALUE}" | |
153 | } | |
154 | ||
# Print the READ error count for the named vdev as a plain integer
# (suffixes such as 1.2K are expanded).
vdev_read_errors() {
	local pool=$1
	local vdev=$2
	local errors=$(${ZPOOL} status ${pool} | \
	    ${AWK} "/${vdev}/ { print \$3 }")

	expand_numeric_suffix ${errors}
}
163 | ||
# Print the WRITE error count for the named vdev as a plain integer
# (suffixes such as 1.2K are expanded).
vdev_write_errors() {
	local pool=$1
	local vdev=$2
	local errors=$(${ZPOOL} status ${pool} | \
	    ${AWK} "/${vdev}/ { print \$4 }")

	expand_numeric_suffix ${errors}
}
172 | ||
# Print the CKSUM error count for the named vdev as a plain integer
# (suffixes such as 1.2K are expanded).
vdev_cksum_errors() {
	local pool=$1
	local vdev=$2
	local errors=$(${ZPOOL} status ${pool} | \
	    ${AWK} "/${vdev}/ { print \$5 }")

	expand_numeric_suffix ${errors}
}
181 | ||
# Print the overall pool state (first "state:" line of 'zpool status',
# e.g. ONLINE or DEGRADED).
zpool_state() {
	local pool=$1
	local script="/state/ { print \$2; exit }"

	${ZPOOL} status ${pool} | ${AWK} "${script}"
}
187 | ||
# Print the value of EVENT_KEY from the first 'zpool events' record whose
# text matches EVENT_NAME.
# $1  event class name to match (e.g. "zfs.io")
# $2  key within the record (e.g. "zio_err")
zpool_event() {
	local EVENT_NAME=$1
	local EVENT_KEY=$2
	# FIX: declared local; these awk scripts previously leaked into the
	# global namespace on every call.
	# SCRIPT1 treats blank-line separated records as units and prints the
	# first record matching the event name; SCRIPT2 extracts the value
	# after '=' on the first line matching the key.
	local SCRIPT1="BEGIN {RS=\"\"; FS=\"\n\"} /${EVENT_NAME}/ { print \$0; exit }"
	local SCRIPT2="BEGIN {FS=\"=\"} /${EVENT_KEY}/ { print \$2; exit }"

	${ZPOOL} events -vH | ${AWK} "${SCRIPT1}" | ${AWK} "${SCRIPT2}"
}
197 | ||
# Print the error count from the last completed scrub or resilver scan
# line of 'zpool status'.  Only one of the two lines is present at a time,
# so at most one count is printed.
zpool_scan_errors() {
	local pool=$1

	${ZPOOL} status ${pool} | ${AWK} '/scan: scrub/ { print $8 }'
	${ZPOOL} status ${pool} | ${AWK} '/scan: resilver/ { print $7 }'
}
204 | ||
# Create a temporary file of random data and print its name to stdout.
# $1  dd block size
# $2  dd block count
# Exits with dd's status.
pattern_create() {
	local bs=$1
	local count=$2
	local name=$(mktemp -p /tmp zpool.pattern.XXXXXXXX)

	echo ${name}
	dd if=/dev/urandom of=${name} bs=${bs} count=${count} &>/dev/null
}
215 | ||
# Write a known pattern file to a device.
# $1  pattern file (source)
# $2  dd block size
# $3  dd block count
# $4  destination device
# Returns dd's exit status.
pattern_write() {
	local PATTERN_NAME=$1
	local PATTERN_BLOCK_SIZE=$2
	local PATTERN_BLOCK_COUNT=$3
	local DEVICE_NAME=$4

	# oflag=direct bypasses the page cache so the IO (and any injected
	# fault beneath it) is exercised immediately.
	dd if=${PATTERN_NAME} of=${DEVICE_NAME} bs=${PATTERN_BLOCK_SIZE} \
	    count=${PATTERN_BLOCK_COUNT} oflag=direct &>/dev/null
	return $?
}
226 | ||
# As pattern_write(), but run dd in the background so the caller can
# continue while the IO is (potentially) blocked by an injected fault.
# $1  pattern file  $2  dd block size  $3  dd block count  $4  device
# NOTE(review): "$?" after launching a background job reflects the
# backgrounding itself (normally 0), not dd's eventual status; callers
# are expected to 'wait' after clearing the fault.
pattern_write_bg() {
	local PATTERN_NAME=$1
	local PATTERN_BLOCK_SIZE=$2
	local PATTERN_BLOCK_COUNT=$3
	local DEVICE_NAME=$4

	dd if=${PATTERN_NAME} of=${DEVICE_NAME} bs=${PATTERN_BLOCK_SIZE} \
	    count=${PATTERN_BLOCK_COUNT} oflag=direct &>/dev/null &
	return $?
}
237 | ||
# Read a pattern back from a device and compare it against the original
# pattern file.
# $1  pattern file  $2  dd block size  $3  dd block count  $4  device
# Returns cmp's exit status: 0 when the device contents match the pattern.
pattern_verify() {
	local PATTERN_NAME=$1
	local PATTERN_BLOCK_SIZE=$2
	local PATTERN_BLOCK_COUNT=$3
	local DEVICE_NAME=$4
	local DEVICE_FILE=`mktemp -p /tmp zpool.pattern.XXXXXXXX`

	# iflag=direct bypasses the page cache so we really read the device
	# rather than previously cached data.
	dd if=${DEVICE_NAME} of=${DEVICE_FILE} bs=${PATTERN_BLOCK_SIZE} \
	    count=${PATTERN_BLOCK_COUNT} iflag=direct &>/dev/null
	cmp -s ${PATTERN_NAME} ${DEVICE_FILE}
	RC=$?
	rm -f ${DEVICE_FILE}

	return ${RC}
}
253 | ||
# Remove a pattern file created by pattern_create().
# Exits with rm's status.
pattern_remove() {
	local name=$1

	rm -f ${name}
}
260 | ||
# Inject a failure into an md 'faulty' level device.
# $1  md device name (without the /dev/ prefix)
# $2  mdadm faulty layout, e.g. write-transient, write-persistent,
#     read-transient, read-persistent, read-fixable, write-all
# Returns mdadm's exit status.
fault_set_md() {
	local VDEV_FAULTY=$1
	local FAULT_TYPE=$2

	${MDADM} /dev/${VDEV_FAULTY} --grow --level=faulty \
	    --layout=${FAULT_TYPE} >/dev/null
	return $?
}
269 | ||
# Clear all injected failures on an md 'faulty' level device.
# $1  md device name (without the /dev/ prefix)
# Returns the first failing mdadm status, or the status of the flush.
fault_clear_md() {
	local VDEV_FAULTY=$1

	# Clear all failure injection.  'clear' stops injecting new faults;
	# 'flush' discards any still-pending injected faults.
	${MDADM} /dev/${VDEV_FAULTY} --grow --level=faulty \
	    --layout=clear >/dev/null || return $?
	${MDADM} /dev/${VDEV_FAULTY} --grow --level=faulty \
	    --layout=flush >/dev/null || return $?
	return $?
}
280 | ||
# Configure the scsi_debug module to inject errors.
# $1  scsi_debug 'opts' bitmask (e.g. 4 injects a command timeout)
# $2  inject on every Nth command
fault_set_sd() {
	local OPTS=$1
	local NTH=$2

	echo ${OPTS} >/sys/bus/pseudo/drivers/scsi_debug/opts
	echo ${NTH} >/sys/bus/pseudo/drivers/scsi_debug/every_nth
}
288 | ||
# Disable all scsi_debug error injection.
fault_clear_sd() {
	echo 0 >/sys/bus/pseudo/drivers/scsi_debug/every_nth
	echo 0 >/sys/bus/pseudo/drivers/scsi_debug/opts
}
293 | ||
# Common per-test setup: load the module stack, create the pool from the
# named configuration, and create a small zvol to exercise.
# $1  pool name
# $2  pool configuration (passed to zpool-create.sh -c)
# $3  zvol name
# $4  temporary file used as spa_config_path
# Calls fail() (which does not return) on any error.
test_setup() {
	local POOL_NAME=$1
	local POOL_CONFIG=$2
	local ZVOL_NAME=$3
	local TMP_CACHE=$4

	${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
	${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c ${POOL_CONFIG} || fail 2
	${ZFS} create -V 64M ${POOL_NAME}/${ZVOL_NAME} || fail 3

	# Trigger udev and re-read the partition table to ensure all of
	# this IO is out of the way before we begin injecting failures.
	udev_trigger || fail 4
	${BLOCKDEV} --rereadpt /dev/${POOL_NAME}/${ZVOL_NAME} || fail 5
}
309 | ||
# Common per-test teardown: destroy the zvol and pool, unload the module
# stack, and remove the temporary cache file.  Mirrors test_setup().
# $1  pool name  $2  pool configuration  $3  zvol name  $4  cache file
test_cleanup() {
	local POOL_NAME=$1
	local POOL_CONFIG=$2
	local ZVOL_NAME=$3
	local TMP_CACHE=$4

	${ZFS} destroy ${POOL_NAME}/${ZVOL_NAME} || fail 101
	${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c ${POOL_CONFIG} -d || fail 102
	${ZFS_SH} -u || fail 103
	rm -f ${TMP_CACHE} || fail 104
}
321 | ||
# Verify a transient (soft) write error on one vdev: the application must
# not observe an error, no error is counted in 'zpool status', but an EIO
# zevent is still posted.
# $1  pool name  $2  pool configuration  $3  1 if the config is redundant
test_write_soft() {
	local POOL_NAME=$1
	local POOL_CONFIG=$2
	local POOL_REDUNDANT=$3
	local ZVOL_NAME="zvol"
	local ZVOL_DEVICE="/dev/${POOL_NAME}/${ZVOL_NAME}"

	if [ ${MD_PARTITIONABLE} -eq 0 ]; then
		skip_nonewline
		return
	fi

	local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
	test_setup ${POOL_NAME} ${POOL_CONFIG} ${ZVOL_NAME} ${TMP_CACHE}

	# Set soft write failure for first vdev device.
	local VDEV_FAULTY=`nth_zpool_vdev ${POOL_NAME} md 1`
	fault_set_md ${VDEV_FAULTY} write-transient

	# The application must not observe an error.
	local TMP_PATTERN=`pattern_create 1M 8` || fail 11
	pattern_write ${TMP_PATTERN} 1M 8 ${ZVOL_DEVICE} || fail 12
	fault_clear_md ${VDEV_FAULTY}

	# Soft errors will not be logged to 'zpool status'
	local WRITE_ERRORS=`vdev_write_errors ${POOL_NAME} ${VDEV_FAULTY}`
	test ${WRITE_ERRORS} -eq 0 || fail 13

	# Soft errors will still generate an EIO (5) event.
	test `zpool_event "zfs.io" "zio_err"` = "0x5" || fail 14

	# Verify the known pattern.
	pattern_verify ${TMP_PATTERN} 1M 8 ${ZVOL_DEVICE} || fail 15
	pattern_remove ${TMP_PATTERN} || fail 16

	test_cleanup ${POOL_NAME} ${POOL_CONFIG} ${ZVOL_NAME} ${TMP_CACHE}
	pass_nonewline
}
360 | ||
# Test 1: soft (transient) write error against each pool configuration.
test_1() {
	test_write_soft tank lo-faulty-raid0  0
	test_write_soft tank lo-faulty-raid10 1
	test_write_soft tank lo-faulty-raidz  1
	test_write_soft tank lo-faulty-raidz2 1
	test_write_soft tank lo-faulty-raidz3 1
	echo
}
run_test 1 "soft write error"
371 | ||
# Verify a persistent (hard) write error on one vdev.  The application
# never observes the error; redundant pools suppress the error count while
# non-redundant pools log it and self-heal the damaged sectors.
# $1  pool name  $2  pool configuration  $3  1 if the config is redundant
test_write_hard() {
	local POOL_NAME=$1
	local POOL_CONFIG=$2
	local POOL_REDUNDANT=$3
	local ZVOL_NAME="zvol"
	local ZVOL_DEVICE="/dev/${POOL_NAME}/${ZVOL_NAME}"

	if [ ${MD_PARTITIONABLE} -eq 0 ]; then
		skip_nonewline
		return
	fi

	local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
	test_setup ${POOL_NAME} ${POOL_CONFIG} ${ZVOL_NAME} ${TMP_CACHE}

	# Set hard write failure for first vdev device.
	local VDEV_FAULTY=`nth_zpool_vdev ${POOL_NAME} md 1`
	fault_set_md ${VDEV_FAULTY} write-persistent

	# The application must not observe an error.
	local TMP_PATTERN=`pattern_create 1M 8` || fail 11
	pattern_write ${TMP_PATTERN} 1M 8 ${ZVOL_DEVICE} || fail 12
	fault_clear_md ${VDEV_FAULTY}

	local WRITE_ERRORS=`vdev_write_errors ${POOL_NAME} ${VDEV_FAULTY}`
	if [ ${POOL_REDUNDANT} -eq 1 ]; then
		# For redundant configurations hard errors will not be
		# logged to 'zpool status' but will generate EIO events.
		test ${WRITE_ERRORS} -eq 0 || fail 21
		test `zpool_event "zfs.io" "zio_err"` = "0x5" || fail 22
	else
		# For non-redundant configurations hard errors will be
		# logged to 'zpool status' and generate EIO events.  They
		# will also trigger a scrub of the impacted sectors.
		# Give the triggered resilver time to complete.
		sleep 10
		test ${WRITE_ERRORS} -gt 0 || fail 31
		test `zpool_event "zfs.io" "zio_err"` = "0x5" || fail 32
		test `zpool_event "zfs.resilver.start" "ena"` != "" || fail 33
		test `zpool_event "zfs.resilver.finish" "ena"` != "" || fail 34
		test `zpool_scan_errors ${POOL_NAME}` -eq 0 || fail 35
	fi

	# Verify the known pattern.
	pattern_verify ${TMP_PATTERN} 1M 8 ${ZVOL_DEVICE} || fail 41
	pattern_remove ${TMP_PATTERN} || fail 42

	test_cleanup ${POOL_NAME} ${POOL_CONFIG} ${ZVOL_NAME} ${TMP_CACHE}
	pass_nonewline
}
421 | ||
# Test 2: hard (persistent) write error against each pool configuration.
test_2() {
	test_write_hard tank lo-faulty-raid0  0
	test_write_hard tank lo-faulty-raid10 1
	test_write_hard tank lo-faulty-raidz  1
	test_write_hard tank lo-faulty-raidz2 1
	test_write_hard tank lo-faulty-raidz3 1
	echo
}
run_test 2 "hard write error"
432 | ||
# Verify failure of every write to one vdev.  Redundant pools fault the
# vdev and degrade; non-redundant pools block the writer until the fault
# is repaired and 'zpool clear' is run.
# $1  pool name  $2  pool configuration  $3  1 if the config is redundant
test_write_all() {
	local POOL_NAME=$1
	local POOL_CONFIG=$2
	local POOL_REDUNDANT=$3
	local ZVOL_NAME="zvol"
	local ZVOL_DEVICE="/dev/${POOL_NAME}/${ZVOL_NAME}"

	if [ ${MD_PARTITIONABLE} -eq 0 ]; then
		skip_nonewline
		return
	fi

	local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
	test_setup ${POOL_NAME} ${POOL_CONFIG} ${ZVOL_NAME} ${TMP_CACHE}

	# Set all write failures for first vdev device.
	local VDEV_FAULTY=`nth_zpool_vdev ${POOL_NAME} md 1`
	fault_set_md ${VDEV_FAULTY} write-all

	local TMP_PATTERN=`pattern_create 1M 8` || fail 11
	if [ ${POOL_REDUNDANT} -eq 1 ]; then
		# The application must not observe an error.
		pattern_write ${TMP_PATTERN} 1M 8 ${ZVOL_DEVICE} || fail 12
	else
		# The application is expected to hang in the background until
		# the faulty device is repaired and 'zpool clear' is run.
		pattern_write_bg ${TMP_PATTERN} 1M 8 ${ZVOL_DEVICE} || fail 13
		sleep 10
	fi
	fault_clear_md ${VDEV_FAULTY}

	local WRITE_ERRORS=`vdev_write_errors ${POOL_NAME} ${VDEV_FAULTY}`
	local VDEV_STATUS=`vdev_status ${POOL_NAME} ${VDEV_FAULTY}`
	local POOL_STATE=`zpool_state ${POOL_NAME}`
	# For all configurations write errors are logged to 'zpool status',
	# and EIO events are generated.  However, only a redundant config
	# will cause the vdev to be FAULTED and pool DEGRADED.  In a non-
	# redundant config the IO will hang until 'zpool clear' is run.
	test ${WRITE_ERRORS} -gt 0 || fail 14
	test `zpool_event "zfs.io" "zio_err"` = "0x5" || fail 15

	if [ ${POOL_REDUNDANT} -eq 1 ]; then
		test "${VDEV_STATUS}" = "FAULTED" || fail 21
		test "${POOL_STATE}" = "DEGRADED" || fail 22
	else
		# Confirm the background writer is still blocked, then
		# unblock it with 'zpool clear' and reap it.
		BLOCKED=`ps a | grep "${ZVOL_DEVICE}" | grep -c -v "grep"`
		${ZPOOL} clear ${POOL_NAME} || fail 31
		test ${BLOCKED} -eq 1 || fail 32
		wait
	fi

	# Verify the known pattern.
	pattern_verify ${TMP_PATTERN} 1M 8 ${ZVOL_DEVICE} || fail 41
	pattern_remove ${TMP_PATTERN} || fail 42

	test_cleanup ${POOL_NAME} ${POOL_CONFIG} ${ZVOL_NAME} ${TMP_CACHE}
	pass_nonewline
}
491 | ||
# Test 3: all writes fail on one vdev, against each pool configuration.
test_3() {
	test_write_all tank lo-faulty-raid0  0
	test_write_all tank lo-faulty-raid10 1
	test_write_all tank lo-faulty-raidz  1
	test_write_all tank lo-faulty-raidz2 1
	test_write_all tank lo-faulty-raidz3 1
	echo
}
run_test 3 "all write errors"
502 | ||
# Verify transient (soft) read errors: reads are retried so the application
# never sees an error, nothing is counted in 'zpool status', but an EIO
# zevent is posted.
# $1  pool name  $2  pool configuration  $3  1 if the config is redundant
test_read_soft() {
	local POOL_NAME=$1
	local POOL_CONFIG=$2
	local POOL_REDUNDANT=$3
	local ZVOL_NAME="zvol"
	local ZVOL_DEVICE="/dev/${POOL_NAME}/${ZVOL_NAME}"
	local READ_ERRORS=0

	if [ ${MD_PARTITIONABLE} -eq 0 ]; then
		skip_nonewline
		return
	fi

	local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
	test_setup ${POOL_NAME} ${POOL_CONFIG} ${ZVOL_NAME} ${TMP_CACHE}

	# Create a pattern to be verified during a read error.
	local TMP_PATTERN=`pattern_create 1M 8` || fail 11
	pattern_write ${TMP_PATTERN} 1M 8 ${ZVOL_DEVICE} || fail 12

	# Set soft read failure for all the vdevs to ensure we hit it.
	for (( i=1; i<=4; i++ )); do
		fault_set_md `nth_zpool_vdev ${POOL_NAME} md $i` read-transient
	done

	pattern_verify ${TMP_PATTERN} 1M 8 ${ZVOL_DEVICE} || fail 13
	pattern_remove ${TMP_PATTERN} || fail 14

	# Clear all failure injection and sum read errors.
	for (( i=1; i<=4; i++ )); do
		local VDEV_FAULTY=`nth_zpool_vdev ${POOL_NAME} md $i`
		local VDEV_ERRORS=`vdev_read_errors ${POOL_NAME} ${VDEV_FAULTY}`
		let READ_ERRORS=${READ_ERRORS}+${VDEV_ERRORS}
		fault_clear_md ${VDEV_FAULTY}
	done

	# Soft errors will not be logged to 'zpool status'.
	test ${READ_ERRORS} -eq 0 || fail 15

	# Soft errors will still generate an EIO (5) event.
	test `zpool_event "zfs.io" "zio_err"` = "0x5" || fail 16

	test_cleanup ${POOL_NAME} ${POOL_CONFIG} ${ZVOL_NAME} ${TMP_CACHE}
	pass_nonewline
}
548 | ||
# Test 4: soft (transient) read error against each pool configuration.
test_4() {
	test_read_soft tank lo-faulty-raid0  0
	test_read_soft tank lo-faulty-raid10 1
	test_read_soft tank lo-faulty-raidz  1
	test_read_soft tank lo-faulty-raidz2 1
	test_read_soft tank lo-faulty-raidz3 1
	echo
}
run_test 4 "soft read error"
559 | ||
# Verify a persistent (hard) read error on one vdev.  Redundant pools
# reconstruct the data with no visible error; non-redundant pools report
# read errors and permanent damage after a scrub.
# $1  pool name  $2  pool configuration  $3  1 if the config is redundant
test_read_hard() {
	local POOL_NAME=$1
	local POOL_CONFIG=$2
	local POOL_REDUNDANT=$3
	local ZVOL_NAME="zvol"
	local ZVOL_DEVICE="/dev/${POOL_NAME}/${ZVOL_NAME}"
	local READ_ERRORS=0

	if [ ${MD_PARTITIONABLE} -eq 0 ]; then
		skip_nonewline
		return
	fi

	local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
	test_setup ${POOL_NAME} ${POOL_CONFIG} ${ZVOL_NAME} ${TMP_CACHE}

	# Create a pattern to be verified during a read error.
	local TMP_PATTERN=`pattern_create 1M 8` || fail 11
	pattern_write ${TMP_PATTERN} 1M 8 ${ZVOL_DEVICE} || fail 12

	# Set hard read failure for the fourth vdev.
	local VDEV_FAULTY=`nth_zpool_vdev ${POOL_NAME} md 4`
	fault_set_md ${VDEV_FAULTY} read-persistent

	# For a redundant pool there must be no IO error, for a non-redundant
	# pool we expect permanent damage and an IO error during verify, unless
	# we get exceptionally lucky and have just damaged redundant metadata.
	if [ ${POOL_REDUNDANT} -eq 1 ]; then
		pattern_verify ${TMP_PATTERN} 1M 8 ${ZVOL_DEVICE} || fail 21
		local READ_ERRORS=`vdev_read_errors ${POOL_NAME} ${VDEV_FAULTY}`
		test ${READ_ERRORS} -eq 0 || fail 22
	else
		# Verify may fail here; the scrub below surfaces the damage.
		pattern_verify ${TMP_PATTERN} 1M 8 ${ZVOL_DEVICE}
		${ZPOOL} scrub ${POOL_NAME} || fail 32
		local READ_ERRORS=`vdev_read_errors ${POOL_NAME} ${VDEV_FAULTY}`
		test ${READ_ERRORS} -gt 0 || fail 33
		${ZPOOL} status -v ${POOL_NAME} | \
		    grep -A8 "Permanent errors" | \
		    grep -q "${POOL_NAME}" || fail 34
	fi
	pattern_remove ${TMP_PATTERN} || fail 41

	# Clear all failure injection and sum read errors.
	fault_clear_md ${VDEV_FAULTY}

	# Hard errors will generate an EIO (5) event.
	test `zpool_event "zfs.io" "zio_err"` = "0x5" || fail 42

	test_cleanup ${POOL_NAME} ${POOL_CONFIG} ${ZVOL_NAME} ${TMP_CACHE}
	pass_nonewline
}
611 | ||
# Test 5: hard (persistent) read error against each pool configuration.
test_5() {
	test_read_hard tank lo-faulty-raid0  0
	test_read_hard tank lo-faulty-raid10 1
	test_read_hard tank lo-faulty-raidz  1
	test_read_hard tank lo-faulty-raidz2 1
	test_read_hard tank lo-faulty-raidz3 1
	echo
}
run_test 5 "hard read error"
622 | ||
# Fixable read error: identical to test_read_hard() except the injected
# fault is 'read-fixable', i.e. a read error which a subsequent write to
# the same sector repairs.
# $1  pool name  $2  pool configuration  $3  1 if the config is redundant
test_read_fixable() {
	local POOL_NAME=$1
	local POOL_CONFIG=$2
	local POOL_REDUNDANT=$3
	local ZVOL_NAME="zvol"
	local ZVOL_DEVICE="/dev/${POOL_NAME}/${ZVOL_NAME}"
	local READ_ERRORS=0

	if [ ${MD_PARTITIONABLE} -eq 0 ]; then
		skip_nonewline
		return
	fi

	local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
	test_setup ${POOL_NAME} ${POOL_CONFIG} ${ZVOL_NAME} ${TMP_CACHE}

	# Create a pattern to be verified during a read error.
	local TMP_PATTERN=`pattern_create 1M 8` || fail 11
	pattern_write ${TMP_PATTERN} 1M 8 ${ZVOL_DEVICE} || fail 12

	# Set fixable read failure for the fourth vdev.
	local VDEV_FAULTY=`nth_zpool_vdev ${POOL_NAME} md 4`
	fault_set_md ${VDEV_FAULTY} read-fixable

	# For a redundant pool there must be no IO error, for a non-redundant
	# pool we expect permanent damage and an IO error during verify, unless
	# we get exceptionally lucky and have just damaged redundant metadata.
	if [ ${POOL_REDUNDANT} -eq 1 ]; then
		pattern_verify ${TMP_PATTERN} 1M 8 ${ZVOL_DEVICE} || fail 21
		local READ_ERRORS=`vdev_read_errors ${POOL_NAME} ${VDEV_FAULTY}`
		test ${READ_ERRORS} -eq 0 || fail 22
	else
		# Verify may fail here; the scrub below surfaces the damage.
		pattern_verify ${TMP_PATTERN} 1M 8 ${ZVOL_DEVICE}
		${ZPOOL} scrub ${POOL_NAME} || fail 32
		local READ_ERRORS=`vdev_read_errors ${POOL_NAME} ${VDEV_FAULTY}`
		test ${READ_ERRORS} -gt 0 || fail 33
		${ZPOOL} status -v ${POOL_NAME} | \
		    grep -A8 "Permanent errors" | \
		    grep -q "${POOL_NAME}" || fail 34
	fi
	pattern_remove ${TMP_PATTERN} || fail 41

	# Clear all failure injection and sum read errors.
	fault_clear_md ${VDEV_FAULTY}

	# Hard errors will generate an EIO (5) event.
	test `zpool_event "zfs.io" "zio_err"` = "0x5" || fail 42

	test_cleanup ${POOL_NAME} ${POOL_CONFIG} ${ZVOL_NAME} ${TMP_CACHE}
	pass_nonewline
}
675 | ||
# Test 6: read errors fixable with a write, against each configuration.
test_6() {
	test_read_fixable tank lo-faulty-raid0  0
	test_read_fixable tank lo-faulty-raid10 1
	test_read_fixable tank lo-faulty-raidz  1
	test_read_fixable tank lo-faulty-raidz2 1
	test_read_fixable tank lo-faulty-raidz3 1
	echo
}
run_test 6 "fixable read error"
686 | ||
# Verify detection and (for redundant pools) repair of silent data
# corruption: overwrite listed vdevs with random garbage and confirm the
# checksum machinery catches it.
# $1  pool name  $2  pool configuration  $3  1 if the config is redundant
# $4  whitespace-separated list of vdev indices to damage
test_cksum() {
	local POOL_NAME=$1
	local POOL_CONFIG=$2
	local POOL_REDUNDANT=$3
	local VDEV_DAMAGE="$4"
	local ZVOL_NAME="zvol"
	local ZVOL_DEVICE="/dev/${POOL_NAME}/${ZVOL_NAME}"

	if [ ${MD_PARTITIONABLE} -eq 0 ]; then
		skip_nonewline
		return
	fi

	local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
	test_setup ${POOL_NAME} ${POOL_CONFIG} ${ZVOL_NAME} ${TMP_CACHE}

	# Create a pattern to be verified.
	local TMP_PATTERN=`pattern_create 1M 8` || fail 11
	pattern_write ${TMP_PATTERN} 1M 8 ${ZVOL_DEVICE} || fail 12

	# Verify the pattern and that no vdev has cksum errors.
	pattern_verify ${TMP_PATTERN} 1M 8 ${ZVOL_DEVICE} || fail 13
	for (( i=1; i<4; i++ )); do
		VDEV_FAULTY=`nth_zpool_vdev ${POOL_NAME} md ${i}`
		CKSUM_ERRORS=`vdev_cksum_errors ${POOL_NAME} ${VDEV_FAULTY}`
		test ${CKSUM_ERRORS} -eq 0 || fail 14
	done

	# Corrupt the bulk of a vdev with random garbage, we damage as many
	# vdevs as we have levels of redundancy.  For example for a raidz3
	# configuration we can trash 3 vdevs and still expect correct data.
	# This improves the odds that we read one of the damaged vdevs.
	for VDEV in ${VDEV_DAMAGE}; do
		VDEV_FAULTY=`nth_zpool_vdev ${POOL_NAME} md $VDEV`
		pattern_write /dev/urandom 1M 64 /dev/${VDEV_FAULTY}p1
	done

	# Verify the pattern is still correct.  For non-redundant pools
	# expect failure and for redundant pools success due to resilvering.
	if [ ${POOL_REDUNDANT} -eq 1 ]; then
		pattern_verify ${TMP_PATTERN} 1M 8 ${ZVOL_DEVICE} || fail 16
	else
		pattern_verify ${TMP_PATTERN} 1M 8 ${ZVOL_DEVICE} && fail 17
	fi

	# VDEV_FAULTY is the last vdev damaged by the loop above.
	CKSUM_ERRORS=`vdev_cksum_errors ${POOL_NAME} ${VDEV_FAULTY}`
	test ${CKSUM_ERRORS} -gt 0 || fail 18
	STATUS=`vdev_status ${POOL_NAME} ${VDEV_FAULTY}`
	test "${STATUS}" = "ONLINE" || fail 19

	# The checksum errors must be logged as an event.
	local CKSUM_ERRORS=`zpool_event "zfs.checksum" "zio_err"`
	test ${CKSUM_ERRORS} = "0x34" || test ${CKSUM_ERRORS} = "0x0" || fail 20

	# Verify permanent errors for non-redundant pools, and for redundant
	# pools trigger a scrub and check that all checksums have been fixed.
	if [ ${POOL_REDUNDANT} -eq 1 ]; then
		# Scrub the checksum errors and clear the faults.
		${ZPOOL} scrub ${POOL_NAME} || fail 21
		sleep 3
		${ZPOOL} clear ${POOL_NAME} || fail 22

		# Re-verify the pattern for fixed checksums.
		pattern_verify ${TMP_PATTERN} 1M 8 ${ZVOL_DEVICE} || fail 23
		CKSUM_ERRORS=`vdev_cksum_errors ${POOL_NAME} ${VDEV_FAULTY}`
		test ${CKSUM_ERRORS} -eq 0 || fail 24

		# Re-verify the entire pool for fixed checksums.
		${ZPOOL} scrub ${POOL_NAME} || fail 25
		CKSUM_ERRORS=`vdev_cksum_errors ${POOL_NAME} ${VDEV_FAULTY}`
		test ${CKSUM_ERRORS} -eq 0 || fail 26
	else
		${ZPOOL} status -v ${POOL_NAME} | \
		    grep -A8 "Permanent errors" | \
		    grep -q "${POOL_NAME}/${ZVOL_NAME}" || fail 31
		${ZPOOL} clear ${POOL_NAME} || fail 32
	fi
	pattern_remove ${TMP_PATTERN} || fail 41

	test_cleanup ${POOL_NAME} ${POOL_CONFIG} ${ZVOL_NAME} ${TMP_CACHE}
	pass_nonewline
}
769 | ||
# Test 7: silent data corruption; the damaged-vdev list matches each
# configuration's redundancy level.
test_7() {
	test_cksum tank lo-faulty-raid0  0 "1"
	test_cksum tank lo-faulty-raid10 1 "1 3"
	test_cksum tank lo-faulty-raidz  1 "4"
	test_cksum tank lo-faulty-raidz2 1 "3 4"
	test_cksum tank lo-faulty-raidz3 1 "2 3 4"
	echo
}
run_test 7 "silent data corruption"
780 | ||
# Soft write timeout at the scsi device layer: every Nth command times
# out; the application must not see an error but a 'zfs.delay' zevent
# must be posted for the slow IO.
# $1  pool name  $2  pool configuration  $3  1 if redundant
# $4  fail every Nth scsi command
test_write_timeout_soft() {
	local POOL_NAME=$1
	local POOL_CONFIG=$2
	local POOL_REDUNDANT=$3
	local POOL_NTH=$4
	local ZVOL_NAME="zvol"
	local ZVOL_DEVICE="/dev/${POOL_NAME}/${ZVOL_NAME}"

	if [ ${SCSI_DEBUG} -eq 0 ]; then
		skip_nonewline
		return
	fi

	local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
	test_setup ${POOL_NAME} ${POOL_CONFIG} ${ZVOL_NAME} ${TMP_CACHE}

	# Set timeout(0x4) for every nth command.
	fault_set_sd 4 ${POOL_NTH}

	# The application must not observe an error.
	local TMP_PATTERN=`pattern_create 1M 8` || fail 11
	pattern_write ${TMP_PATTERN} 1M 8 ${ZVOL_DEVICE} || fail 12
	fault_clear_sd

	# Intermittent write timeouts even with FAILFAST set may not cause
	# an EIO (5) event.  This is because how FAILFAST is handled depends
	# a lot on the low level driver and the exact nature of the failure.
	# We will however see a 'zfs.delay' event logged due to the timeout.
	VDEV_DELAY=`zpool_event "zfs.delay" "zio_delay"`
	test `printf "%d" ${VDEV_DELAY}` -ge 30000 || fail 13

	# Verify the known pattern.
	pattern_verify ${TMP_PATTERN} 1M 8 ${ZVOL_DEVICE} || fail 14
	pattern_remove ${TMP_PATTERN} || fail 15

	test_cleanup ${POOL_NAME} ${POOL_CONFIG} ${ZVOL_NAME} ${TMP_CACHE}
	pass_nonewline
}
820 | ||
# Test 8: soft scsi write timeout; the per-config Nth values scale with
# the number of devices in each configuration.
test_8() {
	test_write_timeout_soft tank scsi_debug-raid0  0 50
	test_write_timeout_soft tank scsi_debug-raid10 1 100
	test_write_timeout_soft tank scsi_debug-raidz  1 75
	test_write_timeout_soft tank scsi_debug-raidz2 1 150
	test_write_timeout_soft tank scsi_debug-raidz3 1 300
	echo
}
run_test 8 "soft write timeout"
830 | ||
# Persistent write timeout at the scsi device layer.
#
# Exercises a hard (persistent) scsi timeout: a known pattern is written
# before the fault, a second pattern is written while the fault is active
# (driving the vdev to UNAVAIL), and a third while the pool is degraded.
# All three patterns are verified while degraded, the device is rescanned
# and brought back online, and after the automatic resilver all patterns
# are verified again.
#
# $1  POOL_NAME       zpool name to create for the test
# $2  POOL_CONFIG     pool configuration helper name (scsi_debug-*)
# $3  POOL_REDUNDANT  redundancy flag; captured but not referenced in this
#                     function (kept for signature parity with the soft
#                     timeout test)
# $4  POOL_NTH        fault every Nth command; callers pass a negative
#                     value here -- presumably scsi_debug's "|N| faults
#                     total" semantics, TODO confirm against fault_set_sd
test_write_timeout_hard() {
	local POOL_NAME=$1
	local POOL_CONFIG=$2
	local POOL_REDUNDANT=$3
	local POOL_NTH=$4
	local ZVOL_NAME="zvol"
	local ZVOL_DEVICE="/dev/${POOL_NAME}/${ZVOL_NAME}"
	local RESCAN=1

	# The scsi_debug module is required to inject faults below ZFS;
	# without it this test cannot run and is skipped, not failed.
	if [ ${SCSI_DEBUG} -eq 0 ]; then
		skip_nonewline
		return
	fi

	local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
	test_setup ${POOL_NAME} ${POOL_CONFIG} ${ZVOL_NAME} ${TMP_CACHE}

	local TMP_PATTERN1=`pattern_create 1M 8`
	local TMP_PATTERN2=`pattern_create 1M 8`
	local TMP_PATTERN3=`pattern_create 1M 8`

	# Create three partitions each one gets a unique pattern. The first
	# pattern is written before the failure, the second pattern during
	# the failure, and the third pattern while the vdev is degraded.
	# All three patterns are verified while the vdev is degraded and
	# then again once it is brought back online.
	${PARTED} -s ${ZVOL_DEVICE} mklabel gpt || fail 11
	${PARTED} -s ${ZVOL_DEVICE} mkpart primary 1M 16M || fail 12
	${PARTED} -s ${ZVOL_DEVICE} mkpart primary 16M 32M || fail 13
	${PARTED} -s ${ZVOL_DEVICE} mkpart primary 32M 48M || fail 14

	# Wait (up to 30s each) for udev to create the partition nodes.
	wait_udev ${ZVOL_DEVICE}1 30
	wait_udev ${ZVOL_DEVICE}2 30
	wait_udev ${ZVOL_DEVICE}3 30

	# Before the failure.
	pattern_write ${TMP_PATTERN1} 1M 8 ${ZVOL_DEVICE}1 || fail 15

	# Get the faulty vdev name.
	local VDEV_FAULTY=`nth_zpool_vdev ${POOL_NAME} sd 1`

	# Set timeout(0x4) for every nth command.
	fault_set_sd 4 ${POOL_NTH}

	# During the failure.
	pattern_write ${TMP_PATTERN2} 1M 8 ${ZVOL_DEVICE}2 || fail 21

	# Expect write errors to be logged to 'zpool status'
	local WRITE_ERRORS=`vdev_write_errors ${POOL_NAME} ${VDEV_FAULTY}`
	test ${WRITE_ERRORS} -gt 0 || fail 22

	# The persistent timeout must have taken the vdev out entirely.
	local VDEV_STATUS=`vdev_status ${POOL_NAME} ${VDEV_FAULTY}`
	test "${VDEV_STATUS}" = "UNAVAIL" || fail 23

	# Clear the error and remove it from /dev/.
	fault_clear_sd
	rm -f /dev/${VDEV_FAULTY}[0-9]

	# Verify the first two patterns and write out the third.
	pattern_write ${TMP_PATTERN3} 1M 8 ${ZVOL_DEVICE}3 || fail 31
	pattern_verify ${TMP_PATTERN1} 1M 8 ${ZVOL_DEVICE}1 || fail 32
	pattern_verify ${TMP_PATTERN2} 1M 8 ${ZVOL_DEVICE}2 || fail 33
	pattern_verify ${TMP_PATTERN3} 1M 8 ${ZVOL_DEVICE}3 || fail 34

	# Bring the device back online by rescanning for it. It must appear
	# in lsscsi and be available to dd before allowing ZFS to bring it
	# online. This is not required but provides additional sanity.
	# NOTE(review): this is a busy-wait with no delay or retry cap; it
	# will spin until the rescan succeeds.
	while [ ${RESCAN} -eq 1 ]; do
		scsi_rescan
		wait_udev /dev/${VDEV_FAULTY} 30

		if [ `${LSSCSI} | grep -c "/dev/${VDEV_FAULTY}"` -eq 0 ]; then
			continue
		fi

		# Confirm the device actually services reads, not just exists.
		dd if=/dev/${VDEV_FAULTY} of=/dev/null bs=8M count=1 &>/dev/null
		if [ $? -ne 0 ]; then
			continue
		fi

		RESCAN=0
	done

	# Bring the device back online. We expect it to be automatically
	# resilvered without error and we should see minimally the zfs.io,
	# zfs.statechange (VDEV_STATE_HEALTHY (0x7)), and zfs.resilver.*
	# events posted.
	${ZPOOL} online ${POOL_NAME} ${VDEV_FAULTY} || fail 51
	sleep 3
	test `zpool_event "zfs.io" "zio_err"` = "0x5" || fail 52
	test `zpool_event "zfs.statechange" "vdev_state"` = "0x7" || fail 53
	test `zpool_event "zfs.resilver.start" "ena"` != "" || fail 54
	test `zpool_event "zfs.resilver.finish" "ena"` != "" || fail 55
	test `zpool_scan_errors ${POOL_NAME}` -eq 0 || fail 56

	# After resilver the vdev must report fully healthy.
	local VDEV_STATUS=`vdev_status ${POOL_NAME} ${VDEV_FAULTY}`
	test "${VDEV_STATUS}" = "ONLINE" || fail 57

	# Verify the known pattern.
	pattern_verify ${TMP_PATTERN1} 1M 8 ${ZVOL_DEVICE}1 || fail 61
	pattern_verify ${TMP_PATTERN2} 1M 8 ${ZVOL_DEVICE}2 || fail 62
	pattern_verify ${TMP_PATTERN3} 1M 8 ${ZVOL_DEVICE}3 || fail 63
	pattern_remove ${TMP_PATTERN1} || fail 64
	pattern_remove ${TMP_PATTERN2} || fail 65
	pattern_remove ${TMP_PATTERN3} || fail 66

	test_cleanup ${POOL_NAME} ${POOL_CONFIG} ${ZVOL_NAME} ${TMP_CACHE}
	pass_nonewline
}
941 | ||
# Drive the hard (persistent) write timeout test over each redundant pool
# layout.  The non-redundant raid0 configuration is skipped up front.
test_9() {
	local config
	skip_nonewline # Skip non-redundant config
	for config in scsi_debug-raid10 scsi_debug-raidz \
	    scsi_debug-raidz2 scsi_debug-raidz3; do
		test_write_timeout_hard tank ${config} 1 -50
	done
	echo
}
run_test 9 "hard write timeout"
951 | ||
952 | exit 0 |