#!/bin/bash
#
# ZFS/ZPOOL configuration test script.

basedir="$(dirname $0)"

SCRIPT_COMMON=common.sh
if [ -f "${basedir}/${SCRIPT_COMMON}" ]; then
	. "${basedir}/${SCRIPT_COMMON}"
else
	echo "Missing helper script ${SCRIPT_COMMON}" && exit 1
fi

PROG=zconfig.sh

usage() {
cat << EOF
USAGE:
$0 [hvc] [-t tests] [-s tests]

DESCRIPTION:
	ZFS/ZPOOL configuration tests

OPTIONS:
	-h		Show this message
	-v		Verbose
	-c		Cleanup lo+file devices at start
	-t <tests>	Run only the listed tests
	-s <tests>	Skip the listed tests

EOF
}

while getopts 'hvct:s:?' OPTION; do
	case $OPTION in
	h)
		usage
		exit 1
		;;
	v)
		VERBOSE=1
		;;
	c)
		CLEANUP=1
		;;
	t)
		TESTS_RUN=($OPTARG)
		;;
	s)
		TESTS_SKIP=($OPTARG)
		;;
	?)
		usage
		exit
		;;
	esac
done
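
# Note: -t and -s take a space-separated list of test numbers (stored in
# TESTS_RUN/TESTS_SKIP and presumably honored by run_test from common.sh).
# For example:
#   ./zconfig.sh -v -t "1 2"     # run only tests 1 and 2, verbosely
#   ./zconfig.sh -c -s "8 9"     # pre-cleanup, then skip tests 8 and 9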

if [ $(id -u) != 0 ]; then
	die "Must run as root"
fi

# Perform pre-cleanup if requested
if [ ${CLEANUP} ]; then
	cleanup_loop_devices
	rm -f /tmp/zpool.cache.*
fi

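# Create a single partition on DEVICE with sfdisk.  The heredoc supplies one
# "${START},${END}" descriptor for the first partition (start and size in
# sfdisk's default units); the bare ';' lines leave the remaining three table
# entries empty.  sfdisk output is redirected to a throwaway temp file.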
zconfig_partition() {
	local DEVICE=$1
	local START=$2
	local END=$3
	local TMP_FILE=`mktemp`

	/sbin/sfdisk -q ${DEVICE} << EOF &>${TMP_FILE} || fail 4
${START},${END}
;
;
;
EOF

	rm ${TMP_FILE}
}

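# Each test_N() below exits early via "fail <step>" so a failure identifies
# the exact step; fail, pass, and run_test, along with the ${ZFS}, ${ZPOOL},
# ${ZFS_SH}, and ${ZPOOL_CREATE_SH} helpers, are expected to come from the
# sourced common.sh.
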
# Validate persistent zpool.cache configuration.
test_1() {
	local POOL_NAME=test1
	local TMP_FILE1=`mktemp`
	local TMP_FILE2=`mktemp`
	local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

	# Create a pool and save its status for comparison.
	${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
	${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
	${ZPOOL} status ${POOL_NAME} >${TMP_FILE1} || fail 3

	# Unload/load the module stack and verify the pool persists.
	${ZFS_SH} -u || fail 4
	${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 5
	${ZPOOL} status ${POOL_NAME} >${TMP_FILE2} || fail 6
	cmp ${TMP_FILE1} ${TMP_FILE2} || fail 7

	# Cleanup the test pool and temporary files
	${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 8
	rm -f ${TMP_FILE1} ${TMP_FILE2} ${TMP_CACHE} || fail 9
	${ZFS_SH} -u || fail 10

	pass
}
run_test 1 "persistent zpool.cache"

# Validate ZFS disk scanning and import w/out zpool.cache configuration.
test_2() {
	local POOL_NAME=test2
	local TMP_FILE1=`mktemp`
	local TMP_FILE2=`mktemp`
	local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

	# Create a pool and save its status for comparison.
	${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
	${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
	${ZPOOL} status ${POOL_NAME} >${TMP_FILE1} || fail 3

	# Unload the module stack, remove the cache file, load the module
	# stack, and attempt to probe the disks to import the pool.  As
	# a cross check, verify the old pool state against the imported.
	${ZFS_SH} -u || fail 4
	rm -f ${TMP_CACHE} || fail 5
	${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 6
	${ZPOOL} import | grep ${POOL_NAME} >/dev/null || fail 7
	${ZPOOL} import ${POOL_NAME} || fail 8
	${ZPOOL} status ${POOL_NAME} >${TMP_FILE2} || fail 9
	cmp ${TMP_FILE1} ${TMP_FILE2} || fail 10

	# Cleanup the test pool and temporary files
	${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 11
	rm -f ${TMP_FILE1} ${TMP_FILE2} || fail 12
	${ZFS_SH} -u || fail 13

	pass
}
run_test 2 "scan disks for pools to import"

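# Count the /dev nodes which exist for a pool, volume, snapshot, and clone:
# the pool directory, plus the volume, snapshot, and clone devices and their
# first two partitions (10 nodes when everything is present, 0 when nothing
# is).  Succeed only when the count matches EXPECT; the initial sleep gives
# udev a moment to create or remove the device nodes.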
zconfig_zvol_device_stat() {
	local EXPECT=$1
	local POOL_NAME=/dev/$2
	local ZVOL_NAME=/dev/$3
	local SNAP_NAME=/dev/$4
	local CLONE_NAME=/dev/$5
	local COUNT=0

	# Briefly delay for udev
	sleep 1

	# Pool exists
	stat ${POOL_NAME} &>/dev/null && let COUNT=$COUNT+1

	# Volume and partitions
	stat ${ZVOL_NAME} &>/dev/null && let COUNT=$COUNT+1
	stat ${ZVOL_NAME}1 &>/dev/null && let COUNT=$COUNT+1
	stat ${ZVOL_NAME}2 &>/dev/null && let COUNT=$COUNT+1

	# Snapshot with partitions
	stat ${SNAP_NAME} &>/dev/null && let COUNT=$COUNT+1
	stat ${SNAP_NAME}1 &>/dev/null && let COUNT=$COUNT+1
	stat ${SNAP_NAME}2 &>/dev/null && let COUNT=$COUNT+1

	# Clone with partitions
	stat ${CLONE_NAME} &>/dev/null && let COUNT=$COUNT+1
	stat ${CLONE_NAME}1 &>/dev/null && let COUNT=$COUNT+1
	stat ${CLONE_NAME}2 &>/dev/null && let COUNT=$COUNT+1

	if [ $EXPECT -ne $COUNT ]; then
		return 1
	fi

	return 0
}

# zpool import/export device check
# (1 volume, 2 partitions, 1 snapshot, 1 clone)
test_3() {
	local POOL_NAME=tank
	local ZVOL_NAME=volume
	local SNAP_NAME=snap
	local CLONE_NAME=clone
	local FULL_ZVOL_NAME=${POOL_NAME}/${ZVOL_NAME}
	local FULL_SNAP_NAME=${POOL_NAME}/${ZVOL_NAME}@${SNAP_NAME}
	local FULL_CLONE_NAME=${POOL_NAME}/${CLONE_NAME}
	local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

	# Create a pool, volume, partition, snapshot, and clone.
	${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
	${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
	${ZFS} create -V 100M ${FULL_ZVOL_NAME} || fail 3
	zconfig_partition /dev/${FULL_ZVOL_NAME} 0 64 || fail 4
	${ZFS} snapshot ${FULL_SNAP_NAME} || fail 5
	${ZFS} clone ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 6

	# Verify the devices were created
	zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \
	    ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 7

	# Export the pool
	${ZPOOL} export ${POOL_NAME} || fail 8

	# Verify the devices were removed
	zconfig_zvol_device_stat 0 ${POOL_NAME} ${FULL_ZVOL_NAME} \
	    ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 9

	# Import the pool, wait 1 second for udev
	${ZPOOL} import ${POOL_NAME} || fail 10

	# Verify the devices were created
	zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \
	    ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 11

	# Destroy the pool and consequently the devices
	${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 12

	# Verify the devices were removed
	zconfig_zvol_device_stat 0 ${POOL_NAME} ${FULL_ZVOL_NAME} \
	    ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 13

	${ZFS_SH} -u || fail 14
	rm -f ${TMP_CACHE} || fail 15

	pass
}
run_test 3 "zpool import/export device"

# zpool insmod/rmmod device check (1 volume, 1 snapshot, 1 clone)
test_4() {
	local POOL_NAME=tank
	local ZVOL_NAME=volume
	local SNAP_NAME=snap
	local CLONE_NAME=clone
	local FULL_ZVOL_NAME=${POOL_NAME}/${ZVOL_NAME}
	local FULL_SNAP_NAME=${POOL_NAME}/${ZVOL_NAME}@${SNAP_NAME}
	local FULL_CLONE_NAME=${POOL_NAME}/${CLONE_NAME}
	local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

	# Create a pool, volume, partition, snapshot, and clone
	${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
	${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
	${ZFS} create -V 100M ${FULL_ZVOL_NAME} || fail 3
	zconfig_partition /dev/${FULL_ZVOL_NAME} 0 64 || fail 4
	${ZFS} snapshot ${FULL_SNAP_NAME} || fail 5
	${ZFS} clone ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 6

	# Verify the devices were created
	zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \
	    ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 7

	# Unload the modules
	${ZFS_SH} -u || fail 8

	# Verify the devices were removed
	zconfig_zvol_device_stat 0 ${POOL_NAME} ${FULL_ZVOL_NAME} \
	    ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 9

	# Load the modules, wait 1 second for udev
	${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 10

	# Verify the devices were created
	zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \
	    ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 11

	# Destroy the pool and consequently the devices
	${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 12

	# Verify the devices were removed
	zconfig_zvol_device_stat 0 ${POOL_NAME} ${FULL_ZVOL_NAME} \
	    ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 13

	${ZFS_SH} -u || fail 14
	rm -f ${TMP_CACHE} || fail 15

	pass
}
run_test 4 "zpool insmod/rmmod device"

# ZVOL volume sanity check
test_5() {
	local POOL_NAME=tank
	local ZVOL_NAME=fish
	local FULL_NAME=${POOL_NAME}/${ZVOL_NAME}
	local SRC_DIR=/bin/
	local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

	# Create a pool and volume.
	${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
	${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
	${ZFS} create -V 400M ${FULL_NAME} || fail 3

	# Partition the volume, for a 400M volume there will be
	# 812 cylinders, 16 heads, and 63 sectors per track.
	zconfig_partition /dev/${FULL_NAME} 0 812 || fail 4

	# Format the partition with ext3.
	/sbin/mkfs.ext3 -q /dev/${FULL_NAME}1 || fail 5

	# Mount the ext3 filesystem and copy some data to it.
	mkdir -p /tmp/${ZVOL_NAME}1 || fail 6
	mount /dev/${FULL_NAME}1 /tmp/${ZVOL_NAME}1 || fail 7
	cp -RL ${SRC_DIR} /tmp/${ZVOL_NAME}1 || fail 8
	sync

	# Verify the copied files match the original files.
	diff -ur ${SRC_DIR} /tmp/${ZVOL_NAME}1${SRC_DIR} &>/dev/null || fail 9

	# Remove the files, umount, destroy the volume and pool.
	rm -Rf /tmp/${ZVOL_NAME}1${SRC_DIR}* || fail 10
	umount /tmp/${ZVOL_NAME}1 || fail 11
	rmdir /tmp/${ZVOL_NAME}1 || fail 12

	${ZFS} destroy ${FULL_NAME} || fail 13
	${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 14
	${ZFS_SH} -u || fail 15
	rm -f ${TMP_CACHE} || fail 16

	pass
}
run_test 5 "zvol+ext3 volume"

# ZVOL snapshot sanity check
test_6() {
	local POOL_NAME=tank
	local ZVOL_NAME=fish
	local SNAP_NAME=pristine
	local FULL_ZVOL_NAME=${POOL_NAME}/${ZVOL_NAME}
	local FULL_SNAP_NAME=${POOL_NAME}/${ZVOL_NAME}@${SNAP_NAME}
	local SRC_DIR=/bin/
	local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

	# Create a pool and volume.
	${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
	${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
	${ZFS} create -V 400M ${FULL_ZVOL_NAME} || fail 3

	# Partition the volume, for a 400M volume there will be
	# 812 cylinders, 16 heads, and 63 sectors per track.
	zconfig_partition /dev/${FULL_ZVOL_NAME} 0 812 || fail 4

	# Format the partition with ext2 (no journal).
	/sbin/mkfs.ext2 -q /dev/${FULL_ZVOL_NAME}1 || fail 5

	# Mount the ext2 filesystem.
	mkdir -p /tmp/${ZVOL_NAME}1 || fail 6
	mount /dev/${FULL_ZVOL_NAME}1 /tmp/${ZVOL_NAME}1 || fail 7

	# Snapshot the pristine ext2 filesystem and mount it read-only.
	${ZFS} snapshot ${FULL_SNAP_NAME} && sleep 1 || fail 8
	mkdir -p /tmp/${SNAP_NAME}1 || fail 9
	mount /dev/${FULL_SNAP_NAME}1 /tmp/${SNAP_NAME}1 &>/dev/null || fail 10

	# Copy to original volume
	cp -RL ${SRC_DIR} /tmp/${ZVOL_NAME}1 || fail 11
	sync

	# Verify the copied files match the original files,
	# and the copied files do NOT appear in the snapshot.
	diff -ur ${SRC_DIR} /tmp/${ZVOL_NAME}1${SRC_DIR} &>/dev/null || fail 12
	diff -ur ${SRC_DIR} /tmp/${SNAP_NAME}1${SRC_DIR} &>/dev/null && fail 13

	# Umount, destroy the snapshot, volume, and pool.
	umount /tmp/${SNAP_NAME}1 || fail 14
	rmdir /tmp/${SNAP_NAME}1 || fail 15
	${ZFS} destroy ${FULL_SNAP_NAME} || fail 16

	umount /tmp/${ZVOL_NAME}1 || fail 17
	rmdir /tmp/${ZVOL_NAME}1 || fail 18
	${ZFS} destroy ${FULL_ZVOL_NAME} || fail 19

	${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 20
	${ZFS_SH} -u || fail 21
	rm -f ${TMP_CACHE} || fail 22

	pass
}
run_test 6 "zvol+ext2 snapshot"

# ZVOL clone sanity check
test_7() {
	local POOL_NAME=tank
	local ZVOL_NAME=fish
	local SNAP_NAME=pristine
	local CLONE_NAME=clone
	local FULL_ZVOL_NAME=${POOL_NAME}/${ZVOL_NAME}
	local FULL_SNAP_NAME=${POOL_NAME}/${ZVOL_NAME}@${SNAP_NAME}
	local FULL_CLONE_NAME=${POOL_NAME}/${CLONE_NAME}
	local SRC_DIR=/bin/
	local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

	# Create a pool and volume.
	${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
	${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
	${ZFS} create -V 400M ${FULL_ZVOL_NAME} || fail 3

	# Partition the volume, for a 400M volume there will be
	# 812 cylinders, 16 heads, and 63 sectors per track.
	zconfig_partition /dev/${FULL_ZVOL_NAME} 0 812 || fail 4

	# Format the partition with ext2 (no journal).
	/sbin/mkfs.ext2 -q /dev/${FULL_ZVOL_NAME}1 || fail 5

	# Mount the ext2 filesystem.
	mkdir -p /tmp/${ZVOL_NAME}1 || fail 6
	mount /dev/${FULL_ZVOL_NAME}1 /tmp/${ZVOL_NAME}1 || fail 7

	# Snapshot the pristine ext2 filesystem and mount it read-only.
	${ZFS} snapshot ${FULL_SNAP_NAME} && sleep 1 || fail 8
	mkdir -p /tmp/${SNAP_NAME}1 || fail 9
	mount /dev/${FULL_SNAP_NAME}1 /tmp/${SNAP_NAME}1 &>/dev/null || fail 10

	# Copy to original volume.
	cp -RL ${SRC_DIR} /tmp/${ZVOL_NAME}1 || fail 11
	sync

	# Verify the copied files match the original files,
	# and the copied files do NOT appear in the snapshot.
	diff -ur ${SRC_DIR} /tmp/${ZVOL_NAME}1${SRC_DIR} &>/dev/null || fail 12
	diff -ur ${SRC_DIR} /tmp/${SNAP_NAME}1${SRC_DIR} &>/dev/null && fail 13

	# Clone from the original pristine snapshot
	${ZFS} clone ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} && sleep 1 || fail 14
	mkdir -p /tmp/${CLONE_NAME}1 || fail 15
	mount /dev/${FULL_CLONE_NAME}1 /tmp/${CLONE_NAME}1 || fail 16

	# Verify the clone matches the pristine snapshot,
	# and the files copied to the original volume are NOT there.
	diff -ur /tmp/${SNAP_NAME}1 /tmp/${CLONE_NAME}1 &>/dev/null || fail 17
	diff -ur /tmp/${ZVOL_NAME}1 /tmp/${CLONE_NAME}1 &>/dev/null && fail 18

	# Copy to cloned volume.
	cp -RL ${SRC_DIR} /tmp/${CLONE_NAME}1 || fail 19
	sync

	# Verify the clone matches the modified original volume.
	diff -ur /tmp/${ZVOL_NAME}1 /tmp/${CLONE_NAME}1 &>/dev/null || fail 20

	# Umount, destroy the clone, snapshot, volume, and pool.
	umount /tmp/${CLONE_NAME}1 || fail 21
	rmdir /tmp/${CLONE_NAME}1 || fail 22
	${ZFS} destroy ${FULL_CLONE_NAME} || fail 23

	umount /tmp/${SNAP_NAME}1 || fail 24
	rmdir /tmp/${SNAP_NAME}1 || fail 25
	${ZFS} destroy ${FULL_SNAP_NAME} || fail 26

	umount /tmp/${ZVOL_NAME}1 || fail 27
	rmdir /tmp/${ZVOL_NAME}1 || fail 28
	${ZFS} destroy ${FULL_ZVOL_NAME} || fail 29

	${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 30
	${ZFS_SH} -u || fail 31
	rm -f ${TMP_CACHE} || fail 32

	pass
}
run_test 7 "zvol+ext2 clone"

# Send/Receive sanity check
test_8() {
	local POOL_NAME1=tank1
	local POOL_NAME2=tank2
	local ZVOL_NAME=fish
	local SNAP_NAME=snap
	local FULL_ZVOL_NAME1=${POOL_NAME1}/${ZVOL_NAME}
	local FULL_ZVOL_NAME2=${POOL_NAME2}/${ZVOL_NAME}
	local FULL_SNAP_NAME1=${POOL_NAME1}/${ZVOL_NAME}@${SNAP_NAME}
	local FULL_SNAP_NAME2=${POOL_NAME2}/${ZVOL_NAME}@${SNAP_NAME}
	local SRC_DIR=/bin/
	local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

	# Create two pools and a volume
	${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
	${ZPOOL_CREATE_SH} -p ${POOL_NAME1} -c lo-raidz2 || fail 2
	${ZPOOL_CREATE_SH} -p ${POOL_NAME2} -c lo-raidz2 || fail 3
	${ZFS} create -V 400M ${FULL_ZVOL_NAME1} || fail 4

	# Partition the volume, for a 400M volume there will be
	# 812 cylinders, 16 heads, and 63 sectors per track.
	zconfig_partition /dev/${FULL_ZVOL_NAME1} 0 812

	# Format the partition with ext2.
	/sbin/mkfs.ext2 -q /dev/${FULL_ZVOL_NAME1}1 || fail 5

	# Mount the ext2 filesystem and copy some data to it.
	mkdir -p /tmp/${FULL_ZVOL_NAME1}1 || fail 6
	mount /dev/${FULL_ZVOL_NAME1}1 /tmp/${FULL_ZVOL_NAME1}1 || fail 7
	cp -RL ${SRC_DIR} /tmp/${FULL_ZVOL_NAME1}1 || fail 8
	sync || fail 9

	# Snapshot the ext2 filesystem so it may be sent.
	${ZFS} snapshot ${FULL_SNAP_NAME1} && sleep 1 || fail 11

	# Send/receive the snapshot from POOL_NAME1 to POOL_NAME2
	(${ZFS} send ${FULL_SNAP_NAME1} | \
	    ${ZFS} receive ${FULL_ZVOL_NAME2}) && sleep 1 || fail 12

	# Mount the received ext2 filesystem.
	mkdir -p /tmp/${FULL_ZVOL_NAME2}1 || fail 13
	mount /dev/${FULL_ZVOL_NAME2}1 /tmp/${FULL_ZVOL_NAME2}1 || fail 14

	# Verify the contents of the volumes match
	diff -ur /tmp/${FULL_ZVOL_NAME1}1 /tmp/${FULL_ZVOL_NAME2}1 \
	    &>/dev/null || fail 15

	# Umount, remove the temporary directories, and destroy the
	# volumes, snapshots, and pools.
	umount /tmp/${FULL_ZVOL_NAME1}1 || fail 16
	umount /tmp/${FULL_ZVOL_NAME2}1 || fail 17
	rmdir /tmp/${FULL_ZVOL_NAME1}1 || fail 18
	rmdir /tmp/${FULL_ZVOL_NAME2}1 || fail 19
	rmdir /tmp/${POOL_NAME1} || fail 20
	rmdir /tmp/${POOL_NAME2} || fail 21

	${ZFS} destroy ${FULL_SNAP_NAME1} || fail 22
	${ZFS} destroy ${FULL_SNAP_NAME2} || fail 23
	${ZFS} destroy ${FULL_ZVOL_NAME1} || fail 24
	${ZFS} destroy ${FULL_ZVOL_NAME2} || fail 25
	${ZPOOL_CREATE_SH} -p ${POOL_NAME1} -c lo-raidz2 -d || fail 26
	${ZPOOL_CREATE_SH} -p ${POOL_NAME2} -c lo-raidz2 -d || fail 27
	${ZFS_SH} -u || fail 28
	rm -f ${TMP_CACHE} || fail 29

	pass
}
run_test 8 "zfs send/receive"

# zpool event sanity check
test_9() {
	local POOL_NAME=tank
	local ZVOL_NAME=fish
	local FULL_NAME=${POOL_NAME}/${ZVOL_NAME}
	local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
	local TMP_EVENTS=`mktemp -p /tmp zpool.events.XXXXXXXX`

	# Create a pool and volume.
	${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
	${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
	${ZFS} create -V 400M ${FULL_NAME} || fail 3

	# Dump the events; there should be at least 5 lines.
	${ZPOOL} events >${TMP_EVENTS} || fail 4
	EVENTS=`wc -l ${TMP_EVENTS} | cut -f1 -d' '`
	[ $EVENTS -lt 5 ] && fail 5

	# Clear the events and ensure there are none.
	${ZPOOL} events -c >/dev/null || fail 6
	${ZPOOL} events >${TMP_EVENTS} || fail 7
	EVENTS=`wc -l ${TMP_EVENTS} | cut -f1 -d' '`
	[ $EVENTS -gt 1 ] && fail 8

	${ZFS} destroy ${FULL_NAME} || fail 9
	${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 10
	${ZFS_SH} -u || fail 11
	rm -f ${TMP_CACHE} || fail 12
	rm -f ${TMP_EVENTS} || fail 13

	pass
}
run_test 9 "zpool events"

exit 0