#!/bin/bash
#
# ZFS/ZPOOL configuration test script.

basedir="$(dirname $0)"

SCRIPT_COMMON=common.sh
if [ -f "${basedir}/${SCRIPT_COMMON}" ]; then
. "${basedir}/${SCRIPT_COMMON}"
else
echo "Missing helper script ${SCRIPT_COMMON}" && exit 1
fi

PROG=zconfig.sh

usage() {
cat << EOF
USAGE:
$0 [-hvc] [-t <tests>] [-s <tests>]

DESCRIPTION:
        ZFS/ZPOOL configuration tests

OPTIONS:
        -h              Show this message
        -v              Verbose
        -c              Cleanup lo+file devices at start
        -t <tests>      Run only the listed tests
        -s <tests>      Skip the listed tests

EOF
}

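# Example invocations (root is required by the check below):
#
#   sudo ./zconfig.sh -v              # run every test verbosely
#   sudo ./zconfig.sh -t "1 2"        # run only tests 1 and 2
#   sudo ./zconfig.sh -c -s "8 9"     # pre-clean devices, skip tests 8 and 9
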
while getopts 'hvct:s:?' OPTION; do
    case $OPTION in
    h)
        usage
        exit 1
        ;;
    v)
        VERBOSE=1
        ;;
    c)
        CLEANUP=1
        ;;
    t)
        TESTS_RUN=($OPTARG)
        ;;
    s)
        TESTS_SKIP=($OPTARG)
        ;;
    ?)
        usage
        exit
        ;;
    esac
done
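
# TESTS_RUN and TESTS_SKIP are consumed by run_test(), which common.sh is
# expected to provide along with the die(), fail(), and pass() helpers
# used throughout this script.
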
if [ $(id -u) != 0 ]; then
    die "Must run as root"
fi

# Perform pre-cleanup if requested
if [ ${CLEANUP} ]; then
    cleanup_loop_devices
    rm -f /tmp/zpool.cache.*
fi

zconfig_partition() {
    local DEVICE=$1
    local START=$2
    local END=$3
    local TMP_FILE=`mktemp`

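    # The here-document below is input for sfdisk: the first line creates
    # a single partition spanning cylinders START through END, and the
    # three bare ';' lines leave the remaining primary slots empty.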
    /sbin/sfdisk -q ${DEVICE} << EOF &>${TMP_FILE} || fail 4
${START},${END}
;
;
;
EOF

    rm ${TMP_FILE}
}
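
# Note: ${ZFS}, ${ZPOOL}, ${ZFS_SH}, and ${ZPOOL_CREATE_SH} are expected
# to be set by the sourced common.sh, pointing at the zfs/zpool utilities
# and the in-tree zfs.sh and zpool-create.sh helper scripts.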

# Validate persistent zpool.cache configuration.
test_1() {
    local POOL_NAME=test1
    local TMP_FILE1=`mktemp`
    local TMP_FILE2=`mktemp`
    local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

    # Create a pool and save its status for comparison.
    ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
    ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
    ${ZPOOL} status ${POOL_NAME} >${TMP_FILE1} || fail 3

    # Unload/load the module stack and verify the pool persists.
    ${ZFS_SH} -u || fail 4
    ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 5
    ${ZPOOL} status ${POOL_NAME} >${TMP_FILE2} || fail 6
    cmp ${TMP_FILE1} ${TMP_FILE2} || fail 7

    # Cleanup the test pool and temporary files.
    ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 8
    rm -f ${TMP_FILE1} ${TMP_FILE2} ${TMP_CACHE} || fail 9
    ${ZFS_SH} -u || fail 10

    pass
}
run_test 1 "persistent zpool.cache"

# Validate ZFS disk scanning and import without a zpool.cache configuration.
test_2() {
    local POOL_NAME=test2
    local TMP_FILE1=`mktemp`
    local TMP_FILE2=`mktemp`
    local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

    # Create a pool and save its status for comparison.
    ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
    ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
    ${ZPOOL} status ${POOL_NAME} >${TMP_FILE1} || fail 3

    # Unload the module stack, remove the cache file, load the module
    # stack, and attempt to probe the disks to import the pool.  As
    # a cross check, verify the old pool state against the imported one.
    ${ZFS_SH} -u || fail 4
    rm -f ${TMP_CACHE} || fail 5
    ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 6
    ${ZPOOL} import | grep ${POOL_NAME} >/dev/null || fail 7
    ${ZPOOL} import ${POOL_NAME} || fail 8
    ${ZPOOL} status ${POOL_NAME} >${TMP_FILE2} || fail 9
    cmp ${TMP_FILE1} ${TMP_FILE2} || fail 10

    # Cleanup the test pool and temporary files.
    ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 11
    rm -f ${TMP_FILE1} ${TMP_FILE2} || fail 12
    ${ZFS_SH} -u || fail 13

    pass
}
run_test 2 "scan disks for pools to import"

zconfig_zvol_device_stat() {
    local EXPECT=$1
    local POOL_NAME=/dev/$2
    local ZVOL_NAME=/dev/$3
    local SNAP_NAME=/dev/$4
    local CLONE_NAME=/dev/$5
    local COUNT=0

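    # Count how many of the ten possible device nodes exist: the pool
    # node, plus the volume, snapshot, and clone nodes with two
    # partitions each.  The total is then compared against EXPECT.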
    # Briefly delay for udev.
    sleep 3

    # Pool exists.
    stat ${POOL_NAME} &>/dev/null && let COUNT=$COUNT+1

    # Volume and partitions.
    stat ${ZVOL_NAME} &>/dev/null && let COUNT=$COUNT+1
    stat ${ZVOL_NAME}1 &>/dev/null && let COUNT=$COUNT+1
    stat ${ZVOL_NAME}2 &>/dev/null && let COUNT=$COUNT+1

    # Snapshot with partitions.
    stat ${SNAP_NAME} &>/dev/null && let COUNT=$COUNT+1
    stat ${SNAP_NAME}1 &>/dev/null && let COUNT=$COUNT+1
    stat ${SNAP_NAME}2 &>/dev/null && let COUNT=$COUNT+1

    # Clone with partitions.
    stat ${CLONE_NAME} &>/dev/null && let COUNT=$COUNT+1
    stat ${CLONE_NAME}1 &>/dev/null && let COUNT=$COUNT+1
    stat ${CLONE_NAME}2 &>/dev/null && let COUNT=$COUNT+1

    if [ $EXPECT -ne $COUNT ]; then
        return 1
    fi

    return 0
}

# zpool import/export device check
# (1 volume, 2 partitions, 1 snapshot, 1 clone)
test_3() {
    local POOL_NAME=tank
    local ZVOL_NAME=volume
    local SNAP_NAME=snap
    local CLONE_NAME=clone
    local FULL_ZVOL_NAME=${POOL_NAME}/${ZVOL_NAME}
    local FULL_SNAP_NAME=${POOL_NAME}/${ZVOL_NAME}@${SNAP_NAME}
    local FULL_CLONE_NAME=${POOL_NAME}/${CLONE_NAME}
    local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

    # Create a pool, volume, partition, snapshot, and clone.
    ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
    ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
    ${ZFS} create -V 100M ${FULL_ZVOL_NAME} || fail 3
    zconfig_partition /dev/${FULL_ZVOL_NAME} 0 64 || fail 4
    ${ZFS} snapshot ${FULL_SNAP_NAME} || fail 5
    ${ZFS} clone ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 6

    # Verify the devices were created.
    zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \
        ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 7

    # Export the pool.
    ${ZPOOL} export ${POOL_NAME} || fail 8

    # Verify the devices were removed.
    zconfig_zvol_device_stat 0 ${POOL_NAME} ${FULL_ZVOL_NAME} \
        ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 9

    # Import the pool (zconfig_zvol_device_stat waits for udev).
    ${ZPOOL} import ${POOL_NAME} || fail 10

    # Verify the devices were created.
    zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \
        ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 11

    # Destroy the pool and consequently the devices.
    ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 12

    # Verify the devices were removed.
    zconfig_zvol_device_stat 0 ${POOL_NAME} ${FULL_ZVOL_NAME} \
        ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 13

    ${ZFS_SH} -u || fail 14
    rm -f ${TMP_CACHE} || fail 15

    pass
}
run_test 3 "zpool import/export device"

# zpool insmod/rmmod device check (1 volume, 1 snapshot, 1 clone)
test_4() {
    local POOL_NAME=tank
    local ZVOL_NAME=volume
    local SNAP_NAME=snap
    local CLONE_NAME=clone
    local FULL_ZVOL_NAME=${POOL_NAME}/${ZVOL_NAME}
    local FULL_SNAP_NAME=${POOL_NAME}/${ZVOL_NAME}@${SNAP_NAME}
    local FULL_CLONE_NAME=${POOL_NAME}/${CLONE_NAME}
    local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

    # Create a pool, volume, snapshot, and clone.
    ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
    ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
    ${ZFS} create -V 100M ${FULL_ZVOL_NAME} || fail 3
    zconfig_partition /dev/${FULL_ZVOL_NAME} 0 64 || fail 4
    ${ZFS} snapshot ${FULL_SNAP_NAME} || fail 5
    ${ZFS} clone ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 6

    # Verify the devices were created.
    zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \
        ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 7

    # Unload the modules.
    ${ZFS_SH} -u || fail 8

    # Verify the devices were removed.
    zconfig_zvol_device_stat 0 ${POOL_NAME} ${FULL_ZVOL_NAME} \
        ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 9

    # Load the modules (zconfig_zvol_device_stat waits for udev).
    ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 10

    # Verify the devices were created.
    zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \
        ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 11

    # Destroy the pool and consequently the devices.
    ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 12

    # Verify the devices were removed.
    zconfig_zvol_device_stat 0 ${POOL_NAME} ${FULL_ZVOL_NAME} \
        ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 13

    ${ZFS_SH} -u || fail 14
    rm -f ${TMP_CACHE} || fail 15

    pass
}
run_test 4 "zpool insmod/rmmod device"

# ZVOL volume sanity check
test_5() {
    local POOL_NAME=tank
    local ZVOL_NAME=fish
    local FULL_NAME=${POOL_NAME}/${ZVOL_NAME}
    local SRC_DIR=/bin/
    local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

    # Create a pool and volume.
    ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
    ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
    ${ZFS} create -V 400M ${FULL_NAME} || fail 3

    # Partition the volume; for a 400M volume there will be
    # 812 cylinders, 16 heads, and 63 sectors per track.
    zconfig_partition /dev/${FULL_NAME} 0 812 || fail 4

    # Format the partition with ext3.
    /sbin/mkfs.ext3 -q /dev/${FULL_NAME}1 || fail 5

    # Mount the ext3 filesystem and copy some data to it.
    mkdir -p /tmp/${ZVOL_NAME}1 || fail 6
    mount /dev/${FULL_NAME}1 /tmp/${ZVOL_NAME}1 || fail 7
    cp -RL ${SRC_DIR} /tmp/${ZVOL_NAME}1 || fail 8
    sync

    # Verify the copied files match the original files.
    diff -ur ${SRC_DIR} /tmp/${ZVOL_NAME}1${SRC_DIR} &>/dev/null || fail 9

    # Remove the files, umount, destroy the volume and pool.
    rm -Rf /tmp/${ZVOL_NAME}1${SRC_DIR}* || fail 10
    umount /tmp/${ZVOL_NAME}1 || fail 11
    rmdir /tmp/${ZVOL_NAME}1 || fail 12

    ${ZFS} destroy ${FULL_NAME} || fail 13
    ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 14
    ${ZFS_SH} -u || fail 15
    rm -f ${TMP_CACHE} || fail 16

    pass
}
run_test 5 "zvol+ext3 volume"

# ZVOL snapshot sanity check
test_6() {
    local POOL_NAME=tank
    local ZVOL_NAME=fish
    local SNAP_NAME=pristine
    local FULL_ZVOL_NAME=${POOL_NAME}/${ZVOL_NAME}
    local FULL_SNAP_NAME=${POOL_NAME}/${ZVOL_NAME}@${SNAP_NAME}
    local SRC_DIR=/bin/
    local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

    # Create a pool and volume.
    ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
    ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
    ${ZFS} create -V 400M ${FULL_ZVOL_NAME} || fail 3

    # Partition the volume; for a 400M volume there will be
    # 812 cylinders, 16 heads, and 63 sectors per track.
    zconfig_partition /dev/${FULL_ZVOL_NAME} 0 812 || fail 4

    # Format the partition with ext2 (no journal).
    /sbin/mkfs.ext2 -q /dev/${FULL_ZVOL_NAME}1 || fail 5

    # Mount the ext2 filesystem and copy some data to it.
    mkdir -p /tmp/${ZVOL_NAME}1 || fail 6
    mount /dev/${FULL_ZVOL_NAME}1 /tmp/${ZVOL_NAME}1 || fail 7

    # Snapshot the pristine ext2 filesystem and mount it read-only.
    ${ZFS} snapshot ${FULL_SNAP_NAME} || fail 8
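    # wait_udev (expected to be provided by common.sh) polls for the
    # named device node, here giving udev up to 30 seconds to create it.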
    wait_udev /dev/${FULL_SNAP_NAME}1 30 || fail 8
    mkdir -p /tmp/${SNAP_NAME}1 || fail 9
    mount /dev/${FULL_SNAP_NAME}1 /tmp/${SNAP_NAME}1 &>/dev/null || fail 10

    # Copy to original volume.
    cp -RL ${SRC_DIR} /tmp/${ZVOL_NAME}1 || fail 11
    sync

    # Verify the copied files match the original files,
    # and the copied files do NOT appear in the snapshot.
    diff -ur ${SRC_DIR} /tmp/${ZVOL_NAME}1${SRC_DIR} &>/dev/null || fail 12
    diff -ur ${SRC_DIR} /tmp/${SNAP_NAME}1${SRC_DIR} &>/dev/null && fail 13

    # Umount, destroy the snapshot, volume, and pool.
    umount /tmp/${SNAP_NAME}1 || fail 14
    rmdir /tmp/${SNAP_NAME}1 || fail 15
    ${ZFS} destroy ${FULL_SNAP_NAME} || fail 16

    umount /tmp/${ZVOL_NAME}1 || fail 17
    rmdir /tmp/${ZVOL_NAME}1 || fail 18
    ${ZFS} destroy ${FULL_ZVOL_NAME} || fail 19

    ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 20
    ${ZFS_SH} -u || fail 21
    rm -f ${TMP_CACHE} || fail 22

    pass
}
run_test 6 "zvol+ext2 snapshot"

# ZVOL clone sanity check
test_7() {
    local POOL_NAME=tank
    local ZVOL_NAME=fish
    local SNAP_NAME=pristine
    local CLONE_NAME=clone
    local FULL_ZVOL_NAME=${POOL_NAME}/${ZVOL_NAME}
    local FULL_SNAP_NAME=${POOL_NAME}/${ZVOL_NAME}@${SNAP_NAME}
    local FULL_CLONE_NAME=${POOL_NAME}/${CLONE_NAME}
    local SRC_DIR=/bin/
    local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

    # Create a pool and volume.
    ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
    ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
    ${ZFS} create -V 300M ${FULL_ZVOL_NAME} || fail 3

    # Partition the volume; for a 300M volume there will be
    # 609 cylinders, 16 heads, and 63 sectors per track.
    zconfig_partition /dev/${FULL_ZVOL_NAME} 0 609 || fail 4

    # Format the partition with ext2 (no journal).
    /sbin/mkfs.ext2 -q /dev/${FULL_ZVOL_NAME}1 || fail 5

    # Mount the ext2 filesystem and copy some data to it.
    mkdir -p /tmp/${ZVOL_NAME}1 || fail 6
    mount /dev/${FULL_ZVOL_NAME}1 /tmp/${ZVOL_NAME}1 || fail 7

    # Snapshot the pristine ext2 filesystem and mount it read-only.
    ${ZFS} snapshot ${FULL_SNAP_NAME} || fail 8
    wait_udev /dev/${FULL_SNAP_NAME}1 30 || fail 8
    mkdir -p /tmp/${SNAP_NAME}1 || fail 9
    mount /dev/${FULL_SNAP_NAME}1 /tmp/${SNAP_NAME}1 &>/dev/null || fail 10

    # Copy to original volume.
    cp -RL ${SRC_DIR} /tmp/${ZVOL_NAME}1 || fail 11
    sync

    # Verify the copied files match the original files,
    # and the copied files do NOT appear in the snapshot.
    diff -ur ${SRC_DIR} /tmp/${ZVOL_NAME}1${SRC_DIR} &>/dev/null || fail 12
    diff -ur ${SRC_DIR} /tmp/${SNAP_NAME}1${SRC_DIR} &>/dev/null && fail 13

    # Clone from the original pristine snapshot.
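    # (A clone initially shares every block with its snapshot, so its
    # contents must match the snapshot exactly until it is modified.)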
    ${ZFS} clone ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 14
    wait_udev /dev/${FULL_CLONE_NAME}1 30 || fail 14
    mkdir -p /tmp/${CLONE_NAME}1 || fail 15
    mount /dev/${FULL_CLONE_NAME}1 /tmp/${CLONE_NAME}1 || fail 16

    # Verify the clone matches the pristine snapshot,
    # and the files copied to the original volume are NOT there.
    diff -ur /tmp/${SNAP_NAME}1 /tmp/${CLONE_NAME}1 &>/dev/null || fail 17
    diff -ur /tmp/${ZVOL_NAME}1 /tmp/${CLONE_NAME}1 &>/dev/null && fail 18

    # Copy to cloned volume.
    cp -RL ${SRC_DIR} /tmp/${CLONE_NAME}1 || fail 19
    sync

    # Verify the clone matches the modified original volume.
    diff -ur /tmp/${ZVOL_NAME}1 /tmp/${CLONE_NAME}1 &>/dev/null || fail 20

    # Umount, destroy the clone, snapshot, volume, and pool.
    umount /tmp/${CLONE_NAME}1 || fail 21
    rmdir /tmp/${CLONE_NAME}1 || fail 22
    ${ZFS} destroy ${FULL_CLONE_NAME} || fail 23

    umount /tmp/${SNAP_NAME}1 || fail 24
    rmdir /tmp/${SNAP_NAME}1 || fail 25
    ${ZFS} destroy ${FULL_SNAP_NAME} || fail 26

    umount /tmp/${ZVOL_NAME}1 || fail 27
    rmdir /tmp/${ZVOL_NAME}1 || fail 28
    ${ZFS} destroy ${FULL_ZVOL_NAME} || fail 29

    ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 30
    ${ZFS_SH} -u || fail 31
    rm -f ${TMP_CACHE} || fail 32

    pass
}
run_test 7 "zvol+ext2 clone"

# Send/Receive sanity check
test_8() {
    local POOL_NAME1=tank1
    local POOL_NAME2=tank2
    local ZVOL_NAME=fish
    local SNAP_NAME=snap
    local FULL_ZVOL_NAME1=${POOL_NAME1}/${ZVOL_NAME}
    local FULL_ZVOL_NAME2=${POOL_NAME2}/${ZVOL_NAME}
    local FULL_SNAP_NAME1=${POOL_NAME1}/${ZVOL_NAME}@${SNAP_NAME}
    local FULL_SNAP_NAME2=${POOL_NAME2}/${ZVOL_NAME}@${SNAP_NAME}
    local SRC_DIR=/bin/
    local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

    # Create two pools and a volume.
    ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
    ${ZPOOL_CREATE_SH} -p ${POOL_NAME1} -c lo-raidz2 || fail 2
    ${ZPOOL_CREATE_SH} -p ${POOL_NAME2} -c lo-raidz2 || fail 3
    ${ZFS} create -V 300M ${FULL_ZVOL_NAME1} || fail 4

    # Partition the volume; for a 300M volume there will be
    # 609 cylinders, 16 heads, and 63 sectors per track.
    zconfig_partition /dev/${FULL_ZVOL_NAME1} 0 609

    # Format the partition with ext2.
    /sbin/mkfs.ext2 -q /dev/${FULL_ZVOL_NAME1}1 || fail 5

    # Mount the ext2 filesystem and copy some data to it.
    mkdir -p /tmp/${FULL_ZVOL_NAME1}1 || fail 6
    mount /dev/${FULL_ZVOL_NAME1}1 /tmp/${FULL_ZVOL_NAME1}1 || fail 7
    cp -RL ${SRC_DIR} /tmp/${FULL_ZVOL_NAME1}1 || fail 8
    sync || fail 9

    # Snapshot the ext2 filesystem so it may be sent.
    ${ZFS} snapshot ${FULL_SNAP_NAME1} || fail 11
    wait_udev /dev/${FULL_SNAP_NAME1} 30 || fail 11

    # Send/receive the snapshot from POOL_NAME1 to POOL_NAME2.
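    # (Receiving a full stream creates both the volume and its snapshot
    # on the target pool, hence FULL_SNAP_NAME2 is destroyed below.)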
    (${ZFS} send ${FULL_SNAP_NAME1} | \
        ${ZFS} receive ${FULL_ZVOL_NAME2}) || fail 12
    wait_udev /dev/${FULL_ZVOL_NAME2}1 30 || fail 12

    # Mount the sent ext2 filesystem.
    mkdir -p /tmp/${FULL_ZVOL_NAME2}1 || fail 13
    mount /dev/${FULL_ZVOL_NAME2}1 /tmp/${FULL_ZVOL_NAME2}1 || fail 14

    # Verify the contents of the volumes match.
    diff -ur /tmp/${FULL_ZVOL_NAME1}1 /tmp/${FULL_ZVOL_NAME2}1 \
        &>/dev/null || fail 15

    # Umount, remove the mount points, and destroy the volumes,
    # snapshots, and pools.
    umount /tmp/${FULL_ZVOL_NAME1}1 || fail 16
    umount /tmp/${FULL_ZVOL_NAME2}1 || fail 17
    rmdir /tmp/${FULL_ZVOL_NAME1}1 || fail 18
    rmdir /tmp/${FULL_ZVOL_NAME2}1 || fail 19
    rmdir /tmp/${POOL_NAME1} || fail 20
    rmdir /tmp/${POOL_NAME2} || fail 21

    ${ZFS} destroy ${FULL_SNAP_NAME1} || fail 22
    ${ZFS} destroy ${FULL_SNAP_NAME2} || fail 23
    ${ZFS} destroy ${FULL_ZVOL_NAME1} || fail 24
    ${ZFS} destroy ${FULL_ZVOL_NAME2} || fail 25
    ${ZPOOL_CREATE_SH} -p ${POOL_NAME1} -c lo-raidz2 -d || fail 26
    ${ZPOOL_CREATE_SH} -p ${POOL_NAME2} -c lo-raidz2 -d || fail 27
    ${ZFS_SH} -u || fail 28
    rm -f ${TMP_CACHE} || fail 29

    pass
}
run_test 8 "zfs send/receive"

# zpool event sanity check
test_9() {
    local POOL_NAME=tank
    local ZVOL_NAME=fish
    local FULL_NAME=${POOL_NAME}/${ZVOL_NAME}
    local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
    local TMP_EVENTS=`mktemp -p /tmp zpool.events.XXXXXXXX`

    # Create a pool and volume.
    ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
    ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
    ${ZFS} create -V 300M ${FULL_NAME} || fail 3

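    # Loading the modules and creating the pool and volume should have
    # posted several events, so a non-trivial event log is expected here.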
    # Dump the events, there should be at least 5 lines.
    ${ZPOOL} events >${TMP_EVENTS} || fail 4
    EVENTS=`wc -l ${TMP_EVENTS} | cut -f1 -d' '`
    [ $EVENTS -lt 5 ] && fail 5

    # Clear the events and ensure there are none.
    ${ZPOOL} events -c >/dev/null || fail 6
    ${ZPOOL} events >${TMP_EVENTS} || fail 7
    EVENTS=`wc -l ${TMP_EVENTS} | cut -f1 -d' '`
    [ $EVENTS -gt 1 ] && fail 8

    ${ZFS} destroy ${FULL_NAME} || fail 9
    ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 10
    ${ZFS_SH} -u || fail 11
    rm -f ${TMP_CACHE} || fail 12
    rm -f ${TMP_EVENTS} || fail 13

    pass
}
run_test 9 "zpool events"

exit 0