#!/bin/bash
#
# ZFS/ZPOOL configuration test script.

basedir="$(dirname $0)"

SCRIPT_COMMON=common.sh
if [ -f "${basedir}/${SCRIPT_COMMON}" ]; then
        . "${basedir}/${SCRIPT_COMMON}"
else
        echo "Missing helper script ${SCRIPT_COMMON}" && exit 1
fi

PROG=zconfig.sh

usage() {
cat << EOF
USAGE:
$0 [-hvc] [-t <tests>] [-s <tests>]

DESCRIPTION:
        ZFS/ZPOOL configuration tests

OPTIONS:
        -h              Show this message
        -v              Verbose
        -c              Clean up lo+file devices at start
        -t <tests>      Run only the listed tests
        -s <tests>      Skip the listed tests

EOF
}

while getopts 'hvct:s:?' OPTION; do
        case $OPTION in
        h)
                usage
                exit 1
                ;;
        v)
                VERBOSE=1
                ;;
        c)
                CLEANUP=1
                ;;
        t)
                TESTS_RUN=($OPTARG)
                ;;
        s)
                TESTS_SKIP=($OPTARG)
                ;;
        ?)
                usage
                exit
                ;;
        esac
done

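# Illustrative invocations; -t and -s take quoted, space-separated test
# numbers which are presumably consumed by run_test in common.sh:
#
#   ./zconfig.sh -v -t "1 2 3"    # run only tests 1-3, verbosely
#   ./zconfig.sh -c -s "8 9"      # pre-clean devices, skip tests 8 and 9
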
if [ $(id -u) != 0 ]; then
        die "Must run as root"
fi

# Perform pre-cleanup if requested
if [ ${CLEANUP} ]; then
        cleanup_md_devices
        cleanup_loop_devices
        rm -f /tmp/zpool.cache.*
fi

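# Label DEVICE with sfdisk: create a single partition spanning cylinders
# START through END and leave the remaining three primary partition slots
# empty.  sfdisk output is captured in a temporary file and discarded.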
zconfig_partition() {
        local DEVICE=$1
        local START=$2
        local END=$3
        local TMP_FILE=`mktemp`

        /sbin/sfdisk -q ${DEVICE} << EOF &>${TMP_FILE} || fail 4
${START},${END}
;
;
;
EOF

        rm ${TMP_FILE}
}

# Validate persistent zpool.cache configuration.
test_1() {
        local POOL_NAME=test1
        local TMP_FILE1=`mktemp`
        local TMP_FILE2=`mktemp`
        local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

        # Create a pool and save its status for comparison.
        ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
        ${ZPOOL} status ${POOL_NAME} >${TMP_FILE1} || fail 3

        # Unload/load the module stack and verify the pool persists.
        ${ZFS_SH} -u || fail 4
        ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 5
        ${ZPOOL} status ${POOL_NAME} >${TMP_FILE2} || fail 6
        cmp ${TMP_FILE1} ${TMP_FILE2} || fail 7

        # Clean up the test pool and temporary files
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 8
        rm -f ${TMP_FILE1} ${TMP_FILE2} ${TMP_CACHE} || fail 9
        ${ZFS_SH} -u || fail 10

        pass
}
run_test 1 "persistent zpool.cache"

# Validate ZFS disk scanning and import w/out zpool.cache configuration.
test_2() {
        local POOL_NAME=test2
        local TMP_FILE1=`mktemp`
        local TMP_FILE2=`mktemp`
        local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

        # Create a pool and save its status for comparison.
        ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
        ${ZPOOL} status ${POOL_NAME} >${TMP_FILE1} || fail 3

        # Unload the module stack, remove the cache file, load the module
        # stack, and attempt to probe the disks to import the pool.  As a
        # cross-check, verify the old pool state against the imported state.
        ${ZFS_SH} -u || fail 4
        rm -f ${TMP_CACHE} || fail 5
        ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 6
        ${ZPOOL} import | grep ${POOL_NAME} >/dev/null || fail 7
        ${ZPOOL} import ${POOL_NAME} || fail 8
        ${ZPOOL} status ${POOL_NAME} >${TMP_FILE2} || fail 9
        cmp ${TMP_FILE1} ${TMP_FILE2} || fail 10

        # Clean up the test pool and temporary files
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 11
        rm -f ${TMP_FILE1} ${TMP_FILE2} || fail 12
        ${ZFS_SH} -u || fail 13

        pass
}
run_test 2 "scan disks for pools to import"

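# Count how many of the expected device nodes exist: the /dev/<pool>
# directory, the volume plus its two partitions, the snapshot plus its two
# partitions, and the clone plus its two partitions (10 nodes maximum).
# Return 0 only when exactly EXPECT of those nodes are present.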
zconfig_zvol_device_stat() {
        local EXPECT=$1
        local POOL_NAME=/dev/$2
        local ZVOL_NAME=/dev/$3
        local SNAP_NAME=/dev/$4
        local CLONE_NAME=/dev/$5
        local COUNT=0

        # Briefly delay for udev
        sleep 3

        # Pool exists
        stat ${POOL_NAME} &>/dev/null && let COUNT=$COUNT+1

        # Volume and partitions
        stat ${ZVOL_NAME} &>/dev/null && let COUNT=$COUNT+1
        stat ${ZVOL_NAME}1 &>/dev/null && let COUNT=$COUNT+1
        stat ${ZVOL_NAME}2 &>/dev/null && let COUNT=$COUNT+1

        # Snapshot with partitions
        stat ${SNAP_NAME} &>/dev/null && let COUNT=$COUNT+1
        stat ${SNAP_NAME}1 &>/dev/null && let COUNT=$COUNT+1
        stat ${SNAP_NAME}2 &>/dev/null && let COUNT=$COUNT+1

        # Clone with partitions
        stat ${CLONE_NAME} &>/dev/null && let COUNT=$COUNT+1
        stat ${CLONE_NAME}1 &>/dev/null && let COUNT=$COUNT+1
        stat ${CLONE_NAME}2 &>/dev/null && let COUNT=$COUNT+1

        if [ $EXPECT -ne $COUNT ]; then
                return 1
        fi

        return 0
}

# zpool import/export device check
# (1 volume, 2 partitions, 1 snapshot, 1 clone)
test_3() {
        local POOL_NAME=tank
        local ZVOL_NAME=volume
        local SNAP_NAME=snap
        local CLONE_NAME=clone
        local FULL_ZVOL_NAME=${POOL_NAME}/${ZVOL_NAME}
        local FULL_SNAP_NAME=${POOL_NAME}/${ZVOL_NAME}@${SNAP_NAME}
        local FULL_CLONE_NAME=${POOL_NAME}/${CLONE_NAME}
        local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

        # Create a pool, volume, partition, snapshot, and clone.
        ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
        ${ZFS} create -V 100M ${FULL_ZVOL_NAME} || fail 3
        zconfig_partition /dev/${FULL_ZVOL_NAME} 0 64 || fail 4
        ${ZFS} snapshot ${FULL_SNAP_NAME} || fail 5
        ${ZFS} clone ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 6

        # Verify the devices were created
        zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \
            ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 7

        # Export the pool
        ${ZPOOL} export ${POOL_NAME} || fail 8

        # Verify the devices were removed
        zconfig_zvol_device_stat 0 ${POOL_NAME} ${FULL_ZVOL_NAME} \
            ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 9

        # Import the pool
        ${ZPOOL} import ${POOL_NAME} || fail 10

        # Verify the devices were created
        zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \
            ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 11

        # Destroy the pool and consequently the devices
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 12

        # Verify the devices were removed
        zconfig_zvol_device_stat 0 ${POOL_NAME} ${FULL_ZVOL_NAME} \
            ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 13

        ${ZFS_SH} -u || fail 14
        rm -f ${TMP_CACHE} || fail 15

        pass
}
run_test 3 "zpool import/export device"

# zpool insmod/rmmod device check
# (1 volume, 2 partitions, 1 snapshot, 1 clone)
test_4() {
        POOL_NAME=tank
        ZVOL_NAME=volume
        SNAP_NAME=snap
        CLONE_NAME=clone
        FULL_ZVOL_NAME=${POOL_NAME}/${ZVOL_NAME}
        FULL_SNAP_NAME=${POOL_NAME}/${ZVOL_NAME}@${SNAP_NAME}
        FULL_CLONE_NAME=${POOL_NAME}/${CLONE_NAME}
        TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

        # Create a pool, volume, partition, snapshot, and clone
        ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
        ${ZFS} create -V 100M ${FULL_ZVOL_NAME} || fail 3
        zconfig_partition /dev/${FULL_ZVOL_NAME} 0 64 || fail 4
        ${ZFS} snapshot ${FULL_SNAP_NAME} || fail 5
        ${ZFS} clone ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 6

        # Verify the devices were created
        zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \
            ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 7

        # Unload the modules
        ${ZFS_SH} -u || fail 8

        # Verify the devices were removed
        zconfig_zvol_device_stat 0 ${POOL_NAME} ${FULL_ZVOL_NAME} \
            ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 9

        # Load the modules
        ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 10

        # Verify the devices were created
        zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \
            ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 11

        # Destroy the pool and consequently the devices
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 12

        # Verify the devices were removed
        zconfig_zvol_device_stat 0 ${POOL_NAME} ${FULL_ZVOL_NAME} \
            ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 13

        ${ZFS_SH} -u || fail 14
        rm -f ${TMP_CACHE} || fail 15

        pass
}
run_test 4 "zpool insmod/rmmod device"

# ZVOL volume sanity check
test_5() {
        local POOL_NAME=tank
        local ZVOL_NAME=fish
        local FULL_NAME=${POOL_NAME}/${ZVOL_NAME}
        local SRC_DIR=/bin/
        local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

        # Create a pool and volume.
        ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
        ${ZFS} create -V 400M ${FULL_NAME} || fail 3

        # Partition the volume, for a 400M volume there will be
        # 812 cylinders, 16 heads, and 63 sectors per track.
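        # (Assuming 512-byte sectors: 16 heads * 63 sectors = 1008 sectors,
        # or 516096 bytes, per cylinder; 400 MiB / 516096 is roughly 812.)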
        zconfig_partition /dev/${FULL_NAME} 0 812 || fail 4

        # Format the partition with ext3.
        /sbin/mkfs.ext3 -q /dev/${FULL_NAME}1 || fail 5

        # Mount the ext3 filesystem and copy some data to it.
        mkdir -p /tmp/${ZVOL_NAME}1 || fail 6
        mount /dev/${FULL_NAME}1 /tmp/${ZVOL_NAME}1 || fail 7
        cp -RL ${SRC_DIR} /tmp/${ZVOL_NAME}1 || fail 8
        sync

        # Verify the copied files match the original files.
        diff -ur ${SRC_DIR} /tmp/${ZVOL_NAME}1${SRC_DIR} &>/dev/null || fail 9

        # Remove the files, unmount, destroy the volume and pool.
        rm -Rf /tmp/${ZVOL_NAME}1${SRC_DIR}* || fail 10
        umount /tmp/${ZVOL_NAME}1 || fail 11
        rmdir /tmp/${ZVOL_NAME}1 || fail 12

        ${ZFS} destroy ${FULL_NAME} || fail 13
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 14
        ${ZFS_SH} -u || fail 15
        rm -f ${TMP_CACHE} || fail 16

        pass
}
run_test 5 "zvol+ext3 volume"

# ZVOL snapshot sanity check
test_6() {
        local POOL_NAME=tank
        local ZVOL_NAME=fish
        local SNAP_NAME=pristine
        local FULL_ZVOL_NAME=${POOL_NAME}/${ZVOL_NAME}
        local FULL_SNAP_NAME=${POOL_NAME}/${ZVOL_NAME}@${SNAP_NAME}
        local SRC_DIR=/bin/
        local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

        # Create a pool and volume.
        ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
        ${ZFS} create -V 400M ${FULL_ZVOL_NAME} || fail 3

        # Partition the volume, for a 400M volume there will be
        # 812 cylinders, 16 heads, and 63 sectors per track.
        zconfig_partition /dev/${FULL_ZVOL_NAME} 0 812 || fail 4

        # Format the partition with ext2 (no journal).
        /sbin/mkfs.ext2 -q /dev/${FULL_ZVOL_NAME}1 || fail 5

        # Mount the ext2 filesystem and copy some data to it.
        mkdir -p /tmp/${ZVOL_NAME}1 || fail 6
        mount /dev/${FULL_ZVOL_NAME}1 /tmp/${ZVOL_NAME}1 || fail 7

        # Snapshot the pristine ext2 filesystem and mount it read-only.
        ${ZFS} snapshot ${FULL_SNAP_NAME} || fail 8
        wait_udev /dev/${FULL_SNAP_NAME}1 30 || fail 8
        mkdir -p /tmp/${SNAP_NAME}1 || fail 9
        mount /dev/${FULL_SNAP_NAME}1 /tmp/${SNAP_NAME}1 &>/dev/null || fail 10

        # Copy to original volume
        cp -RL ${SRC_DIR} /tmp/${ZVOL_NAME}1 || fail 11
        sync

        # Verify the copied files match the original files,
        # and the copied files do NOT appear in the snapshot.
        diff -ur ${SRC_DIR} /tmp/${ZVOL_NAME}1${SRC_DIR} &>/dev/null || fail 12
        diff -ur ${SRC_DIR} /tmp/${SNAP_NAME}1${SRC_DIR} &>/dev/null && fail 13

        # Unmount, then destroy the snapshot, volume, and pool.
        umount /tmp/${SNAP_NAME}1 || fail 14
        rmdir /tmp/${SNAP_NAME}1 || fail 15
        ${ZFS} destroy ${FULL_SNAP_NAME} || fail 16

        umount /tmp/${ZVOL_NAME}1 || fail 17
        rmdir /tmp/${ZVOL_NAME}1 || fail 18
        ${ZFS} destroy ${FULL_ZVOL_NAME} || fail 19

        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 20
        ${ZFS_SH} -u || fail 21
        rm -f ${TMP_CACHE} || fail 22

        pass
}
run_test 6 "zvol+ext2 snapshot"

# ZVOL clone sanity check
test_7() {
        local POOL_NAME=tank
        local ZVOL_NAME=fish
        local SNAP_NAME=pristine
        local CLONE_NAME=clone
        local FULL_ZVOL_NAME=${POOL_NAME}/${ZVOL_NAME}
        local FULL_SNAP_NAME=${POOL_NAME}/${ZVOL_NAME}@${SNAP_NAME}
        local FULL_CLONE_NAME=${POOL_NAME}/${CLONE_NAME}
        local SRC_DIR=/bin/
        local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

        # Create a pool and volume.
        ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
        ${ZFS} create -V 300M ${FULL_ZVOL_NAME} || fail 3

        # Partition the volume, for a 300M volume there will be
        # 609 cylinders, 16 heads, and 63 sectors per track.
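        # (Assuming 512-byte sectors: 516096 bytes per cylinder, and
        # 300 MiB / 516096 is roughly 609.)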
        zconfig_partition /dev/${FULL_ZVOL_NAME} 0 609 || fail 4

        # Format the partition with ext2 (no journal).
        /sbin/mkfs.ext2 -q /dev/${FULL_ZVOL_NAME}1 || fail 5

        # Mount the ext2 filesystem and copy some data to it.
        mkdir -p /tmp/${ZVOL_NAME}1 || fail 6
        mount /dev/${FULL_ZVOL_NAME}1 /tmp/${ZVOL_NAME}1 || fail 7

        # Snapshot the pristine ext2 filesystem and mount it read-only.
        ${ZFS} snapshot ${FULL_SNAP_NAME} || fail 8
        wait_udev /dev/${FULL_SNAP_NAME}1 30 || fail 8
        mkdir -p /tmp/${SNAP_NAME}1 || fail 9
        mount /dev/${FULL_SNAP_NAME}1 /tmp/${SNAP_NAME}1 &>/dev/null || fail 10

        # Copy to original volume.
        cp -RL ${SRC_DIR} /tmp/${ZVOL_NAME}1 || fail 11
        sync

        # Verify the copied files match the original files,
        # and the copied files do NOT appear in the snapshot.
        diff -ur ${SRC_DIR} /tmp/${ZVOL_NAME}1${SRC_DIR} &>/dev/null || fail 12
        diff -ur ${SRC_DIR} /tmp/${SNAP_NAME}1${SRC_DIR} &>/dev/null && fail 13

        # Clone from the original pristine snapshot
        ${ZFS} clone ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 14
        wait_udev /dev/${FULL_CLONE_NAME}1 30 || fail 14
        mkdir -p /tmp/${CLONE_NAME}1 || fail 15
        mount /dev/${FULL_CLONE_NAME}1 /tmp/${CLONE_NAME}1 || fail 16

        # Verify the clone matches the pristine snapshot,
        # and the files copied to the original volume are NOT there.
        diff -ur /tmp/${SNAP_NAME}1 /tmp/${CLONE_NAME}1 &>/dev/null || fail 17
        diff -ur /tmp/${ZVOL_NAME}1 /tmp/${CLONE_NAME}1 &>/dev/null && fail 18

        # Copy to cloned volume.
        cp -RL ${SRC_DIR} /tmp/${CLONE_NAME}1 || fail 19
        sync

        # Verify the clone matches the modified original volume.
        diff -ur /tmp/${ZVOL_NAME}1 /tmp/${CLONE_NAME}1 &>/dev/null || fail 20

        # Unmount, then destroy the clone, snapshot, volume, and pool.
        umount /tmp/${CLONE_NAME}1 || fail 21
        rmdir /tmp/${CLONE_NAME}1 || fail 22
        ${ZFS} destroy ${FULL_CLONE_NAME} || fail 23

        umount /tmp/${SNAP_NAME}1 || fail 24
        rmdir /tmp/${SNAP_NAME}1 || fail 25
        ${ZFS} destroy ${FULL_SNAP_NAME} || fail 26

        umount /tmp/${ZVOL_NAME}1 || fail 27
        rmdir /tmp/${ZVOL_NAME}1 || fail 28
        ${ZFS} destroy ${FULL_ZVOL_NAME} || fail 29

        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 30
        ${ZFS_SH} -u || fail 31
        rm -f ${TMP_CACHE} || fail 32

        pass
}
run_test 7 "zvol+ext2 clone"

# Send/Receive sanity check
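# Create two pools, copy data into a volume on the first, snapshot it, pipe
# 'zfs send' of that snapshot into 'zfs receive' on the second pool, and
# then diff the mounted contents of both volumes.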
test_8() {
        local POOL_NAME1=tank1
        local POOL_NAME2=tank2
        local ZVOL_NAME=fish
        local SNAP_NAME=snap
        local FULL_ZVOL_NAME1=${POOL_NAME1}/${ZVOL_NAME}
        local FULL_ZVOL_NAME2=${POOL_NAME2}/${ZVOL_NAME}
        local FULL_SNAP_NAME1=${POOL_NAME1}/${ZVOL_NAME}@${SNAP_NAME}
        local FULL_SNAP_NAME2=${POOL_NAME2}/${ZVOL_NAME}@${SNAP_NAME}
        local SRC_DIR=/bin/
        local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

        # Create two pools and a volume
        ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME1} -c lo-raidz2 || fail 2
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME2} -c lo-raidz2 || fail 3
        ${ZFS} create -V 300M ${FULL_ZVOL_NAME1} || fail 4

        # Partition the volume, for a 300M volume there will be
        # 609 cylinders, 16 heads, and 63 sectors per track.
        zconfig_partition /dev/${FULL_ZVOL_NAME1} 0 609

        # Format the partition with ext2.
        /sbin/mkfs.ext2 -q /dev/${FULL_ZVOL_NAME1}1 || fail 5

        # Mount the ext2 filesystem and copy some data to it.
        mkdir -p /tmp/${FULL_ZVOL_NAME1}1 || fail 6
        mount /dev/${FULL_ZVOL_NAME1}1 /tmp/${FULL_ZVOL_NAME1}1 || fail 7
        cp -RL ${SRC_DIR} /tmp/${FULL_ZVOL_NAME1}1 || fail 8
        sync || fail 9

        # Snapshot the ext2 filesystem so it may be sent.
        ${ZFS} snapshot ${FULL_SNAP_NAME1} || fail 11
        wait_udev /dev/${FULL_SNAP_NAME1} 30 || fail 11

        # Send/receive the snapshot from POOL_NAME1 to POOL_NAME2
        (${ZFS} send ${FULL_SNAP_NAME1} | \
            ${ZFS} receive ${FULL_ZVOL_NAME2}) || fail 12
        wait_udev /dev/${FULL_ZVOL_NAME2}1 30 || fail 12

        # Mount the sent ext2 filesystem.
        mkdir -p /tmp/${FULL_ZVOL_NAME2}1 || fail 13
        mount /dev/${FULL_ZVOL_NAME2}1 /tmp/${FULL_ZVOL_NAME2}1 || fail 14

        # Verify the contents of the volumes match
        diff -ur /tmp/${FULL_ZVOL_NAME1}1 /tmp/${FULL_ZVOL_NAME2}1 \
            &>/dev/null || fail 15

        # Unmount, then destroy the snapshots, volumes, and pools.
        umount /tmp/${FULL_ZVOL_NAME1}1 || fail 16
        umount /tmp/${FULL_ZVOL_NAME2}1 || fail 17
        rmdir /tmp/${FULL_ZVOL_NAME1}1 || fail 18
        rmdir /tmp/${FULL_ZVOL_NAME2}1 || fail 19
        rmdir /tmp/${POOL_NAME1} || fail 20
        rmdir /tmp/${POOL_NAME2} || fail 21

        ${ZFS} destroy ${FULL_SNAP_NAME1} || fail 22
        ${ZFS} destroy ${FULL_SNAP_NAME2} || fail 23
        ${ZFS} destroy ${FULL_ZVOL_NAME1} || fail 24
        ${ZFS} destroy ${FULL_ZVOL_NAME2} || fail 25
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME1} -c lo-raidz2 -d || fail 26
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME2} -c lo-raidz2 -d || fail 27
        ${ZFS_SH} -u || fail 28
        rm -f ${TMP_CACHE} || fail 29

        pass
}
run_test 8 "zfs send/receive"

# zpool event sanity check
test_9() {
        local POOL_NAME=tank
        local ZVOL_NAME=fish
        local FULL_NAME=${POOL_NAME}/${ZVOL_NAME}
        local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
        local TMP_EVENTS=`mktemp -p /tmp zpool.events.XXXXXXXX`

        # Create a pool and volume.
        ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
        ${ZFS} create -V 300M ${FULL_NAME} || fail 3

        # Dump the events, there should be at least 5 lines.
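        # ('zpool events' lists the events posted by the ZFS kernel module,
        # e.g. for pool creation and vdev additions, so a freshly created
        # pool is expected to have logged several of them.)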
        ${ZPOOL} events >${TMP_EVENTS} || fail 4
        EVENTS=`wc -l ${TMP_EVENTS} | cut -f1 -d' '`
        [ $EVENTS -lt 5 ] && fail 5

        # Clear the events and ensure there are none.
        ${ZPOOL} events -c >/dev/null || fail 6
        ${ZPOOL} events >${TMP_EVENTS} || fail 7
        EVENTS=`wc -l ${TMP_EVENTS} | cut -f1 -d' '`
        [ $EVENTS -gt 1 ] && fail 8

        ${ZFS} destroy ${FULL_NAME} || fail 9
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 10
        ${ZFS_SH} -u || fail 11
        rm -f ${TMP_CACHE} || fail 12
        rm -f ${TMP_EVENTS} || fail 13

        pass
}
run_test 9 "zpool events"

exit 0