#!/bin/bash
#
# ZFS/ZPOOL configuration test script.

basedir="$(dirname $0)"

SCRIPT_COMMON=common.sh
if [ -f "${basedir}/${SCRIPT_COMMON}" ]; then
        . "${basedir}/${SCRIPT_COMMON}"
else
        echo "Missing helper script ${SCRIPT_COMMON}" && exit 1
fi

PROG=zconfig.sh

usage() {
cat << EOF
USAGE:
$0 [hvcts]

DESCRIPTION:
        ZFS/ZPOOL configuration tests

OPTIONS:
        -h      Show this message
        -v      Verbose
        -c      Cleanup lo+file devices at start
        -t <#>  Run listed tests
        -s <#>  Skip listed tests

EOF
}

while getopts 'hvct:s:?' OPTION; do
        case $OPTION in
        h)
                usage
                exit 1
                ;;
        v)
                VERBOSE=1
                ;;
        c)
                CLEANUP=1
                ;;
        t)
                TESTS_RUN=($OPTARG)
                ;;
        s)
                TESTS_SKIP=($OPTARG)
                ;;
        ?)
                usage
                exit
                ;;
        esac
done

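# Example invocation: clean up stale lo+file devices from a prior run,
# then run only tests 1 and 2 with verbose output (-t and -s take a
# space-separated list of test numbers):
#
#   ./zconfig.sh -c -v -t "1 2"
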
if [ $(id -u) != 0 ]; then
        die "Must run as root"
fi

# Initialize the test suite
init

# Perform pre-cleanup if requested
if [ ${CLEANUP} ]; then
        ${ZFS_SH} -u
        cleanup_md_devices
        cleanup_loop_devices
        rm -f /tmp/zpool.cache.*
fi

# Check if we need to skip the tests that require scsi_debug and lsscsi.
SCSI_DEBUG=0
${INFOMOD} scsi_debug &>/dev/null && SCSI_DEBUG=1
HAVE_LSSCSI=0
test -f ${LSSCSI} && HAVE_LSSCSI=1
if [ ${SCSI_DEBUG} -eq 0 ] || [ ${HAVE_LSSCSI} -eq 0 ]; then
        echo "Skipping test 10 which requires the scsi_debug " \
            "module and the ${LSSCSI} utility"
fi
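
# zconfig_partition below feeds sfdisk its classic "<start>,<size>" input:
# one primary partition of the requested size, followed by ";" lines that
# accept sfdisk's defaults (the next partition absorbs the remaining
# space, and any slots with no space left stay empty).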

zconfig_partition() {
        local DEVICE=$1
        local START=$2
        local END=$3
        local TMP_FILE=`mktemp`

        /sbin/sfdisk -q ${DEVICE} << EOF &>${TMP_FILE} || fail 4
${START},${END}
;
;
;
EOF

        rm ${TMP_FILE}
        udev_trigger
}

# Validate persistent zpool.cache configuration.
test_1() {
        local POOL_NAME=test1
        local TMP_FILE1=`mktemp`
        local TMP_FILE2=`mktemp`
        local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

        # Create a pool and save its status for comparison.
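        # (The spa_config_path module parameter points the zfs module at
        # a private cache file, keeping the test away from the system's
        # real /etc/zfs/zpool.cache.)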
        ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
        ${ZPOOL} status ${POOL_NAME} >${TMP_FILE1} || fail 3

        # Unload/load the module stack and verify the pool persists.
        ${ZFS_SH} -u || fail 4
        ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 5
        ${ZPOOL} status ${POOL_NAME} >${TMP_FILE2} || fail 6
        cmp ${TMP_FILE1} ${TMP_FILE2} || fail 7

        # Cleanup the test pool and temporary files
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 8
        rm -f ${TMP_FILE1} ${TMP_FILE2} ${TMP_CACHE} || fail 9
        ${ZFS_SH} -u || fail 10

        pass
}
run_test 1 "persistent zpool.cache"

# Validate ZFS disk scanning and import w/out zpool.cache configuration.
test_2() {
        local POOL_NAME=test2
        local TMP_FILE1=`mktemp`
        local TMP_FILE2=`mktemp`
        local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

        # Create a pool and save its status for comparison.
        ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
        ${ZPOOL} status ${POOL_NAME} >${TMP_FILE1} || fail 3

        # Unload the module stack, remove the cache file, load the module
        # stack, and attempt to probe the disks to import the pool.  As
        # a cross check, verify the old pool state against the imported.
        ${ZFS_SH} -u || fail 4
        rm -f ${TMP_CACHE} || fail 5
        ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 6
        ${ZPOOL} import | grep ${POOL_NAME} >/dev/null || fail 7
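        # -f forces the import; the pool was never cleanly exported before
        # the module unload, so it still looks potentially in use.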
        ${ZPOOL} import -f ${POOL_NAME} || fail 8
        ${ZPOOL} status ${POOL_NAME} >${TMP_FILE2} || fail 9
        cmp ${TMP_FILE1} ${TMP_FILE2} || fail 10

        # Cleanup the test pool and temporary files
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 11
        rm -f ${TMP_FILE1} ${TMP_FILE2} || fail 12
        ${ZFS_SH} -u || fail 13

        pass
}
run_test 2 "scan disks for pools to import"

zconfig_zvol_device_stat() {
        local EXPECT=$1
        local POOL_NAME=/dev/zvol/$2
        local ZVOL_NAME=/dev/zvol/$3
        local SNAP_NAME=/dev/zvol/$4
        local CLONE_NAME=/dev/zvol/$5
        local COUNT=0

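        # Each successful stat bumps COUNT; a fully instantiated pool,
        # volume, snapshot, and clone yields 10 nodes (the pool directory
        # plus a whole device and two partitions each for the volume,
        # snapshot, and clone).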
        # Briefly delay for udev
        udev_trigger

        # Pool exists
        stat ${POOL_NAME} &>/dev/null && let COUNT=$COUNT+1

        # Volume and partitions
        stat ${ZVOL_NAME} &>/dev/null && let COUNT=$COUNT+1
        stat ${ZVOL_NAME}-part1 &>/dev/null && let COUNT=$COUNT+1
        stat ${ZVOL_NAME}-part2 &>/dev/null && let COUNT=$COUNT+1

        # Snapshot with partitions
        stat ${SNAP_NAME} &>/dev/null && let COUNT=$COUNT+1
        stat ${SNAP_NAME}-part1 &>/dev/null && let COUNT=$COUNT+1
        stat ${SNAP_NAME}-part2 &>/dev/null && let COUNT=$COUNT+1

        # Clone with partitions
        stat ${CLONE_NAME} &>/dev/null && let COUNT=$COUNT+1
        stat ${CLONE_NAME}-part1 &>/dev/null && let COUNT=$COUNT+1
        stat ${CLONE_NAME}-part2 &>/dev/null && let COUNT=$COUNT+1

        if [ $EXPECT -ne $COUNT ]; then
                return 1
        fi

        return 0
}

# zpool import/export device check
# (1 volume, 2 partitions, 1 snapshot, 1 clone)
test_3() {
        local POOL_NAME=tank
        local ZVOL_NAME=volume
        local SNAP_NAME=snap
        local CLONE_NAME=clone
        local FULL_ZVOL_NAME=${POOL_NAME}/${ZVOL_NAME}
        local FULL_SNAP_NAME=${POOL_NAME}/${ZVOL_NAME}@${SNAP_NAME}
        local FULL_CLONE_NAME=${POOL_NAME}/${CLONE_NAME}
        local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

        # Create a pool, volume, partition, snapshot, and clone.
        ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
        ${ZFS} create -V 100M ${FULL_ZVOL_NAME} || fail 3
        zconfig_partition /dev/zvol/${FULL_ZVOL_NAME} 0 64 || fail 4
        ${ZFS} snapshot ${FULL_SNAP_NAME} || fail 5
        ${ZFS} clone ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 6

        # Verify the devices were created
        zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \
            ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 7

        # Export the pool
        ${ZPOOL} export ${POOL_NAME} || fail 8

        # Verify the devices were removed
        zconfig_zvol_device_stat 0 ${POOL_NAME} ${FULL_ZVOL_NAME} \
            ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 9

        # Import the pool; zconfig_zvol_device_stat waits for udev
        ${ZPOOL} import ${POOL_NAME} || fail 10

        # Verify the devices were created
        zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \
            ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 11

        # Destroy the pool and consequently the devices
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 12

        # Verify the devices were removed
        zconfig_zvol_device_stat 0 ${POOL_NAME} ${FULL_ZVOL_NAME} \
            ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 13

        ${ZFS_SH} -u || fail 14
        rm -f ${TMP_CACHE} || fail 15

        pass
}
run_test 3 "zpool import/export device"

# zpool insmod/rmmod device check (1 volume, 1 snapshot, 1 clone)
test_4() {
        local POOL_NAME=tank
        local ZVOL_NAME=volume
        local SNAP_NAME=snap
        local CLONE_NAME=clone
        local FULL_ZVOL_NAME=${POOL_NAME}/${ZVOL_NAME}
        local FULL_SNAP_NAME=${POOL_NAME}/${ZVOL_NAME}@${SNAP_NAME}
        local FULL_CLONE_NAME=${POOL_NAME}/${CLONE_NAME}
        local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

        # Create a pool, volume, snapshot, and clone
        ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
        ${ZFS} create -V 100M ${FULL_ZVOL_NAME} || fail 3
        zconfig_partition /dev/zvol/${FULL_ZVOL_NAME} 0 64 || fail 4
        ${ZFS} snapshot ${FULL_SNAP_NAME} || fail 5
        ${ZFS} clone ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 6

        # Verify the devices were created
        zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \
            ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 7

        # Unload the modules
        ${ZFS_SH} -u || fail 8

        # Verify the devices were removed
        zconfig_zvol_device_stat 0 ${POOL_NAME} ${FULL_ZVOL_NAME} \
            ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 9

        # Load the modules; zconfig_zvol_device_stat waits for udev
        ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 10

        # Verify the devices were created
        zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \
            ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 11

        # Destroy the pool and consequently the devices
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 12

        # Verify the devices were removed
        zconfig_zvol_device_stat 0 ${POOL_NAME} ${FULL_ZVOL_NAME} \
            ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 13

        ${ZFS_SH} -u || fail 14
        rm -f ${TMP_CACHE} || fail 15

        pass
}
run_test 4 "zpool insmod/rmmod device"

# ZVOL volume sanity check
test_5() {
        local POOL_NAME=tank
        local ZVOL_NAME=fish
        local FULL_NAME=${POOL_NAME}/${ZVOL_NAME}
        local SRC_DIR=/bin/
        local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

        # Create a pool and volume.
        ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raid0 || fail 2
        ${ZFS} create -V 800M ${FULL_NAME} || fail 3
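        # (-V creates a block-device volume; udev exposes it and its
        # partitions under /dev/zvol/${FULL_NAME}*.)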

        # Partition the volume; for an 800M volume there will be
        # 1624 cylinders, 16 heads, and 63 sectors per track.
        zconfig_partition /dev/zvol/${FULL_NAME} 0 1624

        # Format the partition with ext3.
        /sbin/mkfs.ext3 -q /dev/zvol/${FULL_NAME}-part1 || fail 5

        # Mount the ext3 filesystem and copy some data to it.
        mkdir -p /tmp/${ZVOL_NAME}-part1 || fail 6
        mount /dev/zvol/${FULL_NAME}-part1 /tmp/${ZVOL_NAME}-part1 || fail 7
        cp -RL ${SRC_DIR} /tmp/${ZVOL_NAME}-part1 || fail 8
        sync

        # Verify the copied files match the original files.
        diff -ur ${SRC_DIR} /tmp/${ZVOL_NAME}-part1${SRC_DIR} \
            &>/dev/null || fail 9

        # Remove the files, umount, destroy the volume and pool.
        rm -Rf /tmp/${ZVOL_NAME}-part1${SRC_DIR}* || fail 10
        umount /tmp/${ZVOL_NAME}-part1 || fail 11
        rmdir /tmp/${ZVOL_NAME}-part1 || fail 12

        ${ZFS} destroy ${FULL_NAME} || fail 13
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 14
        ${ZFS_SH} -u || fail 15
        rm -f ${TMP_CACHE} || fail 16

        pass
}
run_test 5 "zvol+ext3 volume"

# ZVOL snapshot sanity check
test_6() {
        local POOL_NAME=tank
        local ZVOL_NAME=fish
        local SNAP_NAME=pristine
        local FULL_ZVOL_NAME=${POOL_NAME}/${ZVOL_NAME}
        local FULL_SNAP_NAME=${POOL_NAME}/${ZVOL_NAME}@${SNAP_NAME}
        local SRC_DIR=/bin/
        local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

        # Create a pool and volume.
        ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raid0 || fail 2
        ${ZFS} create -V 800M ${FULL_ZVOL_NAME} || fail 3

        # Partition the volume; for an 800M volume there will be
        # 1624 cylinders, 16 heads, and 63 sectors per track.
        zconfig_partition /dev/zvol/${FULL_ZVOL_NAME} 0 1624

        # Format the partition with ext2 (no journal).
        /sbin/mkfs.ext2 -q /dev/zvol/${FULL_ZVOL_NAME}-part1 || fail 5

        # Mount the ext2 filesystem.
        mkdir -p /tmp/${ZVOL_NAME}-part1 || fail 6
        mount /dev/zvol/${FULL_ZVOL_NAME}-part1 /tmp/${ZVOL_NAME}-part1 \
            || fail 7

        # Snapshot the pristine ext2 filesystem and mount it read-only.
        ${ZFS} snapshot ${FULL_SNAP_NAME} || fail 8
        wait_udev /dev/zvol/${FULL_SNAP_NAME}-part1 30 || fail 8
        mkdir -p /tmp/${SNAP_NAME}-part1 || fail 9
        mount /dev/zvol/${FULL_SNAP_NAME}-part1 /tmp/${SNAP_NAME}-part1 \
            &>/dev/null || fail 10
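        # (A snapshot zvol is a read-only block device, so mount falls
        # back to a read-only mount; its warning is discarded above.)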

        # Copy to original volume
        cp -RL ${SRC_DIR} /tmp/${ZVOL_NAME}-part1 || fail 11
        sync

        # Verify the copied files match the original files,
        # and the copied files do NOT appear in the snapshot.
        diff -ur ${SRC_DIR} /tmp/${ZVOL_NAME}-part1${SRC_DIR} \
            &>/dev/null || fail 12
        diff -ur ${SRC_DIR} /tmp/${SNAP_NAME}-part1${SRC_DIR} \
            &>/dev/null && fail 13

        # Umount, destroy the snapshot, volume, and pool.
        umount /tmp/${SNAP_NAME}-part1 || fail 14
        rmdir /tmp/${SNAP_NAME}-part1 || fail 15
        ${ZFS} destroy ${FULL_SNAP_NAME} || fail 16

        umount /tmp/${ZVOL_NAME}-part1 || fail 17
        rmdir /tmp/${ZVOL_NAME}-part1 || fail 18
        ${ZFS} destroy ${FULL_ZVOL_NAME} || fail 19

        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 20
        ${ZFS_SH} -u || fail 21
        rm -f ${TMP_CACHE} || fail 22

        pass
}
run_test 6 "zvol+ext2 snapshot"

# ZVOL clone sanity check
test_7() {
        local POOL_NAME=tank
        local ZVOL_NAME=fish
        local SNAP_NAME=pristine
        local CLONE_NAME=clone
        local FULL_ZVOL_NAME=${POOL_NAME}/${ZVOL_NAME}
        local FULL_SNAP_NAME=${POOL_NAME}/${ZVOL_NAME}@${SNAP_NAME}
        local FULL_CLONE_NAME=${POOL_NAME}/${CLONE_NAME}
        local SRC_DIR=/bin/
        local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

        # Create a pool and volume.
        ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
        ${ZFS} create -V 300M ${FULL_ZVOL_NAME} || fail 3

        # Partition the volume; for a 300M volume there will be
        # 609 cylinders, 16 heads, and 63 sectors per track.
        zconfig_partition /dev/zvol/${FULL_ZVOL_NAME} 0 609

        # Format the partition with ext2 (no journal).
        /sbin/mkfs.ext2 -q /dev/zvol/${FULL_ZVOL_NAME}-part1 || fail 5

        # Mount the ext2 filesystem.
        mkdir -p /tmp/${ZVOL_NAME}-part1 || fail 6
        mount /dev/zvol/${FULL_ZVOL_NAME}-part1 /tmp/${ZVOL_NAME}-part1 \
            || fail 7

        # Snapshot the pristine ext2 filesystem and mount it read-only.
        ${ZFS} snapshot ${FULL_SNAP_NAME} || fail 8
        wait_udev /dev/zvol/${FULL_SNAP_NAME}-part1 30 || fail 8
        mkdir -p /tmp/${SNAP_NAME}-part1 || fail 9
        mount /dev/zvol/${FULL_SNAP_NAME}-part1 \
            /tmp/${SNAP_NAME}-part1 &>/dev/null || fail 10

        # Copy to original volume.
        cp -RL ${SRC_DIR} /tmp/${ZVOL_NAME}-part1 || fail 11
        sync

        # Verify the copied files match the original files,
        # and the copied files do NOT appear in the snapshot.
        diff -ur ${SRC_DIR} /tmp/${ZVOL_NAME}-part1${SRC_DIR} \
            &>/dev/null || fail 12
        diff -ur ${SRC_DIR} /tmp/${SNAP_NAME}-part1${SRC_DIR} \
            &>/dev/null && fail 13

        # Clone from the original pristine snapshot
        ${ZFS} clone ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 14
        wait_udev /dev/zvol/${FULL_CLONE_NAME}-part1 30 || fail 14
        mkdir -p /tmp/${CLONE_NAME}-part1 || fail 15
        mount /dev/zvol/${FULL_CLONE_NAME}-part1 \
            /tmp/${CLONE_NAME}-part1 || fail 16

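        # A clone starts as a copy-on-write image of its origin snapshot,
        # so it matches the snapshot, not the since-modified volume.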
        # Verify the clone matches the pristine snapshot,
        # and the files copied to the original volume are NOT there.
        diff -ur /tmp/${SNAP_NAME}-part1 /tmp/${CLONE_NAME}-part1 \
            &>/dev/null || fail 17
        diff -ur /tmp/${ZVOL_NAME}-part1 /tmp/${CLONE_NAME}-part1 \
            &>/dev/null && fail 18

        # Copy to cloned volume.
        cp -RL ${SRC_DIR} /tmp/${CLONE_NAME}-part1 || fail 19
        sync

        # Verify the clone matches the modified original volume.
        diff -ur /tmp/${ZVOL_NAME}-part1 /tmp/${CLONE_NAME}-part1 \
            &>/dev/null || fail 20

        # Umount, destroy the clone, snapshot, volume, and pool.
        umount /tmp/${CLONE_NAME}-part1 || fail 21
        rmdir /tmp/${CLONE_NAME}-part1 || fail 22
        ${ZFS} destroy ${FULL_CLONE_NAME} || fail 23

        umount /tmp/${SNAP_NAME}-part1 || fail 24
        rmdir /tmp/${SNAP_NAME}-part1 || fail 25
        ${ZFS} destroy ${FULL_SNAP_NAME} || fail 26

        umount /tmp/${ZVOL_NAME}-part1 || fail 27
        rmdir /tmp/${ZVOL_NAME}-part1 || fail 28
        ${ZFS} destroy ${FULL_ZVOL_NAME} || fail 29

        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 30
        ${ZFS_SH} -u || fail 31
        rm -f ${TMP_CACHE} || fail 32

        pass
}
run_test 7 "zvol+ext2 clone"

# Send/Receive sanity check
test_8() {
        local POOL_NAME1=tank1
        local POOL_NAME2=tank2
        local ZVOL_NAME=fish
        local SNAP_NAME=snap
        local FULL_ZVOL_NAME1=${POOL_NAME1}/${ZVOL_NAME}
        local FULL_ZVOL_NAME2=${POOL_NAME2}/${ZVOL_NAME}
        local FULL_SNAP_NAME1=${POOL_NAME1}/${ZVOL_NAME}@${SNAP_NAME}
        local FULL_SNAP_NAME2=${POOL_NAME2}/${ZVOL_NAME}@${SNAP_NAME}
        local SRC_DIR=/bin/
        local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

        # Create two pools and a volume
        ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME1} -c lo-raidz2 || fail 2
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME2} -c lo-raidz2 || fail 3
        ${ZFS} create -V 300M ${FULL_ZVOL_NAME1} || fail 4

        # Partition the volume; for a 300M volume there will be
        # 609 cylinders, 16 heads, and 63 sectors per track.
        zconfig_partition /dev/zvol/${FULL_ZVOL_NAME1} 0 609

        # Format the partition with ext2.
        /sbin/mkfs.ext2 -q /dev/zvol/${FULL_ZVOL_NAME1}-part1 || fail 5

        # Mount the ext2 filesystem and copy some data to it.
        mkdir -p /tmp/${FULL_ZVOL_NAME1}-part1 || fail 6
        mount /dev/zvol/${FULL_ZVOL_NAME1}-part1 \
            /tmp/${FULL_ZVOL_NAME1}-part1 || fail 7
        cp -RL ${SRC_DIR} /tmp/${FULL_ZVOL_NAME1}-part1 || fail 8
        sync || fail 9

        # Snapshot the ext2 filesystem so it may be sent.
        ${ZFS} snapshot ${FULL_SNAP_NAME1} || fail 11
        wait_udev /dev/zvol/${FULL_SNAP_NAME1} 30 || fail 11

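        # zfs send serializes the snapshot to a byte stream; piping it to
        # zfs receive on the second pool recreates the volume there.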
        # Send/receive the snapshot from POOL_NAME1 to POOL_NAME2
        (${ZFS} send ${FULL_SNAP_NAME1} | \
            ${ZFS} receive ${FULL_ZVOL_NAME2}) || fail 12
        wait_udev /dev/zvol/${FULL_ZVOL_NAME2}-part1 30 || fail 12

        # Mount the received ext2 filesystem.
        mkdir -p /tmp/${FULL_ZVOL_NAME2}-part1 || fail 13
        mount /dev/zvol/${FULL_ZVOL_NAME2}-part1 \
            /tmp/${FULL_ZVOL_NAME2}-part1 || fail 14

        # Verify the contents of the volumes match
        diff -ur /tmp/${FULL_ZVOL_NAME1}-part1 /tmp/${FULL_ZVOL_NAME2}-part1 \
            &>/dev/null || fail 15

        # Umount, destroy the snapshots, volumes, and pools.
        umount /tmp/${FULL_ZVOL_NAME1}-part1 || fail 16
        umount /tmp/${FULL_ZVOL_NAME2}-part1 || fail 17
        rmdir /tmp/${FULL_ZVOL_NAME1}-part1 || fail 18
        rmdir /tmp/${FULL_ZVOL_NAME2}-part1 || fail 19
        rmdir /tmp/${POOL_NAME1} || fail 20
        rmdir /tmp/${POOL_NAME2} || fail 21

        ${ZFS} destroy ${FULL_SNAP_NAME1} || fail 22
        ${ZFS} destroy ${FULL_SNAP_NAME2} || fail 23
        ${ZFS} destroy ${FULL_ZVOL_NAME1} || fail 24
        ${ZFS} destroy ${FULL_ZVOL_NAME2} || fail 25
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME1} -c lo-raidz2 -d || fail 26
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME2} -c lo-raidz2 -d || fail 27
        ${ZFS_SH} -u || fail 28
        rm -f ${TMP_CACHE} || fail 29

        pass
}
run_test 8 "zfs send/receive"

# zpool event sanity check
test_9() {
        local POOL_NAME=tank
        local ZVOL_NAME=fish
        local FULL_NAME=${POOL_NAME}/${ZVOL_NAME}
        local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
        local TMP_EVENTS=`mktemp -p /tmp zpool.events.XXXXXXXX`

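        # zpool events reads the kernel's ZFS event log; pool and volume
        # creation should already have generated several entries.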
        # Create a pool and volume.
        ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
        ${ZFS} create -V 300M ${FULL_NAME} || fail 3

        # Dump the events; there should be at least 5 lines.
        ${ZPOOL} events >${TMP_EVENTS} || fail 4
        EVENTS=`wc -l ${TMP_EVENTS} | cut -f1 -d' '`
        [ $EVENTS -lt 5 ] && fail 5

        # Clear the events and ensure there are none.
        ${ZPOOL} events -c >/dev/null || fail 6
        ${ZPOOL} events >${TMP_EVENTS} || fail 7
        EVENTS=`wc -l ${TMP_EVENTS} | cut -f1 -d' '`
        [ $EVENTS -gt 1 ] && fail 8

        ${ZFS} destroy ${FULL_NAME} || fail 9
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 10
        ${ZFS_SH} -u || fail 11
        rm -f ${TMP_CACHE} || fail 12
        rm -f ${TMP_EVENTS} || fail 13

        pass
}
run_test 9 "zpool events"

zconfig_add_vdev() {
        local POOL_NAME=$1
        local TYPE=$2
        local DEVICE=$3
        local TMP_FILE1=`mktemp`
        local TMP_FILE2=`mktemp`
        local TMP_FILE3=`mktemp`

        BASE_DEVICE=`basename ${DEVICE}`

        ${ZPOOL} status ${POOL_NAME} >${TMP_FILE1}
        ${ZPOOL} add -f ${POOL_NAME} ${TYPE} ${DEVICE} 2>/dev/null || return 1
        ${ZPOOL} status ${POOL_NAME} >${TMP_FILE2}
        diff ${TMP_FILE1} ${TMP_FILE2} > ${TMP_FILE3}

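        # Adding one vdev should append exactly two lines to the status
        # output (the parent vdev header and the device itself), so the
        # plain diff is 3 lines: one append command plus the two ">" lines.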
        [ `wc -l ${TMP_FILE3} | ${AWK} '{print $1}'` -eq 3 ] || return 1

        PARENT_VDEV=`tail -2 ${TMP_FILE3} | head -1 | ${AWK} '{print $NF}'`
        case $TYPE in
        cache)
                [ "${PARENT_VDEV}" = "${TYPE}" ] || return 1
                ;;
        log)
                [ "${PARENT_VDEV}" = "logs" ] || return 1
                ;;
        esac

        if ! tail -1 ${TMP_FILE3} |
            egrep -q "^>[[:space:]]+${BASE_DEVICE}[[:space:]]+ONLINE" ; then
                return 1
        fi
        rm -f ${TMP_FILE1} ${TMP_FILE2} ${TMP_FILE3}

        return 0
}

# zpool add and remove sanity check
test_10() {
        local POOL_NAME=tank
        local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
        local TMP_FILE1=`mktemp`
        local TMP_FILE2=`mktemp`

        if [ ${SCSI_DEBUG} -eq 0 ] || [ ${HAVE_LSSCSI} -eq 0 ] ; then
                skip
                return
        fi

        test `${LSMOD} | grep -c scsi_debug` -gt 0 && \
            (${RMMOD} scsi_debug || exit 1)

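        # scsi_debug provides a RAM-backed fake SCSI disk (128M here),
        # giving the add/remove test a real-looking block device.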
        /sbin/modprobe scsi_debug dev_size_mb=128 ||
            die "Error $? creating scsi_debug device"
        udev_trigger

        SDDEVICE=`${LSSCSI} | ${AWK} '/scsi_debug/ { print $6; exit }'`
        BASE_SDDEVICE=`basename $SDDEVICE`

        # Create a pool
        ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
        ${ZPOOL} status ${POOL_NAME} >${TMP_FILE1} || fail 3

        # Add and remove a cache vdev by full path
        zconfig_add_vdev ${POOL_NAME} cache ${SDDEVICE} || fail 4
        ${ZPOOL} remove ${POOL_NAME} ${SDDEVICE} || fail 5
        ${ZPOOL} status ${POOL_NAME} >${TMP_FILE2} || fail 6
        cmp ${TMP_FILE1} ${TMP_FILE2} || fail 7
        sleep 1

        # Add and remove a cache vdev by shorthand path
        zconfig_add_vdev ${POOL_NAME} cache ${BASE_SDDEVICE} || fail 8
        ${ZPOOL} remove ${POOL_NAME} ${BASE_SDDEVICE} || fail 9
        ${ZPOOL} status ${POOL_NAME} >${TMP_FILE2} || fail 10
        cmp ${TMP_FILE1} ${TMP_FILE2} || fail 11
        sleep 1

        # Add and remove a log vdev
        zconfig_add_vdev ${POOL_NAME} log ${BASE_SDDEVICE} || fail 12
        ${ZPOOL} remove ${POOL_NAME} ${BASE_SDDEVICE} || fail 13
        ${ZPOOL} status ${POOL_NAME} >${TMP_FILE2} || fail 14
        cmp ${TMP_FILE1} ${TMP_FILE2} || fail 15

        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 16
        ${ZFS_SH} -u || fail 17
        ${RMMOD} scsi_debug || fail 18

        rm -f ${TMP_FILE1} ${TMP_FILE2} ${TMP_CACHE} || fail 19

        pass
}
run_test 10 "zpool add/remove vdev"

exit 0