# ZFS/ZPOOL configuration test script.

# Locate and source the common helper functions (die, fail, pass,
# run_test, ...); the script cannot run without them.
basedir="$(dirname "$0")"

SCRIPT_COMMON=common.sh
if [ -f "${basedir}/${SCRIPT_COMMON}" ]; then
	. "${basedir}/${SCRIPT_COMMON}"
else
	echo "Missing helper script ${SCRIPT_COMMON}" && exit 1
fi
22 ZFS/ZPOOL configuration tests
27 -c Cleanup lo+file devices at start
28 -t <#> Run listed tests
29 -s <#> Skip listed tests
34 while getopts 'hvct:s:?' OPTION
; do
# These tests load kernel modules and create block devices, so they
# must run with root privileges.  Quote the substitution so the test
# cannot degenerate into a syntax error if `id` ever fails.
if [ "$(id -u)" != 0 ]; then
	die "Must run as root"
fi
# Perform pre-cleanup if requested
64 if [ ${CLEANUP} ]; then
68 rm -f /tmp
/zpool.cache.
*
# Check if we need to skip the tests that require scsi_debug and lsscsi.
# Initialize both flags to 0 first: they are only conditionally set to 1
# below, and an unset value would make the later [ -eq 0 ] tests error out.
SCSI_DEBUG=0
HAVE_LSSCSI=0
${INFOMOD} scsi_debug &>/dev/null && SCSI_DEBUG=1
test -f "${LSSCSI}" && HAVE_LSSCSI=1
if [ "${SCSI_DEBUG}" -eq 0 ] || [ "${HAVE_LSSCSI}" -eq 0 ]; then
	echo "Skipping test 10 which requires the scsi_debug " \
	    "module and the ${LSSCSI} utility"
fi
85 local TMP_FILE
=`mktemp`
87 /sbin
/sfdisk
-q ${DEVICE} << EOF &>${TMP_FILE} || fail 4
# Validate persistent zpool.cache configuration: a pool created with a
# cache file must reappear after the module stack is unloaded/reloaded.
test_1() {
	local POOL_NAME=test1
	local TMP_FILE1 TMP_FILE2 TMP_CACHE
	# Assign separately from 'local' so a mktemp failure is not masked.
	TMP_FILE1=$(mktemp)
	TMP_FILE2=$(mktemp)
	TMP_CACHE=$(mktemp -p /tmp zpool.cache.XXXXXXXX)

	# Create a pool and save its status for comparison.
	${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
	${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
	${ZPOOL} status ${POOL_NAME} >${TMP_FILE1} || fail 3

	# Unload/load the module stack and verify the pool persists.
	${ZFS_SH} -u || fail 4
	${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 5
	${ZPOOL} status ${POOL_NAME} >${TMP_FILE2} || fail 6
	cmp ${TMP_FILE1} ${TMP_FILE2} || fail 7

	# Cleanup the test pool and temporary files
	${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 8
	rm -f ${TMP_FILE1} ${TMP_FILE2} ${TMP_CACHE} || fail 9
	${ZFS_SH} -u || fail 10

	pass
}
run_test 1 "persistent zpool.cache"
# Validate ZFS disk scanning and import w/out zpool.cache configuration.
test_2() {
	local POOL_NAME=test2
	local TMP_FILE1 TMP_FILE2 TMP_CACHE
	# Assign separately from 'local' so a mktemp failure is not masked.
	TMP_FILE1=$(mktemp)
	TMP_FILE2=$(mktemp)
	TMP_CACHE=$(mktemp -p /tmp zpool.cache.XXXXXXXX)

	# Create a pool and save its status for comparison.
	${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
	${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
	${ZPOOL} status ${POOL_NAME} >${TMP_FILE1} || fail 3

	# Unload the module stack, remove the cache file, load the module
	# stack and attempt to probe the disks to import the pool.  As
	# a cross check verify the old pool state against the imported.
	${ZFS_SH} -u || fail 4
	rm -f ${TMP_CACHE} || fail 5
	${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 6
	${ZPOOL} import | grep -q ${POOL_NAME} || fail 7
	${ZPOOL} import -f ${POOL_NAME} || fail 8
	${ZPOOL} status ${POOL_NAME} >${TMP_FILE2} || fail 9
	cmp ${TMP_FILE1} ${TMP_FILE2} || fail 10

	# Cleanup the test pool and temporary files
	${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 11
	rm -f ${TMP_FILE1} ${TMP_FILE2} || fail 12
	${ZFS_SH} -u || fail 13

	pass
}
run_test 2 "scan disks for pools to import"
# Count how many of the expected zvol device nodes currently exist.
#
# $1 - expected number of present nodes
# $2 - pool name, $3 - volume, $4 - snapshot, $5 - clone
#      (dataset names, resolved below under /dev/zvol/)
#
# Returns 0 when exactly $1 nodes are present, 1 otherwise.
zconfig_zvol_device_stat() {
	local EXPECT=$1
	local POOL_NAME=/dev/zvol/$2
	local ZVOL_NAME=/dev/zvol/$3
	local SNAP_NAME=/dev/zvol/$4
	local CLONE_NAME=/dev/zvol/$5
	local COUNT=0

	# Briefly delay for udev to settle the device nodes.
	sleep 3

	# Pool directory
	stat ${POOL_NAME} &>/dev/null && COUNT=$((COUNT + 1))

	# Volume and partitions
	stat ${ZVOL_NAME} &>/dev/null && COUNT=$((COUNT + 1))
	stat ${ZVOL_NAME}-part1 &>/dev/null && COUNT=$((COUNT + 1))
	stat ${ZVOL_NAME}-part2 &>/dev/null && COUNT=$((COUNT + 1))

	# Snapshot with partitions
	stat ${SNAP_NAME} &>/dev/null && COUNT=$((COUNT + 1))
	stat ${SNAP_NAME}-part1 &>/dev/null && COUNT=$((COUNT + 1))
	stat ${SNAP_NAME}-part2 &>/dev/null && COUNT=$((COUNT + 1))

	# Clone with partitions
	stat ${CLONE_NAME} &>/dev/null && COUNT=$((COUNT + 1))
	stat ${CLONE_NAME}-part1 &>/dev/null && COUNT=$((COUNT + 1))
	stat ${CLONE_NAME}-part2 &>/dev/null && COUNT=$((COUNT + 1))

	if [ "${EXPECT}" -ne "${COUNT}" ]; then
		return 1
	fi

	return 0
}
# zpool import/export device check
# (1 volume, 2 partitions, 1 snapshot, 1 clone)
test_3() {
	# NOTE(review): POOL_NAME/SNAP_NAME values reconstructed — confirm.
	local POOL_NAME=tank
	local ZVOL_NAME=volume
	local SNAP_NAME=snap
	local CLONE_NAME=clone
	local FULL_ZVOL_NAME=${POOL_NAME}/${ZVOL_NAME}
	local FULL_SNAP_NAME=${POOL_NAME}/${ZVOL_NAME}@${SNAP_NAME}
	local FULL_CLONE_NAME=${POOL_NAME}/${CLONE_NAME}
	local TMP_CACHE
	TMP_CACHE=$(mktemp -p /tmp zpool.cache.XXXXXXXX)

	# Create a pool, volume, partition, snapshot, and clone.
	${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
	${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
	${ZFS} create -V 100M ${FULL_ZVOL_NAME} || fail 3
	zconfig_partition /dev/zvol/${FULL_ZVOL_NAME} 0 64 || fail 4
	${ZFS} snapshot ${FULL_SNAP_NAME} || fail 5
	${ZFS} clone ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 6

	# Verify the devices were created
	zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \
	    ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 7

	# Export the pool
	${ZPOOL} export ${POOL_NAME} || fail 8

	# verify the devices were removed
	zconfig_zvol_device_stat 0 ${POOL_NAME} ${FULL_ZVOL_NAME} \
	    ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 9

	# Import the pool, wait 1 second for udev
	${ZPOOL} import ${POOL_NAME} || fail 10
	sleep 1

	# Verify the devices were created
	zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \
	    ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 11

	# Destroy the pool and consequently the devices
	${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 12

	# verify the devices were removed
	zconfig_zvol_device_stat 0 ${POOL_NAME} ${FULL_ZVOL_NAME} \
	    ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 13

	${ZFS_SH} -u || fail 14
	rm -f ${TMP_CACHE} || fail 15

	pass
}
run_test 3 "zpool import/export device"
# zpool insmod/rmmod device check (1 volume, 1 snapshot, 1 clone)
test_4() {
	# Declared 'local' for consistency with test_3; the originals
	# leaked into the global namespace.
	local POOL_NAME=tank
	local ZVOL_NAME=volume
	local SNAP_NAME=snap
	local CLONE_NAME=clone
	local FULL_ZVOL_NAME=${POOL_NAME}/${ZVOL_NAME}
	local FULL_SNAP_NAME=${POOL_NAME}/${ZVOL_NAME}@${SNAP_NAME}
	local FULL_CLONE_NAME=${POOL_NAME}/${CLONE_NAME}
	local TMP_CACHE
	TMP_CACHE=$(mktemp -p /tmp zpool.cache.XXXXXXXX)

	# Create a pool, volume, snapshot, and clone
	${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
	${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
	${ZFS} create -V 100M ${FULL_ZVOL_NAME} || fail 3
	zconfig_partition /dev/zvol/${FULL_ZVOL_NAME} 0 64 || fail 4
	${ZFS} snapshot ${FULL_SNAP_NAME} || fail 5
	${ZFS} clone ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 6

	# Verify the devices were created
	zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \
	    ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 7

	# Unload the modules
	${ZFS_SH} -u || fail 8

	# Verify the devices were removed
	zconfig_zvol_device_stat 0 ${POOL_NAME} ${FULL_ZVOL_NAME} \
	    ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 9

	# Load the modules, wait 1 second for udev
	${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 10
	sleep 1

	# Verify the devices were created
	zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \
	    ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 11

	# Destroy the pool and consequently the devices
	${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 12

	# Verify the devices were removed
	zconfig_zvol_device_stat 0 ${POOL_NAME} ${FULL_ZVOL_NAME} \
	    ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 13

	${ZFS_SH} -u || fail 14
	rm -f ${TMP_CACHE} || fail 15

	pass
}
run_test 4 "zpool insmod/rmmod device"
# ZVOL volume sanity check: partition, format ext3, mount, copy,
# verify, and tear everything back down.
test_5() {
	local POOL_NAME=tank
	local ZVOL_NAME=volume
	local FULL_NAME=${POOL_NAME}/${ZVOL_NAME}
	# NOTE(review): source directory reconstructed — confirm value.
	local SRC_DIR=/bin/
	local TMP_CACHE
	TMP_CACHE=$(mktemp -p /tmp zpool.cache.XXXXXXXX)

	# Create a pool and volume.
	${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
	${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raid0 || fail 2
	${ZFS} create -V 800M ${FULL_NAME} || fail 3

	# Partition the volume, for a 800M volume there will be
	# 1624 cylinders, 16 heads, and 63 sectors per track.
	zconfig_partition /dev/zvol/${FULL_NAME} 0 1624 || fail 4

	# Format the partition with ext3.
	/sbin/mkfs.ext3 -q /dev/zvol/${FULL_NAME}-part1 || fail 5

	# Mount the ext3 filesystem and copy some data to it.
	mkdir -p /tmp/${ZVOL_NAME}-part1 || fail 6
	mount /dev/zvol/${FULL_NAME}-part1 /tmp/${ZVOL_NAME}-part1 || fail 7
	cp -RL ${SRC_DIR} /tmp/${ZVOL_NAME}-part1 || fail 8
	# Flush the copy to disk before comparing.
	sync

	# Verify the copied files match the original files.
	diff -ur ${SRC_DIR} /tmp/${ZVOL_NAME}-part1${SRC_DIR} \
	    &>/dev/null || fail 9

	# Remove the files, umount, destroy the volume and pool.
	rm -Rf /tmp/${ZVOL_NAME}-part1${SRC_DIR}* || fail 10
	umount /tmp/${ZVOL_NAME}-part1 || fail 11
	rmdir /tmp/${ZVOL_NAME}-part1 || fail 12

	${ZFS} destroy ${FULL_NAME} || fail 13
	# Destroy with the same configuration the pool was created with
	# (lo-raid0); the original passed lo-raidz2 here by mistake.
	${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raid0 -d || fail 14
	${ZFS_SH} -u || fail 15
	rm -f ${TMP_CACHE} || fail 16

	pass
}
run_test 5 "zvol+ext3 volume"
# ZVOL snapshot sanity check: data copied to the volume after the
# snapshot must be visible on the volume but NOT in the snapshot.
test_6() {
	local POOL_NAME=tank
	local ZVOL_NAME=volume
	local SNAP_NAME=pristine
	local FULL_ZVOL_NAME=${POOL_NAME}/${ZVOL_NAME}
	local FULL_SNAP_NAME=${POOL_NAME}/${ZVOL_NAME}@${SNAP_NAME}
	# NOTE(review): source directory reconstructed — confirm value.
	local SRC_DIR=/bin/
	local TMP_CACHE
	TMP_CACHE=$(mktemp -p /tmp zpool.cache.XXXXXXXX)

	# Create a pool and volume.
	${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
	${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raid0 || fail 2
	${ZFS} create -V 800M ${FULL_ZVOL_NAME} || fail 3

	# Partition the volume, for a 800M volume there will be
	# 1624 cylinders, 16 heads, and 63 sectors per track.
	zconfig_partition /dev/zvol/${FULL_ZVOL_NAME} 0 1624 || fail 4

	# Format the partition with ext2 (no journal).
	/sbin/mkfs.ext2 -q /dev/zvol/${FULL_ZVOL_NAME}-part1 || fail 5

	# Mount the ext2 filesystem.
	mkdir -p /tmp/${ZVOL_NAME}-part1 || fail 6
	mount /dev/zvol/${FULL_ZVOL_NAME}-part1 /tmp/${ZVOL_NAME}-part1 \
	    || fail 7

	# Snapshot the pristine ext2 filesystem and mount it read-only.
	${ZFS} snapshot ${FULL_SNAP_NAME} || fail 8
	wait_udev /dev/zvol/${FULL_SNAP_NAME}-part1 30 || fail 8
	mkdir -p /tmp/${SNAP_NAME}-part1 || fail 9
	mount /dev/zvol/${FULL_SNAP_NAME}-part1 /tmp/${SNAP_NAME}-part1 \
	    &>/dev/null || fail 10

	# Copy to original volume
	cp -RL ${SRC_DIR} /tmp/${ZVOL_NAME}-part1 || fail 11
	# Flush the copy to disk before comparing.
	sync

	# Verify the copied files match the original files,
	# and the copied files do NOT appear in the snapshot.
	diff -ur ${SRC_DIR} /tmp/${ZVOL_NAME}-part1${SRC_DIR} \
	    &>/dev/null || fail 12
	diff -ur ${SRC_DIR} /tmp/${SNAP_NAME}-part1${SRC_DIR} \
	    &>/dev/null && fail 13

	# umount, destroy the snapshot, volume, and pool.
	umount /tmp/${SNAP_NAME}-part1 || fail 14
	rmdir /tmp/${SNAP_NAME}-part1 || fail 15
	${ZFS} destroy ${FULL_SNAP_NAME} || fail 16

	umount /tmp/${ZVOL_NAME}-part1 || fail 17
	rmdir /tmp/${ZVOL_NAME}-part1 || fail 18
	${ZFS} destroy ${FULL_ZVOL_NAME} || fail 19

	# Destroy with the same configuration the pool was created with
	# (lo-raid0); the original passed lo-raidz2 here by mistake.
	${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raid0 -d || fail 20
	${ZFS_SH} -u || fail 21
	rm -f ${TMP_CACHE} || fail 22

	pass
}
run_test 6 "zvol+ext2 snapshot"
# ZVOL clone sanity check: a clone of the pristine snapshot must match
# the snapshot, diverge from the modified volume, then match again once
# the same data is copied into it.
test_7() {
	local POOL_NAME=tank
	local ZVOL_NAME=volume
	local SNAP_NAME=pristine
	local CLONE_NAME=clone
	local FULL_ZVOL_NAME=${POOL_NAME}/${ZVOL_NAME}
	local FULL_SNAP_NAME=${POOL_NAME}/${ZVOL_NAME}@${SNAP_NAME}
	local FULL_CLONE_NAME=${POOL_NAME}/${CLONE_NAME}
	# NOTE(review): source directory reconstructed — confirm value.
	local SRC_DIR=/bin/
	local TMP_CACHE
	TMP_CACHE=$(mktemp -p /tmp zpool.cache.XXXXXXXX)

	# Create a pool and volume.
	${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
	${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
	${ZFS} create -V 300M ${FULL_ZVOL_NAME} || fail 3

	# Partition the volume, for a 300M volume there will be
	# 609 cylinders, 16 heads, and 63 sectors per track.
	zconfig_partition /dev/zvol/${FULL_ZVOL_NAME} 0 609 || fail 4

	# Format the partition with ext2 (no journal).
	/sbin/mkfs.ext2 -q /dev/zvol/${FULL_ZVOL_NAME}-part1 || fail 5

	# Mount the ext2 filesystem.
	mkdir -p /tmp/${ZVOL_NAME}-part1 || fail 6
	mount /dev/zvol/${FULL_ZVOL_NAME}-part1 /tmp/${ZVOL_NAME}-part1 \
	    || fail 7

	# Snapshot the pristine ext2 filesystem and mount it read-only.
	${ZFS} snapshot ${FULL_SNAP_NAME} || fail 8
	wait_udev /dev/zvol/${FULL_SNAP_NAME}-part1 30 || fail 8
	mkdir -p /tmp/${SNAP_NAME}-part1 || fail 9
	mount /dev/zvol/${FULL_SNAP_NAME}-part1 \
	    /tmp/${SNAP_NAME}-part1 &>/dev/null || fail 10

	# Copy to original volume.
	cp -RL ${SRC_DIR} /tmp/${ZVOL_NAME}-part1 || fail 11
	# Flush the copy to disk before comparing.
	sync

	# Verify the copied files match the original files,
	# and the copied files do NOT appear in the snapshot.
	diff -ur ${SRC_DIR} /tmp/${ZVOL_NAME}-part1${SRC_DIR} \
	    &>/dev/null || fail 12
	diff -ur ${SRC_DIR} /tmp/${SNAP_NAME}-part1${SRC_DIR} \
	    &>/dev/null && fail 13

	# Clone from the original pristine snapshot
	${ZFS} clone ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 14
	wait_udev /dev/zvol/${FULL_CLONE_NAME}-part1 30 || fail 14
	mkdir -p /tmp/${CLONE_NAME}-part1 || fail 15
	mount /dev/zvol/${FULL_CLONE_NAME}-part1 \
	    /tmp/${CLONE_NAME}-part1 || fail 16

	# Verify the clone matches the pristine snapshot,
	# and the files copied to the original volume are NOT there.
	diff -ur /tmp/${SNAP_NAME}-part1 /tmp/${CLONE_NAME}-part1 \
	    &>/dev/null || fail 17
	diff -ur /tmp/${ZVOL_NAME}-part1 /tmp/${CLONE_NAME}-part1 \
	    &>/dev/null && fail 18

	# Copy to cloned volume.
	cp -RL ${SRC_DIR} /tmp/${CLONE_NAME}-part1 || fail 19
	# Flush the copy to disk before comparing.
	sync

	# Verify the clone matches the modified original volume.
	diff -ur /tmp/${ZVOL_NAME}-part1 /tmp/${CLONE_NAME}-part1 \
	    &>/dev/null || fail 20

	# umount, destroy the clone, snapshot, volume, and pool.
	umount /tmp/${CLONE_NAME}-part1 || fail 21
	rmdir /tmp/${CLONE_NAME}-part1 || fail 22
	${ZFS} destroy ${FULL_CLONE_NAME} || fail 23

	umount /tmp/${SNAP_NAME}-part1 || fail 24
	rmdir /tmp/${SNAP_NAME}-part1 || fail 25
	${ZFS} destroy ${FULL_SNAP_NAME} || fail 26

	umount /tmp/${ZVOL_NAME}-part1 || fail 27
	rmdir /tmp/${ZVOL_NAME}-part1 || fail 28
	${ZFS} destroy ${FULL_ZVOL_NAME} || fail 29

	${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 30
	${ZFS_SH} -u || fail 31
	rm -f ${TMP_CACHE} || fail 32

	pass
}
run_test 7 "zvol+ext2 clone"
# Send/Receive sanity check: send a snapshot from one pool to a second
# pool and verify the received volume's contents match the original.
test_8() {
	local POOL_NAME1=tank1
	local POOL_NAME2=tank2
	local ZVOL_NAME=volume
	local SNAP_NAME=snap
	local FULL_ZVOL_NAME1=${POOL_NAME1}/${ZVOL_NAME}
	local FULL_ZVOL_NAME2=${POOL_NAME2}/${ZVOL_NAME}
	local FULL_SNAP_NAME1=${POOL_NAME1}/${ZVOL_NAME}@${SNAP_NAME}
	local FULL_SNAP_NAME2=${POOL_NAME2}/${ZVOL_NAME}@${SNAP_NAME}
	# NOTE(review): source directory reconstructed — confirm value.
	local SRC_DIR=/bin/
	local TMP_CACHE
	TMP_CACHE=$(mktemp -p /tmp zpool.cache.XXXXXXXX)

	# Create two pools and a volume
	${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
	${ZPOOL_CREATE_SH} -p ${POOL_NAME1} -c lo-raidz2 || fail 2
	${ZPOOL_CREATE_SH} -p ${POOL_NAME2} -c lo-raidz2 || fail 3
	${ZFS} create -V 300M ${FULL_ZVOL_NAME1} || fail 4

	# Partition the volume, for a 300M volume there will be
	# 609 cylinders, 16 heads, and 63 sectors per track.
	zconfig_partition /dev/zvol/${FULL_ZVOL_NAME1} 0 609 || fail 4

	# Format the partition with ext2.
	/sbin/mkfs.ext2 -q /dev/zvol/${FULL_ZVOL_NAME1}-part1 || fail 5

	# Mount the ext2 filesystem and copy some data to it.
	mkdir -p /tmp/${FULL_ZVOL_NAME1}-part1 || fail 6
	mount /dev/zvol/${FULL_ZVOL_NAME1}-part1 \
	    /tmp/${FULL_ZVOL_NAME1}-part1 || fail 7
	cp -RL ${SRC_DIR} /tmp/${FULL_ZVOL_NAME1}-part1 || fail 8
	# Flush the copy to disk before snapshotting.
	sync

	# Snapshot the ext2 filesystem so it may be sent.
	${ZFS} snapshot ${FULL_SNAP_NAME1} || fail 11
	wait_udev /dev/zvol/${FULL_SNAP_NAME1} 30 || fail 11

	# Send/receive the snapshot from POOL_NAME1 to POOL_NAME2
	(${ZFS} send ${FULL_SNAP_NAME1} | \
	    ${ZFS} receive ${FULL_ZVOL_NAME2}) || fail 12
	wait_udev /dev/zvol/${FULL_ZVOL_NAME2}-part1 30 || fail 12

	# Mount the sent ext2 filesystem.
	mkdir -p /tmp/${FULL_ZVOL_NAME2}-part1 || fail 13
	mount /dev/zvol/${FULL_ZVOL_NAME2}-part1 \
	    /tmp/${FULL_ZVOL_NAME2}-part1 || fail 14

	# Verify the contents of the volumes match
	diff -ur /tmp/${FULL_ZVOL_NAME1}-part1 /tmp/${FULL_ZVOL_NAME2}-part1 \
	    &>/dev/null || fail 15

	# Umount, destroy the volumes and pools.
	umount /tmp/${FULL_ZVOL_NAME1}-part1 || fail 16
	umount /tmp/${FULL_ZVOL_NAME2}-part1 || fail 17
	rmdir /tmp/${FULL_ZVOL_NAME1}-part1 || fail 18
	rmdir /tmp/${FULL_ZVOL_NAME2}-part1 || fail 19
	rmdir /tmp/${POOL_NAME1} || fail 20
	rmdir /tmp/${POOL_NAME2} || fail 21

	${ZFS} destroy ${FULL_SNAP_NAME1} || fail 22
	${ZFS} destroy ${FULL_SNAP_NAME2} || fail 23
	${ZFS} destroy ${FULL_ZVOL_NAME1} || fail 24
	${ZFS} destroy ${FULL_ZVOL_NAME2} || fail 25
	${ZPOOL_CREATE_SH} -p ${POOL_NAME1} -c lo-raidz2 -d || fail 26
	${ZPOOL_CREATE_SH} -p ${POOL_NAME2} -c lo-raidz2 -d || fail 27
	${ZFS_SH} -u || fail 28
	rm -f ${TMP_CACHE} || fail 29

	pass
}
run_test 8 "zfs send/receive"
# zpool event sanity check: creating a pool/volume must generate events,
# and 'zpool events -c' must clear them.
test_9() {
	local POOL_NAME=tank
	local ZVOL_NAME=volume
	local FULL_NAME=${POOL_NAME}/${ZVOL_NAME}
	local TMP_CACHE TMP_EVENTS EVENTS
	TMP_CACHE=$(mktemp -p /tmp zpool.cache.XXXXXXXX)
	TMP_EVENTS=$(mktemp -p /tmp zpool.events.XXXXXXXX)

	# Create a pool and volume.
	${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
	${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
	${ZFS} create -V 300M ${FULL_NAME} || fail 3

	# Dump the events, there should be at least 5 lines.
	${ZPOOL} events >${TMP_EVENTS} || fail 4
	# Redirect into wc so only the count is printed; parsing
	# "N filename" output with cut breaks when wc pads with spaces.
	EVENTS=$(wc -l <${TMP_EVENTS})
	[ "${EVENTS}" -lt 5 ] && fail 5

	# Clear the events and ensure there are none.
	${ZPOOL} events -c >/dev/null || fail 6
	${ZPOOL} events >${TMP_EVENTS} || fail 7
	EVENTS=$(wc -l <${TMP_EVENTS})
	[ "${EVENTS}" -gt 1 ] && fail 8

	${ZFS} destroy ${FULL_NAME} || fail 9
	${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 10
	${ZFS_SH} -u || fail 11
	rm -f ${TMP_CACHE} || fail 12
	rm -f ${TMP_EVENTS} || fail 13

	pass
}
run_test 9 "zpool events"
# Add the given vdev to a pool and verify, via a 'zpool status' diff,
# that it appeared under the expected parent heading and is ONLINE.
#
# $1 - pool name, $2 - vdev type (cache|log), $3 - device path
# Returns 0 on success, 1 on any verification failure.
zconfig_add_vdev() {
	local POOL_NAME=$1
	local TYPE=$2
	local DEVICE=$3
	local TMP_FILE1 TMP_FILE2 TMP_FILE3
	TMP_FILE1=$(mktemp)
	TMP_FILE2=$(mktemp)
	TMP_FILE3=$(mktemp)

	BASE_DEVICE=$(basename ${DEVICE})

	${ZPOOL} status ${POOL_NAME} >${TMP_FILE1}
	${ZPOOL} add -f ${POOL_NAME} ${TYPE} ${DEVICE} 2>/dev/null || return 1
	${ZPOOL} status ${POOL_NAME} >${TMP_FILE2}
	diff ${TMP_FILE1} ${TMP_FILE2} > ${TMP_FILE3}

	# Adding one vdev must change exactly two status lines, which with
	# the hunk header yields a three line diff.
	[ $(wc -l <${TMP_FILE3}) -eq 3 ] || return 1

	# The parent vdev heading is the line before the new device in the
	# diff.  These checks are alternatives keyed on the vdev type —
	# 'log' vdevs appear under a "logs" heading, everything else under
	# a heading matching the type itself.
	PARENT_VDEV=$(tail -2 ${TMP_FILE3} | head -1 | ${AWK} '{print $NF}')
	case "${TYPE}" in
	log)
		[ "${PARENT_VDEV}" = "logs" ] || return 1
		;;
	*)
		[ "${PARENT_VDEV}" = "${TYPE}" ] || return 1
		;;
	esac

	# The new device itself must be reported ONLINE.
	if ! tail -1 ${TMP_FILE3} |
	    egrep -q "^>[[:space:]]+${BASE_DEVICE}[[:space:]]+ONLINE" ; then
		return 1
	fi

	rm -f ${TMP_FILE1} ${TMP_FILE2} ${TMP_FILE3}
	return 0
}
# zpool add and remove sanity check using a scsi_debug device as the
# cache/log vdev.
test_10() {
	local POOL_NAME=tank
	local TMP_CACHE TMP_FILE1 TMP_FILE2
	TMP_CACHE=$(mktemp -p /tmp zpool.cache.XXXXXXXX)
	TMP_FILE1=$(mktemp)
	TMP_FILE2=$(mktemp)

	# NOTE(review): skip path reconstructed — confirm the harness'
	# skip mechanism for the final test.
	if [ ${SCSI_DEBUG} -eq 0 ] || [ ${HAVE_LSSCSI} -eq 0 ] ; then
		skip
		exit 0
	fi

	# Unload any stale scsi_debug instance, then create a fresh
	# 128M scsi_debug device.
	test $(${LSMOD} | grep -c scsi_debug) -gt 0 && \
	    (${RMMOD} scsi_debug || exit 1)
	/sbin/modprobe scsi_debug dev_size_mb=128 || \
	    die "Error $? creating scsi_debug device"

	# Locate the scsi_debug device node created above.
	SDDEVICE=$(${LSSCSI} | ${AWK} '/scsi_debug/ { print $6; exit }')
	BASE_SDDEVICE=$(basename $SDDEVICE)

	# Create a pool and record its baseline status.
	${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
	${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
	${ZPOOL} status ${POOL_NAME} >${TMP_FILE1} || fail 3

	# Add and remove a cache vdev by full path
	zconfig_add_vdev ${POOL_NAME} cache ${SDDEVICE} || fail 4
	${ZPOOL} remove ${POOL_NAME} ${SDDEVICE} || fail 5
	${ZPOOL} status ${POOL_NAME} >${TMP_FILE2} || fail 6
	cmp ${TMP_FILE1} ${TMP_FILE2} || fail 7

	# Add and remove a cache vdev by shorthand path
	zconfig_add_vdev ${POOL_NAME} cache ${BASE_SDDEVICE} || fail 8
	${ZPOOL} remove ${POOL_NAME} ${BASE_SDDEVICE} || fail 9
	${ZPOOL} status ${POOL_NAME} >${TMP_FILE2} || fail 10
	cmp ${TMP_FILE1} ${TMP_FILE2} || fail 11

	# Add and remove a log vdev
	zconfig_add_vdev ${POOL_NAME} log ${BASE_SDDEVICE} || fail 12
	${ZPOOL} remove ${POOL_NAME} ${BASE_SDDEVICE} || fail 13
	${ZPOOL} status ${POOL_NAME} >${TMP_FILE2} || fail 14
	cmp ${TMP_FILE1} ${TMP_FILE2} || fail 15

	${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 16
	${ZFS_SH} -u || fail 17
	${RMMOD} scsi_debug || fail 18

	rm -f ${TMP_FILE1} ${TMP_FILE2} ${TMP_CACHE} || fail 19

	pass
}
run_test 10 "zpool add/remove vdev"