#!/bin/bash
#
# ZFS/ZPOOL configuration test script.

basedir="$(dirname $0)"

SCRIPT_COMMON=common.sh
if [ -f "${basedir}/${SCRIPT_COMMON}" ]; then
. "${basedir}/${SCRIPT_COMMON}"
else
echo "Missing helper script ${SCRIPT_COMMON}" && exit 1
fi
usage() {
cat << EOF
DESCRIPTION:
        ZFS/ZPOOL configuration tests

OPTIONS:
        -c      Cleanup lo+file devices at start

EOF
}

while getopts 'hvct:s:?' OPTION; do
        case $OPTION in
        h)
                usage
                exit 1
                ;;
        c)
                CLEANUP=1
                ;;
        esac
done
if [ $(id -u) != 0 ]; then
        die "Must run as root"
fi

# Perform pre-cleanup if requested
if [ ${CLEANUP} ]; then
        rm -f /tmp/zpool.cache.*
fi
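# The pre-cleanup only needs to remove stale zpool.cache.* files that a
# previous aborted run may have left in /tmp; each test creates a fresh
# cache file via mktemp, so runs do not interfere with one another.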
zconfig_partition() {
        local DEVICE=$1
        local START=$2
        local END=$3
        local TMP_FILE=`mktemp`

        /sbin/sfdisk -q ${DEVICE} << EOF &>${TMP_FILE} || fail 4
${START},${END}
,
;
;
EOF

        rm ${TMP_FILE}
}
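# The here-document feeds sfdisk one "start,size" record for the first
# partition plus filler records for the rest, so (assuming the input above)
# a call like the following hypothetical example yields /dev/tank/volume1
# and /dev/tank/volume2 on a 100M volume:
#
#       zconfig_partition /dev/tank/volume 0 64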
# Validate persistent zpool.cache configuration.
zconfig_test1() {
        local POOL_NAME=test1
        local TMP_FILE1=`mktemp`
        local TMP_FILE2=`mktemp`
        local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

        # Create a pool and save its status for comparison.
        ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
        ${ZPOOL} status ${POOL_NAME} >${TMP_FILE1} || fail 3

        # Unload/load the module stack and verify the pool persists.
        ${ZFS_SH} -u || fail 4
        ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 5
        ${ZPOOL} status ${POOL_NAME} >${TMP_FILE2} || fail 6
        cmp ${TMP_FILE1} ${TMP_FILE2} || fail 7

        # Cleanup the test pool and temporary files.
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 8
        rm -f ${TMP_FILE1} ${TMP_FILE2} ${TMP_CACHE} || fail 9
        ${ZFS_SH} -u || fail 10
}
run_test 1 "persistent zpool.cache"
# Validate ZFS disk scanning and import without zpool.cache configuration.
zconfig_test2() {
        local POOL_NAME=test2
        local TMP_FILE1=`mktemp`
        local TMP_FILE2=`mktemp`
        local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

        # Create a pool and save its status for comparison.
        ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
        ${ZPOOL} status ${POOL_NAME} >${TMP_FILE1} || fail 3

        # Unload the module stack, remove the cache file, load the module
        # stack, and attempt to probe the disks to import the pool.  As
        # a cross check, verify the old pool state against the imported.
        ${ZFS_SH} -u || fail 4
        rm -f ${TMP_CACHE} || fail 5
        ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 6
        ${ZPOOL} import | grep ${POOL_NAME} >/dev/null || fail 7
        ${ZPOOL} import ${POOL_NAME} || fail 8
        ${ZPOOL} status ${POOL_NAME} >${TMP_FILE2} || fail 9
        cmp ${TMP_FILE1} ${TMP_FILE2} || fail 10

        # Cleanup the test pool and temporary files.
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 11
        rm -f ${TMP_FILE1} ${TMP_FILE2} || fail 12
        ${ZFS_SH} -u || fail 13
}
run_test 2 "scan disks for pools to import"
zconfig_zvol_device_stat() {
        local EXPECT=$1
        local POOL_NAME=/dev/$2
        local ZVOL_NAME=/dev/$3
        local SNAP_NAME=/dev/$4
        local CLONE_NAME=/dev/$5
        local COUNT=0

        # Briefly delay for udev
        sleep 1

        # Pool
        stat ${POOL_NAME} &>/dev/null && let COUNT=$COUNT+1

        # Volume and partitions
        stat ${ZVOL_NAME} &>/dev/null && let COUNT=$COUNT+1
        stat ${ZVOL_NAME}1 &>/dev/null && let COUNT=$COUNT+1
        stat ${ZVOL_NAME}2 &>/dev/null && let COUNT=$COUNT+1

        # Snapshot with partitions
        stat ${SNAP_NAME} &>/dev/null && let COUNT=$COUNT+1
        stat ${SNAP_NAME}1 &>/dev/null && let COUNT=$COUNT+1
        stat ${SNAP_NAME}2 &>/dev/null && let COUNT=$COUNT+1

        # Clone with partitions
        stat ${CLONE_NAME} &>/dev/null && let COUNT=$COUNT+1
        stat ${CLONE_NAME}1 &>/dev/null && let COUNT=$COUNT+1
        stat ${CLONE_NAME}2 &>/dev/null && let COUNT=$COUNT+1

        if [ $EXPECT -ne $COUNT ]; then
                return 1
        fi

        return 0
}
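# The expected counts passed by callers follow from the ten stat checks
# above: 1 pool directory + 3 volume nodes (volume, partition 1, partition
# 2) + 3 snapshot nodes + 3 clone nodes = 10 when everything is present,
# and 0 once the pool is exported, destroyed, or the modules are unloaded.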
# zpool import/export device check
# (1 volume, 2 partitions, 1 snapshot, 1 clone)
zconfig_test3() {
        local POOL_NAME=tank
        local ZVOL_NAME=volume
        local SNAP_NAME=snap
        local CLONE_NAME=clone
        local FULL_ZVOL_NAME=${POOL_NAME}/${ZVOL_NAME}
        local FULL_SNAP_NAME=${POOL_NAME}/${ZVOL_NAME}@${SNAP_NAME}
        local FULL_CLONE_NAME=${POOL_NAME}/${CLONE_NAME}
        local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

        # Create a pool, volume, partition, snapshot, and clone.
        ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
        ${ZFS} create -V 100M ${FULL_ZVOL_NAME} || fail 3
        zconfig_partition /dev/${FULL_ZVOL_NAME} 0 64 || fail 4
        ${ZFS} snapshot ${FULL_SNAP_NAME} || fail 5
        ${ZFS} clone ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 6

        # Verify the devices were created.
        zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \
            ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 7

        # Export the pool.
        ${ZPOOL} export ${POOL_NAME} || fail 8

        # Verify the devices were removed.
        zconfig_zvol_device_stat 0 ${POOL_NAME} ${FULL_ZVOL_NAME} \
            ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 9

        # Import the pool; wait 1 second for udev.
        ${ZPOOL} import ${POOL_NAME} || fail 10

        # Verify the devices were created.
        zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \
            ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 11

        # Destroy the pool and consequently the devices.
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 12

        # Verify the devices were removed.
        zconfig_zvol_device_stat 0 ${POOL_NAME} ${FULL_ZVOL_NAME} \
            ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 13

        ${ZFS_SH} -u || fail 14
        rm -f ${TMP_CACHE} || fail 15
}
run_test 3 "zpool import/export device"
# zpool insmod/rmmod device check (1 volume, 1 snapshot, 1 clone)
zconfig_test4() {
        POOL_NAME=tank
        ZVOL_NAME=volume
        SNAP_NAME=snap
        CLONE_NAME=clone
        FULL_ZVOL_NAME=${POOL_NAME}/${ZVOL_NAME}
        FULL_SNAP_NAME=${POOL_NAME}/${ZVOL_NAME}@${SNAP_NAME}
        FULL_CLONE_NAME=${POOL_NAME}/${CLONE_NAME}
        TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

        # Create a pool, volume, snapshot, and clone.
        ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
        ${ZFS} create -V 100M ${FULL_ZVOL_NAME} || fail 3
        zconfig_partition /dev/${FULL_ZVOL_NAME} 0 64 || fail 4
        ${ZFS} snapshot ${FULL_SNAP_NAME} || fail 5
        ${ZFS} clone ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 6

        # Verify the devices were created.
        zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \
            ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 7

        # Unload the module stack.
        ${ZFS_SH} -u || fail 8

        # Verify the devices were removed.
        zconfig_zvol_device_stat 0 ${POOL_NAME} ${FULL_ZVOL_NAME} \
            ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 9

        # Load the modules; wait 1 second for udev.
        ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 10

        # Verify the devices were created.
        zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \
            ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 11

        # Destroy the pool and consequently the devices.
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 12

        # Verify the devices were removed.
        zconfig_zvol_device_stat 0 ${POOL_NAME} ${FULL_ZVOL_NAME} \
            ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 13

        ${ZFS_SH} -u || fail 14
        rm -f ${TMP_CACHE} || fail 15
}
run_test 4 "zpool insmod/rmmod device"
# ZVOL volume sanity check
zconfig_test5() {
        local POOL_NAME=tank
        local ZVOL_NAME=volume
        local FULL_NAME=${POOL_NAME}/${ZVOL_NAME}
        local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

        # Create a pool and volume.
        ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
        ${ZFS} create -V 400M ${FULL_NAME} || fail 3

        # Partition the volume; for a 400M volume there will be
        # 812 cylinders, 16 heads, and 63 sectors per track.
        zconfig_partition /dev/${FULL_NAME} 0 812
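        # Geometry check: 812 cylinders x 16 heads x 63 sectors/track x 512
        # bytes/sector = 419,069,952 bytes, just under the 400M
        # (419,430,400-byte) volume, so partitioning cylinders 0-812 covers
        # essentially the whole device.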
        # Format the partition with ext3.
        /sbin/mkfs.ext3 -q /dev/${FULL_NAME}1 || fail 5

        # Mount the ext3 filesystem and copy some data to it.
        mkdir -p /tmp/${ZVOL_NAME}1 || fail 6
        mount /dev/${FULL_NAME}1 /tmp/${ZVOL_NAME}1 || fail 7
        cp -RL ${SRC_DIR} /tmp/${ZVOL_NAME}1 || fail 8

        # Verify the copied files match the original files.
        diff -ur ${SRC_DIR} /tmp/${ZVOL_NAME}1${SRC_DIR} &>/dev/null || fail 9

        # Remove the files, umount, destroy the volume and pool.
        rm -Rf /tmp/${ZVOL_NAME}1${SRC_DIR}* || fail 10
        umount /tmp/${ZVOL_NAME}1 || fail 11
        rmdir /tmp/${ZVOL_NAME}1 || fail 12

        ${ZFS} destroy ${FULL_NAME} || fail 13
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 14
        ${ZFS_SH} -u || fail 15
        rm -f ${TMP_CACHE} || fail 16
}
run_test 5 "zvol+ext3 volume"
# ZVOL snapshot sanity check
zconfig_test6() {
        local POOL_NAME=tank
        local ZVOL_NAME=volume
        local SNAP_NAME=pristine
        local FULL_ZVOL_NAME=${POOL_NAME}/${ZVOL_NAME}
        local FULL_SNAP_NAME=${POOL_NAME}/${ZVOL_NAME}@${SNAP_NAME}
        local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

        # Create a pool and volume.
        ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
        ${ZFS} create -V 400M ${FULL_ZVOL_NAME} || fail 3

        # Partition the volume; for a 400M volume there will be
        # 812 cylinders, 16 heads, and 63 sectors per track.
        zconfig_partition /dev/${FULL_ZVOL_NAME} 0 812

        # Format the partition with ext2 (no journal).
        /sbin/mkfs.ext2 -q /dev/${FULL_ZVOL_NAME}1 || fail 5

        # Mount the ext2 filesystem and copy some data to it.
        mkdir -p /tmp/${ZVOL_NAME}1 || fail 6
        mount /dev/${FULL_ZVOL_NAME}1 /tmp/${ZVOL_NAME}1 || fail 7

        # Snapshot the pristine ext2 filesystem and mount it read-only.
        ${ZFS} snapshot ${FULL_SNAP_NAME} && sleep 1 || fail 8
        mkdir -p /tmp/${SNAP_NAME}1 || fail 9
        mount /dev/${FULL_SNAP_NAME}1 /tmp/${SNAP_NAME}1 &>/dev/null || fail 10

        # Copy to the original volume.
        cp -RL ${SRC_DIR} /tmp/${ZVOL_NAME}1 || fail 11

        # Verify the copied files match the original files,
        # and the copied files do NOT appear in the snapshot.
        diff -ur ${SRC_DIR} /tmp/${ZVOL_NAME}1${SRC_DIR} &>/dev/null || fail 12
        diff -ur ${SRC_DIR} /tmp/${SNAP_NAME}1${SRC_DIR} &>/dev/null && fail 13

        # Umount, destroy the snapshot, volume, and pool.
        umount /tmp/${SNAP_NAME}1 || fail 14
        rmdir /tmp/${SNAP_NAME}1 || fail 15
        ${ZFS} destroy ${FULL_SNAP_NAME} || fail 16

        umount /tmp/${ZVOL_NAME}1 || fail 17
        rmdir /tmp/${ZVOL_NAME}1 || fail 18
        ${ZFS} destroy ${FULL_ZVOL_NAME} || fail 19

        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 20
        ${ZFS_SH} -u || fail 21
        rm -f ${TMP_CACHE} || fail 22
}
run_test 6 "zvol+ext2 snapshot"
# ZVOL clone sanity check
zconfig_test7() {
        local POOL_NAME=tank
        local ZVOL_NAME=volume
        local SNAP_NAME=pristine
        local CLONE_NAME=clone
        local FULL_ZVOL_NAME=${POOL_NAME}/${ZVOL_NAME}
        local FULL_SNAP_NAME=${POOL_NAME}/${ZVOL_NAME}@${SNAP_NAME}
        local FULL_CLONE_NAME=${POOL_NAME}/${CLONE_NAME}
        local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

        # Create a pool and volume.
        ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
        ${ZFS} create -V 400M ${FULL_ZVOL_NAME} || fail 3

        # Partition the volume; for a 400M volume there will be
        # 812 cylinders, 16 heads, and 63 sectors per track.
        zconfig_partition /dev/${FULL_ZVOL_NAME} 0 812

        # Format the partition with ext2 (no journal).
        /sbin/mkfs.ext2 -q /dev/${FULL_ZVOL_NAME}1 || fail 5

        # Mount the ext2 filesystem and copy some data to it.
        mkdir -p /tmp/${ZVOL_NAME}1 || fail 6
        mount /dev/${FULL_ZVOL_NAME}1 /tmp/${ZVOL_NAME}1 || fail 7

        # Snapshot the pristine ext2 filesystem and mount it read-only.
        ${ZFS} snapshot ${FULL_SNAP_NAME} && sleep 1 || fail 8
        mkdir -p /tmp/${SNAP_NAME}1 || fail 9
        mount /dev/${FULL_SNAP_NAME}1 /tmp/${SNAP_NAME}1 &>/dev/null || fail 10

        # Copy to the original volume.
        cp -RL ${SRC_DIR} /tmp/${ZVOL_NAME}1 || fail 11

        # Verify the copied files match the original files,
        # and the copied files do NOT appear in the snapshot.
        diff -ur ${SRC_DIR} /tmp/${ZVOL_NAME}1${SRC_DIR} &>/dev/null || fail 12
        diff -ur ${SRC_DIR} /tmp/${SNAP_NAME}1${SRC_DIR} &>/dev/null && fail 13

        # Clone from the original pristine snapshot.
        ${ZFS} clone ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} && sleep 1 || fail 14
        mkdir -p /tmp/${CLONE_NAME}1 || fail 15
        mount /dev/${FULL_CLONE_NAME}1 /tmp/${CLONE_NAME}1 || fail 16

        # Verify the clone matches the pristine snapshot,
        # and the files copied to the original volume are NOT there.
        diff -ur /tmp/${SNAP_NAME}1 /tmp/${CLONE_NAME}1 &>/dev/null || fail 17
        diff -ur /tmp/${ZVOL_NAME}1 /tmp/${CLONE_NAME}1 &>/dev/null && fail 18

        # Copy to the cloned volume.
        cp -RL ${SRC_DIR} /tmp/${CLONE_NAME}1 || fail 19

        # Verify the clone now matches the modified original volume.
        diff -ur /tmp/${ZVOL_NAME}1 /tmp/${CLONE_NAME}1 &>/dev/null || fail 20

        # Umount, destroy the clone, snapshot, volume, and pool.
        umount /tmp/${CLONE_NAME}1 || fail 21
        rmdir /tmp/${CLONE_NAME}1 || fail 22
        ${ZFS} destroy ${FULL_CLONE_NAME} || fail 23

        umount /tmp/${SNAP_NAME}1 || fail 24
        rmdir /tmp/${SNAP_NAME}1 || fail 25
        ${ZFS} destroy ${FULL_SNAP_NAME} || fail 26

        umount /tmp/${ZVOL_NAME}1 || fail 27
        rmdir /tmp/${ZVOL_NAME}1 || fail 28
        ${ZFS} destroy ${FULL_ZVOL_NAME} || fail 29

        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 30
        ${ZFS_SH} -u || fail 31
        rm -f ${TMP_CACHE} || fail 32
}
run_test 7 "zvol+ext2 clone"
# Send/Receive sanity check
zconfig_test8() {
        local POOL_NAME1=tank1
        local POOL_NAME2=tank2
        local ZVOL_NAME=volume
        local SNAP_NAME=snap
        local FULL_ZVOL_NAME1=${POOL_NAME1}/${ZVOL_NAME}
        local FULL_ZVOL_NAME2=${POOL_NAME2}/${ZVOL_NAME}
        local FULL_SNAP_NAME1=${POOL_NAME1}/${ZVOL_NAME}@${SNAP_NAME}
        local FULL_SNAP_NAME2=${POOL_NAME2}/${ZVOL_NAME}@${SNAP_NAME}
        local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

        # Create two pools and a volume.
        ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME1} -c lo-raidz2 || fail 2
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME2} -c lo-raidz2 || fail 3
        ${ZFS} create -V 400M ${FULL_ZVOL_NAME1} || fail 4

        # Partition the volume; for a 400M volume there will be
        # 812 cylinders, 16 heads, and 63 sectors per track.
        zconfig_partition /dev/${FULL_ZVOL_NAME1} 0 812

        # Format the partition with ext2.
        /sbin/mkfs.ext2 -q /dev/${FULL_ZVOL_NAME1}1 || fail 5

        # Mount the ext2 filesystem and copy some data to it.
        mkdir -p /tmp/${FULL_ZVOL_NAME1}1 || fail 6
        mount /dev/${FULL_ZVOL_NAME1}1 /tmp/${FULL_ZVOL_NAME1}1 || fail 7
        cp -RL ${SRC_DIR} /tmp/${FULL_ZVOL_NAME1}1 || fail 8

        # Snapshot the ext2 filesystem so it may be sent.
        ${ZFS} snapshot ${FULL_SNAP_NAME1} && sleep 1 || fail 11

        # Send/receive the snapshot from POOL_NAME1 to POOL_NAME2.
        (${ZFS} send ${FULL_SNAP_NAME1} | \
            ${ZFS} receive ${FULL_ZVOL_NAME2}) && sleep 1 || fail 12

        # Mount the received ext2 filesystem.
        mkdir -p /tmp/${FULL_ZVOL_NAME2}1 || fail 13
        mount /dev/${FULL_ZVOL_NAME2}1 /tmp/${FULL_ZVOL_NAME2}1 || fail 14

        # Verify the contents of the volumes match.
        diff -ur /tmp/${FULL_ZVOL_NAME1}1 /tmp/${FULL_ZVOL_NAME2}1 \
            &>/dev/null || fail 15

        # Umount, destroy the volumes and pools.
        umount /tmp/${FULL_ZVOL_NAME1}1 || fail 16
        umount /tmp/${FULL_ZVOL_NAME2}1 || fail 17
        rmdir /tmp/${FULL_ZVOL_NAME1}1 || fail 18
        rmdir /tmp/${FULL_ZVOL_NAME2}1 || fail 19
        rmdir /tmp/${POOL_NAME1} || fail 20
        rmdir /tmp/${POOL_NAME2} || fail 21

        ${ZFS} destroy ${FULL_SNAP_NAME1} || fail 22
        ${ZFS} destroy ${FULL_SNAP_NAME2} || fail 23
        ${ZFS} destroy ${FULL_ZVOL_NAME1} || fail 24
        ${ZFS} destroy ${FULL_ZVOL_NAME2} || fail 25
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME1} -c lo-raidz2 -d || fail 26
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME2} -c lo-raidz2 -d || fail 27
        ${ZFS_SH} -u || fail 28
        rm -f ${TMP_CACHE} || fail 29
}
run_test 8 "zfs send/receive"
# zpool event sanity check
zconfig_test9() {
        local POOL_NAME=tank
        local ZVOL_NAME=volume
        local FULL_NAME=${POOL_NAME}/${ZVOL_NAME}
        local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
        local TMP_EVENTS=`mktemp -p /tmp zpool.events.XXXXXXXX`

        # Create a pool and volume.
        ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
        ${ZFS} create -V 400M ${FULL_NAME} || fail 3

        # Dump the events; there should be at least 5 lines.
        ${ZPOOL} events >${TMP_EVENTS} || fail 4
        EVENTS=`wc -l ${TMP_EVENTS} | cut -f1 -d' '`
        [ $EVENTS -lt 5 ] && fail 5

        # Clear the events and ensure there are none.
        ${ZPOOL} events -c >/dev/null || fail 6
        ${ZPOOL} events >${TMP_EVENTS} || fail 7
        EVENTS=`wc -l ${TMP_EVENTS} | cut -f1 -d' '`
        [ $EVENTS -gt 1 ] && fail 8

        ${ZFS} destroy ${FULL_NAME} || fail 9
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 10
        ${ZFS_SH} -u || fail 11
        rm -f ${TMP_CACHE} || fail 12
        rm -f ${TMP_EVENTS} || fail 13
}
run_test 9 "zpool events"

exit 0