#!/bin/bash
#
# ZFS/ZPOOL configuration test script.

basedir="$(dirname "$0")"

SCRIPT_COMMON=common.sh
if [ -f "${basedir}/${SCRIPT_COMMON}" ]; then
. "${basedir}/${SCRIPT_COMMON}"
else
echo "Missing helper script ${SCRIPT_COMMON}" && exit 1
fi

PROG=zconfig.sh

usage() {
cat << EOF
USAGE:
$0 [-hvc] [-t <#>] [-s <#>]

DESCRIPTION:
        ZFS/ZPOOL configuration tests

OPTIONS:
        -h      Show this message
        -v      Verbose
        -c      Cleanup lo+file devices at start
        -t <#>  Run listed tests
        -s <#>  Skip listed tests

EOF
}

while getopts 'hvct:s:?' OPTION; do
    case $OPTION in
    h)
        usage
        exit 1
        ;;
    v)
        VERBOSE=1
        ;;
    c)
        CLEANUP=1
        ;;
    t)
        TESTS_RUN=($OPTARG)
        ;;
    s)
        TESTS_SKIP=($OPTARG)
        ;;
    ?)
        usage
        exit
        ;;
    esac
done

if [ $(id -u) != 0 ]; then
    die "Must run as root"
fi

# Initialize the test suite
init

# Perform pre-cleanup if requested
if [ ${CLEANUP} ]; then
    ${ZFS_SH} -u
    cleanup_md_devices
    cleanup_loop_devices
    rm -f /tmp/zpool.cache.*
fi

# Check if we need to skip the test that requires scsi_debug and lsscsi.
SCSI_DEBUG=0
${INFOMOD} scsi_debug &>/dev/null && SCSI_DEBUG=1
HAVE_LSSCSI=0
test -f ${LSSCSI} && HAVE_LSSCSI=1
if [ ${SCSI_DEBUG} -eq 0 ] || [ ${HAVE_LSSCSI} -eq 0 ]; then
    echo "Skipping test 10 which requires the scsi_debug " \
        "module and the ${LSSCSI} utility"
fi

zconfig_partition() {
    local DEVICE=$1
    local START=$2
    local END=$3
    local TMP_FILE=`mktemp`

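    # sfdisk reads one input line per primary partition: the first
    # line creates partition 1 at ${START} sized ${END} cylinders,
    # the first bare ";" gives partition 2 the default of any
    # remaining space, and the last two ";" lines leave partitions
    # 3 and 4 empty.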
    /sbin/sfdisk -q ${DEVICE} << EOF &>${TMP_FILE} || fail 4
${START},${END}
;
;
;
EOF

    rm ${TMP_FILE}
    udev_trigger
}
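
# For example, "zconfig_partition /dev/zvol/tank/volume 0 64" creates
# /dev/zvol/tank/volume-part1 plus a -part2 covering any space left
# on the volume.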

# Validate persistent zpool.cache configuration.
test_1() {
    local POOL_NAME=test1
    local TMP_FILE1=`mktemp`
    local TMP_FILE2=`mktemp`
    local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

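    # Pointing spa_config_path at a private cache file keeps the
    # test from disturbing the system-wide /etc/zfs/zpool.cache.
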
    # Create a pool and save its status for comparison.
    ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
    ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
    ${ZPOOL} status ${POOL_NAME} >${TMP_FILE1} || fail 3

    # Unload/load the module stack and verify the pool persists.
    ${ZFS_SH} -u || fail 4
    ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 5
    ${ZPOOL} status ${POOL_NAME} >${TMP_FILE2} || fail 6
    cmp ${TMP_FILE1} ${TMP_FILE2} || fail 7

    # Cleanup the test pool and temporary files.
    ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 8
    rm -f ${TMP_FILE1} ${TMP_FILE2} ${TMP_CACHE} || fail 9
    ${ZFS_SH} -u || fail 10

    pass
}
run_test 1 "persistent zpool.cache"

# Validate ZFS disk scanning and import without zpool.cache configuration.
test_2() {
    local POOL_NAME=test2
    local TMP_FILE1=`mktemp`
    local TMP_FILE2=`mktemp`
    local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

    # Create a pool and save its status for comparison.
    ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
    ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
    ${ZPOOL} status ${POOL_NAME} >${TMP_FILE1} || fail 3

    # Unload the module stack, remove the cache file, load the module
    # stack, and attempt to probe the disks to import the pool.  As
    # a cross check, verify the old pool state against the imported.
    ${ZFS_SH} -u || fail 4
    rm -f ${TMP_CACHE} || fail 5
    ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 6
    ${ZPOOL} import | grep ${POOL_NAME} >/dev/null || fail 7
    ${ZPOOL} import -f ${POOL_NAME} || fail 8
    ${ZPOOL} status ${POOL_NAME} >${TMP_FILE2} || fail 9
    cmp ${TMP_FILE1} ${TMP_FILE2} || fail 10

    # Cleanup the test pool and temporary files.
    ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 11
    rm -f ${TMP_FILE1} ${TMP_FILE2} || fail 12
    ${ZFS_SH} -u || fail 13

    pass
}
run_test 2 "scan disks for pools to import"

zconfig_zvol_device_stat() {
    local EXPECT=$1
    local POOL_NAME=/dev/zvol/$2
    local ZVOL_NAME=/dev/zvol/$3
    local SNAP_NAME=/dev/zvol/$4
    local CLONE_NAME=/dev/zvol/$5
    local COUNT=0
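
    # An EXPECT of 10 breaks down as: the pool device directory,
    # plus a whole-disk node and two partition nodes each for the
    # volume, snapshot, and clone (1 + 3 * 3).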

    # Briefly delay for udev
    udev_trigger

    # Pool exists
    stat ${POOL_NAME} &>/dev/null && let COUNT=$COUNT+1

    # Volume and partitions
    stat ${ZVOL_NAME} &>/dev/null && let COUNT=$COUNT+1
    stat ${ZVOL_NAME}-part1 &>/dev/null && let COUNT=$COUNT+1
    stat ${ZVOL_NAME}-part2 &>/dev/null && let COUNT=$COUNT+1

    # Snapshot with partitions
    stat ${SNAP_NAME} &>/dev/null && let COUNT=$COUNT+1
    stat ${SNAP_NAME}-part1 &>/dev/null && let COUNT=$COUNT+1
    stat ${SNAP_NAME}-part2 &>/dev/null && let COUNT=$COUNT+1

    # Clone with partitions
    stat ${CLONE_NAME} &>/dev/null && let COUNT=$COUNT+1
    stat ${CLONE_NAME}-part1 &>/dev/null && let COUNT=$COUNT+1
    stat ${CLONE_NAME}-part2 &>/dev/null && let COUNT=$COUNT+1

    if [ $EXPECT -ne $COUNT ]; then
        return 1
    fi

    return 0
}

# zpool import/export device check
# (1 volume, 2 partitions, 1 snapshot, 1 clone)
test_3() {
    local POOL_NAME=tank
    local ZVOL_NAME=volume
    local SNAP_NAME=snap
    local CLONE_NAME=clone
    local FULL_ZVOL_NAME=${POOL_NAME}/${ZVOL_NAME}
    local FULL_SNAP_NAME=${POOL_NAME}/${ZVOL_NAME}@${SNAP_NAME}
    local FULL_CLONE_NAME=${POOL_NAME}/${CLONE_NAME}
    local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

    # Create a pool, volume, partition, snapshot, and clone.
    ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
    ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
    ${ZFS} create -V 100M ${FULL_ZVOL_NAME} || fail 3
    zconfig_partition /dev/zvol/${FULL_ZVOL_NAME} 0 64 || fail 4
    ${ZFS} snapshot ${FULL_SNAP_NAME} || fail 5
    ${ZFS} clone ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 6

    # Verify the devices were created.
    zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \
        ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 7

    # Export the pool.
    ${ZPOOL} export ${POOL_NAME} || fail 8

    # Verify the devices were removed.
    zconfig_zvol_device_stat 0 ${POOL_NAME} ${FULL_ZVOL_NAME} \
        ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 9

    # Import the pool; the device check below settles udev.
    ${ZPOOL} import ${POOL_NAME} || fail 10

    # Verify the devices were created.
    zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \
        ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 11

    # Destroy the pool and consequently the devices.
    ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 12

    # Verify the devices were removed.
    zconfig_zvol_device_stat 0 ${POOL_NAME} ${FULL_ZVOL_NAME} \
        ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 13

    ${ZFS_SH} -u || fail 14
    rm -f ${TMP_CACHE} || fail 15

    pass
}
run_test 3 "zpool import/export device"

# zpool insmod/rmmod device check (1 volume, 1 snapshot, 1 clone)
test_4() {
    local POOL_NAME=tank
    local ZVOL_NAME=volume
    local SNAP_NAME=snap
    local CLONE_NAME=clone
    local FULL_ZVOL_NAME=${POOL_NAME}/${ZVOL_NAME}
    local FULL_SNAP_NAME=${POOL_NAME}/${ZVOL_NAME}@${SNAP_NAME}
    local FULL_CLONE_NAME=${POOL_NAME}/${CLONE_NAME}
    local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

    # Create a pool, volume, snapshot, and clone.
    ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
    ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
    ${ZFS} create -V 100M ${FULL_ZVOL_NAME} || fail 3
    zconfig_partition /dev/zvol/${FULL_ZVOL_NAME} 0 64 || fail 4
    ${ZFS} snapshot ${FULL_SNAP_NAME} || fail 5
    ${ZFS} clone ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 6

    # Verify the devices were created.
    zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \
        ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 7

    # Unload the modules.
    ${ZFS_SH} -u || fail 8

    # Verify the devices were removed.
    zconfig_zvol_device_stat 0 ${POOL_NAME} ${FULL_ZVOL_NAME} \
        ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 9

    # Load the modules; the device check below settles udev.
    ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 10

    # Verify the devices were created.
    zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \
        ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 11

    # Destroy the pool and consequently the devices.
    ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 12

    # Verify the devices were removed.
    zconfig_zvol_device_stat 0 ${POOL_NAME} ${FULL_ZVOL_NAME} \
        ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 13

    ${ZFS_SH} -u || fail 14
    rm -f ${TMP_CACHE} || fail 15

    pass
}
run_test 4 "zpool insmod/rmmod device"

# ZVOL volume sanity check
test_5() {
    local POOL_NAME=tank
    local ZVOL_NAME=fish
    local FULL_NAME=${POOL_NAME}/${ZVOL_NAME}
    local SRC_DIR=/bin/
    local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

    # Create a pool and volume.
    ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
    ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raid0 || fail 2
    ${ZFS} create -V 800M ${FULL_NAME} || fail 3

    # Partition the volume; for an 800M volume there will be
    # 1624 cylinders, 16 heads, and 63 sectors per track.
    zconfig_partition /dev/zvol/${FULL_NAME} 0 1624 || fail 4

    # Format the partition with ext3.
    /sbin/mkfs.ext3 -q /dev/zvol/${FULL_NAME}-part1 || fail 5

    # Mount the ext3 filesystem and copy some data to it.
    mkdir -p /tmp/${ZVOL_NAME}-part1 || fail 6
    mount /dev/zvol/${FULL_NAME}-part1 /tmp/${ZVOL_NAME}-part1 || fail 7
    cp -RL ${SRC_DIR} /tmp/${ZVOL_NAME}-part1 || fail 8
    sync

    # Verify the copied files match the original files.
    diff -ur ${SRC_DIR} /tmp/${ZVOL_NAME}-part1${SRC_DIR} \
        &>/dev/null || fail 9

    # Remove the files, umount, destroy the volume and pool.
    rm -Rf /tmp/${ZVOL_NAME}-part1${SRC_DIR}* || fail 10
    umount /tmp/${ZVOL_NAME}-part1 || fail 11
    rmdir /tmp/${ZVOL_NAME}-part1 || fail 12

    ${ZFS} destroy ${FULL_NAME} || fail 13
    ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raid0 -d || fail 14
    ${ZFS_SH} -u || fail 15
    rm -f ${TMP_CACHE} || fail 16

    pass
}
run_test 5 "zvol+ext3 volume"

# ZVOL snapshot sanity check
test_6() {
    local POOL_NAME=tank
    local ZVOL_NAME=fish
    local SNAP_NAME=pristine
    local FULL_ZVOL_NAME=${POOL_NAME}/${ZVOL_NAME}
    local FULL_SNAP_NAME=${POOL_NAME}/${ZVOL_NAME}@${SNAP_NAME}
    local SRC_DIR=/bin/
    local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

    # Create a pool and volume.
    ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
    ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raid0 || fail 2
    ${ZFS} create -V 800M ${FULL_ZVOL_NAME} || fail 3

    # Partition the volume; for an 800M volume there will be
    # 1624 cylinders, 16 heads, and 63 sectors per track.
    zconfig_partition /dev/zvol/${FULL_ZVOL_NAME} 0 1624 || fail 4

    # Format the partition with ext2 (no journal).
    /sbin/mkfs.ext2 -q /dev/zvol/${FULL_ZVOL_NAME}-part1 || fail 5

    # Mount the ext2 filesystem and copy some data to it.
    mkdir -p /tmp/${ZVOL_NAME}-part1 || fail 6
    mount /dev/zvol/${FULL_ZVOL_NAME}-part1 /tmp/${ZVOL_NAME}-part1 \
        || fail 7

    # Snapshot the pristine ext2 filesystem and mount it read-only.
    ${ZFS} snapshot ${FULL_SNAP_NAME} || fail 8
    wait_udev /dev/zvol/${FULL_SNAP_NAME}-part1 30 || fail 8
    mkdir -p /tmp/${SNAP_NAME}-part1 || fail 9
    mount /dev/zvol/${FULL_SNAP_NAME}-part1 /tmp/${SNAP_NAME}-part1 \
        &>/dev/null || fail 10
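    # The snapshot zvol is write-protected, so mount(8) falls back
    # to a read-only mount; the redirect above discards the
    # "mounted read-only" warning.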

    # Copy to original volume.
    cp -RL ${SRC_DIR} /tmp/${ZVOL_NAME}-part1 || fail 11
    sync

    # Verify the copied files match the original files,
    # and the copied files do NOT appear in the snapshot.
    diff -ur ${SRC_DIR} /tmp/${ZVOL_NAME}-part1${SRC_DIR} \
        &>/dev/null || fail 12
    diff -ur ${SRC_DIR} /tmp/${SNAP_NAME}-part1${SRC_DIR} \
        &>/dev/null && fail 13

    # Umount, destroy the snapshot, volume, and pool.
    umount /tmp/${SNAP_NAME}-part1 || fail 14
    rmdir /tmp/${SNAP_NAME}-part1 || fail 15
    ${ZFS} destroy ${FULL_SNAP_NAME} || fail 16

    umount /tmp/${ZVOL_NAME}-part1 || fail 17
    rmdir /tmp/${ZVOL_NAME}-part1 || fail 18
    ${ZFS} destroy ${FULL_ZVOL_NAME} || fail 19

    ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raid0 -d || fail 20
    ${ZFS_SH} -u || fail 21
    rm -f ${TMP_CACHE} || fail 22

    pass
}
run_test 6 "zvol+ext2 snapshot"

# ZVOL clone sanity check
test_7() {
    local POOL_NAME=tank
    local ZVOL_NAME=fish
    local SNAP_NAME=pristine
    local CLONE_NAME=clone
    local FULL_ZVOL_NAME=${POOL_NAME}/${ZVOL_NAME}
    local FULL_SNAP_NAME=${POOL_NAME}/${ZVOL_NAME}@${SNAP_NAME}
    local FULL_CLONE_NAME=${POOL_NAME}/${CLONE_NAME}
    local SRC_DIR=/bin/
    local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

    # Create a pool and volume.
    ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
    ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
    ${ZFS} create -V 300M ${FULL_ZVOL_NAME} || fail 3

    # Partition the volume; for a 300M volume there will be
    # 609 cylinders, 16 heads, and 63 sectors per track.
    zconfig_partition /dev/zvol/${FULL_ZVOL_NAME} 0 609 || fail 4

    # Format the partition with ext2 (no journal).
    /sbin/mkfs.ext2 -q /dev/zvol/${FULL_ZVOL_NAME}-part1 || fail 5

    # Mount the ext2 filesystem and copy some data to it.
    mkdir -p /tmp/${ZVOL_NAME}-part1 || fail 6
    mount /dev/zvol/${FULL_ZVOL_NAME}-part1 /tmp/${ZVOL_NAME}-part1 \
        || fail 7

    # Snapshot the pristine ext2 filesystem and mount it read-only.
    ${ZFS} snapshot ${FULL_SNAP_NAME} || fail 8
    wait_udev /dev/zvol/${FULL_SNAP_NAME}-part1 30 || fail 8
    mkdir -p /tmp/${SNAP_NAME}-part1 || fail 9
    mount /dev/zvol/${FULL_SNAP_NAME}-part1 \
        /tmp/${SNAP_NAME}-part1 &>/dev/null || fail 10

    # Copy to original volume.
    cp -RL ${SRC_DIR} /tmp/${ZVOL_NAME}-part1 || fail 11
    sync

    # Verify the copied files match the original files,
    # and the copied files do NOT appear in the snapshot.
    diff -ur ${SRC_DIR} /tmp/${ZVOL_NAME}-part1${SRC_DIR} \
        &>/dev/null || fail 12
    diff -ur ${SRC_DIR} /tmp/${SNAP_NAME}-part1${SRC_DIR} \
        &>/dev/null && fail 13

    # Clone from the original pristine snapshot.
    ${ZFS} clone ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 14
    wait_udev /dev/zvol/${FULL_CLONE_NAME}-part1 30 || fail 14
    mkdir -p /tmp/${CLONE_NAME}-part1 || fail 15
    mount /dev/zvol/${FULL_CLONE_NAME}-part1 \
        /tmp/${CLONE_NAME}-part1 || fail 16

    # Verify the clone matches the pristine snapshot,
    # and the files copied to the original volume are NOT there.
    diff -ur /tmp/${SNAP_NAME}-part1 /tmp/${CLONE_NAME}-part1 \
        &>/dev/null || fail 17
    diff -ur /tmp/${ZVOL_NAME}-part1 /tmp/${CLONE_NAME}-part1 \
        &>/dev/null && fail 18

    # Copy to cloned volume.
    cp -RL ${SRC_DIR} /tmp/${CLONE_NAME}-part1 || fail 19
    sync

    # Verify the clone matches the modified original volume.
    diff -ur /tmp/${ZVOL_NAME}-part1 /tmp/${CLONE_NAME}-part1 \
        &>/dev/null || fail 20

    # Umount, destroy the clone, snapshot, volume, and pool.
    umount /tmp/${CLONE_NAME}-part1 || fail 21
    rmdir /tmp/${CLONE_NAME}-part1 || fail 22
    ${ZFS} destroy ${FULL_CLONE_NAME} || fail 23

    umount /tmp/${SNAP_NAME}-part1 || fail 24
    rmdir /tmp/${SNAP_NAME}-part1 || fail 25
    ${ZFS} destroy ${FULL_SNAP_NAME} || fail 26

    umount /tmp/${ZVOL_NAME}-part1 || fail 27
    rmdir /tmp/${ZVOL_NAME}-part1 || fail 28
    ${ZFS} destroy ${FULL_ZVOL_NAME} || fail 29

    ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 30
    ${ZFS_SH} -u || fail 31
    rm -f ${TMP_CACHE} || fail 32

    pass
}
run_test 7 "zvol+ext2 clone"

# Send/Receive sanity check
test_8() {
    local POOL_NAME1=tank1
    local POOL_NAME2=tank2
    local ZVOL_NAME=fish
    local SNAP_NAME=snap
    local FULL_ZVOL_NAME1=${POOL_NAME1}/${ZVOL_NAME}
    local FULL_ZVOL_NAME2=${POOL_NAME2}/${ZVOL_NAME}
    local FULL_SNAP_NAME1=${POOL_NAME1}/${ZVOL_NAME}@${SNAP_NAME}
    local FULL_SNAP_NAME2=${POOL_NAME2}/${ZVOL_NAME}@${SNAP_NAME}
    local SRC_DIR=/bin/
    local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

    # Create two pools and a volume.
    ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
    ${ZPOOL_CREATE_SH} -p ${POOL_NAME1} -c lo-raidz2 || fail 2
    ${ZPOOL_CREATE_SH} -p ${POOL_NAME2} -c lo-raidz2 || fail 3
    ${ZFS} create -V 300M ${FULL_ZVOL_NAME1} || fail 4

    # Partition the volume; for a 300M volume there will be
    # 609 cylinders, 16 heads, and 63 sectors per track.
    zconfig_partition /dev/zvol/${FULL_ZVOL_NAME1} 0 609

    # Format the partition with ext2.
    /sbin/mkfs.ext2 -q /dev/zvol/${FULL_ZVOL_NAME1}-part1 || fail 5

    # Mount the ext2 filesystem and copy some data to it.
    mkdir -p /tmp/${FULL_ZVOL_NAME1}-part1 || fail 6
    mount /dev/zvol/${FULL_ZVOL_NAME1}-part1 \
        /tmp/${FULL_ZVOL_NAME1}-part1 || fail 7
    cp -RL ${SRC_DIR} /tmp/${FULL_ZVOL_NAME1}-part1 || fail 8
    sync || fail 9

    # Snapshot the ext2 filesystem so it may be sent.
    ${ZFS} snapshot ${FULL_SNAP_NAME1} || fail 11
    wait_udev /dev/zvol/${FULL_SNAP_NAME1} 30 || fail 11

    # Send/receive the snapshot from POOL_NAME1 to POOL_NAME2.
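    # A full, non-incremental stream implicitly creates the target
    # dataset, so it need not exist on POOL_NAME2 beforehand.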
    (${ZFS} send ${FULL_SNAP_NAME1} | \
        ${ZFS} receive ${FULL_ZVOL_NAME2}) || fail 12
    wait_udev /dev/zvol/${FULL_ZVOL_NAME2}-part1 30 || fail 12

    # Mount the sent ext2 filesystem.
    mkdir -p /tmp/${FULL_ZVOL_NAME2}-part1 || fail 13
    mount /dev/zvol/${FULL_ZVOL_NAME2}-part1 \
        /tmp/${FULL_ZVOL_NAME2}-part1 || fail 14

    # Verify the contents of the volumes match.
    diff -ur /tmp/${FULL_ZVOL_NAME1}-part1 /tmp/${FULL_ZVOL_NAME2}-part1 \
        &>/dev/null || fail 15

    # Umount, destroy the volumes and pools.
    umount /tmp/${FULL_ZVOL_NAME1}-part1 || fail 16
    umount /tmp/${FULL_ZVOL_NAME2}-part1 || fail 17
    rmdir /tmp/${FULL_ZVOL_NAME1}-part1 || fail 18
    rmdir /tmp/${FULL_ZVOL_NAME2}-part1 || fail 19
    rmdir /tmp/${POOL_NAME1} || fail 20
    rmdir /tmp/${POOL_NAME2} || fail 21

    ${ZFS} destroy ${FULL_SNAP_NAME1} || fail 22
    ${ZFS} destroy ${FULL_SNAP_NAME2} || fail 23
    ${ZFS} destroy ${FULL_ZVOL_NAME1} || fail 24
    ${ZFS} destroy ${FULL_ZVOL_NAME2} || fail 25
    ${ZPOOL_CREATE_SH} -p ${POOL_NAME1} -c lo-raidz2 -d || fail 26
    ${ZPOOL_CREATE_SH} -p ${POOL_NAME2} -c lo-raidz2 -d || fail 27
    ${ZFS_SH} -u || fail 28
    rm -f ${TMP_CACHE} || fail 29

    pass
}
run_test 8 "zfs send/receive"

# zpool event sanity check
test_9() {
    local POOL_NAME=tank
    local ZVOL_NAME=fish
    local FULL_NAME=${POOL_NAME}/${ZVOL_NAME}
    local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
    local TMP_EVENTS=`mktemp -p /tmp zpool.events.XXXXXXXX`

    # Create a pool and volume.
    ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
    ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
    ${ZFS} create -V 300M ${FULL_NAME} || fail 3

    # Dump the events; there should be at least 5 lines.
    ${ZPOOL} events >${TMP_EVENTS} || fail 4
    EVENTS=`wc -l ${TMP_EVENTS} | cut -f1 -d' '`
    [ $EVENTS -lt 5 ] && fail 5

    # Clear the events and ensure none remain (allowing for the
    # single header line in the output).
    ${ZPOOL} events -c >/dev/null || fail 6
    ${ZPOOL} events >${TMP_EVENTS} || fail 7
    EVENTS=`wc -l ${TMP_EVENTS} | cut -f1 -d' '`
    [ $EVENTS -gt 1 ] && fail 8

    ${ZFS} destroy ${FULL_NAME} || fail 9
    ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 10
    ${ZFS_SH} -u || fail 11
    rm -f ${TMP_CACHE} || fail 12
    rm -f ${TMP_EVENTS} || fail 13

    pass
}
run_test 9 "zpool events"

zconfig_add_vdev() {
    local POOL_NAME=$1
    local TYPE=$2
    local DEVICE=$3
    local TMP_FILE1=`mktemp`
    local TMP_FILE2=`mktemp`
    local TMP_FILE3=`mktemp`

    local BASE_DEVICE=`basename ${DEVICE}`

    ${ZPOOL} status ${POOL_NAME} >${TMP_FILE1}
    ${ZPOOL} add -f ${POOL_NAME} ${TYPE} ${DEVICE} 2>/dev/null || return 1
    ${ZPOOL} status ${POOL_NAME} >${TMP_FILE2}
    diff ${TMP_FILE1} ${TMP_FILE2} > ${TMP_FILE3}

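    # Adding one vdev should append exactly two lines to the status
    # output, the parent vdev header and the device itself, so the
    # default diff format yields three lines: one change command
    # plus two "> " additions.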
    [ `wc -l ${TMP_FILE3} | ${AWK} '{print $1}'` -eq 3 ] || return 1

    local PARENT_VDEV=`tail -2 ${TMP_FILE3} | head -1 | ${AWK} '{print $NF}'`
    case $TYPE in
    cache)
        [ "${PARENT_VDEV}" = "${TYPE}" ] || return 1
        ;;
    log)
        [ "${PARENT_VDEV}" = "logs" ] || return 1
        ;;
    esac

    if ! tail -1 ${TMP_FILE3} |
        egrep -q "^>[[:space:]]+${BASE_DEVICE}[[:space:]]+ONLINE" ; then
        return 1
    fi
    rm -f ${TMP_FILE1} ${TMP_FILE2} ${TMP_FILE3}

    return 0
}

# zpool add and remove sanity check
test_10() {
    local POOL_NAME=tank
    local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
    local TMP_FILE1=`mktemp`
    local TMP_FILE2=`mktemp`

    if [ ${SCSI_DEBUG} -eq 0 ] || [ ${HAVE_LSSCSI} -eq 0 ] ; then
        skip
        return
    fi

    test `${LSMOD} | grep -c scsi_debug` -gt 0 && \
        (${RMMOD} scsi_debug || exit 1)

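    # scsi_debug presents a small (128M) RAM-backed disk through the
    # full SCSI stack; lsscsi is used below to discover the device
    # node it was assigned.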
    /sbin/modprobe scsi_debug dev_size_mb=128 ||
        die "Error $? creating scsi_debug device"
    udev_trigger

    local SDDEVICE=`${LSSCSI} | ${AWK} '/scsi_debug/ { print $6; exit }'`
    local BASE_SDDEVICE=`basename $SDDEVICE`

    # Create a pool.
    ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
    ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
    ${ZPOOL} status ${POOL_NAME} >${TMP_FILE1} || fail 3

    # Add and remove a cache vdev by full path.
    zconfig_add_vdev ${POOL_NAME} cache ${SDDEVICE} || fail 4
    ${ZPOOL} remove ${POOL_NAME} ${SDDEVICE} || fail 5
    ${ZPOOL} status ${POOL_NAME} >${TMP_FILE2} || fail 6
    cmp ${TMP_FILE1} ${TMP_FILE2} || fail 7
    sleep 1

    # Add and remove a cache vdev by shorthand path.
    zconfig_add_vdev ${POOL_NAME} cache ${BASE_SDDEVICE} || fail 8
    ${ZPOOL} remove ${POOL_NAME} ${BASE_SDDEVICE} || fail 9
    ${ZPOOL} status ${POOL_NAME} >${TMP_FILE2} || fail 10
    cmp ${TMP_FILE1} ${TMP_FILE2} || fail 11
    sleep 1

    # Add and remove a log vdev.
    zconfig_add_vdev ${POOL_NAME} log ${BASE_SDDEVICE} || fail 12
    ${ZPOOL} remove ${POOL_NAME} ${BASE_SDDEVICE} || fail 13
    ${ZPOOL} status ${POOL_NAME} >${TMP_FILE2} || fail 14
    cmp ${TMP_FILE1} ${TMP_FILE2} || fail 15

    ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 16
    ${ZFS_SH} -u || fail 17
    ${RMMOD} scsi_debug || fail 18

    rm -f ${TMP_FILE1} ${TMP_FILE2} ${TMP_CACHE} || fail 19

    pass
}
run_test 10 "zpool add/remove vdev"

exit 0