#!/bin/bash
#
# ZFS/ZPOOL configuration test script.

basedir="$(dirname $0)"

SCRIPT_COMMON=common.sh
if [ -f "${basedir}/${SCRIPT_COMMON}" ]; then
	. "${basedir}/${SCRIPT_COMMON}"
else
	echo "Missing helper script ${SCRIPT_COMMON}" && exit 1
fi

PROG=zconfig.sh

usage() {
cat << EOF
USAGE:
$0 [-hvc] [-t <#>] [-s <#>]

DESCRIPTION:
	ZFS/ZPOOL configuration tests

OPTIONS:
	-h	Show this message
	-v	Verbose
	-c	Cleanup lo+file devices at start
	-t <#>	Run listed tests
	-s <#>	Skip listed tests

EOF
}

while getopts 'hvct:s:?' OPTION; do
	case $OPTION in
	h)
		usage
		exit 1
		;;
	v)
		VERBOSE=1
		;;
	c)
		CLEANUP=1
		;;
	t)
		TESTS_RUN=($OPTARG)
		;;
	s)
		TESTS_SKIP=($OPTARG)
		;;
	?)
		usage
		exit
		;;
	esac
done
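
# Example invocations (assumed typical usage):
#   ./zconfig.sh -v           # run every test verbosely
#   ./zconfig.sh -t "1 2 3"   # run only the listed tests
#   ./zconfig.sh -c -s 10     # pre-clean loop/md devices, then skip test 10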

if [ $(id -u) != 0 ]; then
	die "Must run as root"
fi

# Initialize the test suite
init
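
# Note: init, run_test, fail, pass, skip, die, and the ${ZFS_SH},
# ${ZPOOL_CREATE_SH}, ${ZFS}, and ${ZPOOL} variables used below are
# assumed to be provided by common.sh, sourced above.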

# Perform pre-cleanup if requested
if [ ${CLEANUP} ]; then
	${ZFS_SH} -u
	cleanup_md_devices
	cleanup_loop_devices
	rm -f /tmp/zpool.cache.*
fi

# Check if we need to skip the tests that require scsi_debug and lsscsi.
SCSI_DEBUG=0
${INFOMOD} scsi_debug &>/dev/null && SCSI_DEBUG=1
HAVE_LSSCSI=0
test -f ${LSSCSI} && HAVE_LSSCSI=1
if [ ${SCSI_DEBUG} -eq 0 ] || [ ${HAVE_LSSCSI} -eq 0 ]; then
	echo "Skipping test 10 which requires the scsi_debug" \
	    "module and the ${LSSCSI} utility"
fi

# Validate persistent zpool.cache configuration.
test_1() {
	local POOL_NAME=test1
	local TMP_FILE1=`mktemp`
	local TMP_FILE2=`mktemp`
	local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

	# Create a pool and save its status for comparison.
	${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
	${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
	${ZPOOL} status ${POOL_NAME} >${TMP_FILE1} || fail 3

	# Unload/load the module stack and verify the pool persists.
	${ZFS_SH} -u || fail 4
	${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 5
	${ZPOOL} import -c ${TMP_CACHE} ${POOL_NAME} || fail 5
	${ZPOOL} status ${POOL_NAME} >${TMP_FILE2} || fail 6
	cmp ${TMP_FILE1} ${TMP_FILE2} || fail 7

	# Cleanup the test pool and temporary files
	${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 8
	rm -f ${TMP_FILE1} ${TMP_FILE2} ${TMP_CACHE} || fail 9
	${ZFS_SH} -u || fail 10

	pass
}
run_test 1 "persistent zpool.cache"
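
# run_test <num> <description> (assumed to be provided by common.sh) runs
# the given test function, honoring the -t/-s lists parsed above.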

# Validate ZFS disk scanning and import w/out zpool.cache configuration.
test_2() {
	local POOL_NAME=test2
	local TMP_FILE1=`mktemp`
	local TMP_FILE2=`mktemp`
	local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

	# Create a pool and save its status for comparison.
	${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
	${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
	${ZPOOL} status ${POOL_NAME} >${TMP_FILE1} || fail 3

	# Unload the module stack, remove the cache file, load the module
	# stack, and attempt to probe the disks to import the pool.  As
	# a cross check, verify the old pool state against the imported pool.
	${ZFS_SH} -u || fail 4
	rm -f ${TMP_CACHE} || fail 5
	${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 6
	${ZPOOL} import -d /dev ${POOL_NAME} || fail 8
	${ZPOOL} status ${POOL_NAME} >${TMP_FILE2} || fail 9
	cmp ${TMP_FILE1} ${TMP_FILE2} || fail 10

	# Cleanup the test pool and temporary files
	${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 11
	rm -f ${TMP_FILE1} ${TMP_FILE2} || fail 12
	${ZFS_SH} -u || fail 13

	pass
}
run_test 2 "scan disks for pools to import"

zconfig_zvol_device_stat() {
	local EXPECT=$1
	local POOL_NAME=/dev/zvol/$2
	local ZVOL_NAME=/dev/zvol/$3
	local SNAP_NAME=/dev/zvol/$4
	local CLONE_NAME=/dev/zvol/$5
	local COUNT=0

	# Trigger udev and wait for it to settle
	udev_trigger

	# Pool exists
	stat ${POOL_NAME} &>/dev/null && let COUNT=$COUNT+1

	# Volume and partitions
	stat ${ZVOL_NAME} &>/dev/null && let COUNT=$COUNT+1
	stat ${ZVOL_NAME}-part1 &>/dev/null && let COUNT=$COUNT+1
	stat ${ZVOL_NAME}-part2 &>/dev/null && let COUNT=$COUNT+1

	# Snapshot with partitions
	stat ${SNAP_NAME} &>/dev/null && let COUNT=$COUNT+1
	stat ${SNAP_NAME}-part1 &>/dev/null && let COUNT=$COUNT+1
	stat ${SNAP_NAME}-part2 &>/dev/null && let COUNT=$COUNT+1

	# Clone with partitions
	stat ${CLONE_NAME} &>/dev/null && let COUNT=$COUNT+1
	stat ${CLONE_NAME}-part1 &>/dev/null && let COUNT=$COUNT+1
	stat ${CLONE_NAME}-part2 &>/dev/null && let COUNT=$COUNT+1

	if [ $EXPECT -ne $COUNT ]; then
		return 1
	fi

	return 0
}
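
# For EXPECT=10 the nodes counted above would be (names assumed from the
# tests below):
#   /dev/zvol/tank                        (pool directory)
#   /dev/zvol/tank/volume{,-part1,-part2}
#   /dev/zvol/tank/volume@snap{,-part1,-part2}
#   /dev/zvol/tank/clone{,-part1,-part2}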

# zpool import/export device check
# (1 volume, 2 partitions, 1 snapshot, 1 clone)
test_3() {
	local POOL_NAME=tank
	local ZVOL_NAME=volume
	local SNAP_NAME=snap
	local CLONE_NAME=clone
	local FULL_ZVOL_NAME=${POOL_NAME}/${ZVOL_NAME}
	local FULL_SNAP_NAME=${POOL_NAME}/${ZVOL_NAME}@${SNAP_NAME}
	local FULL_CLONE_NAME=${POOL_NAME}/${CLONE_NAME}
	local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

	# Create a pool, volume, partitions, snapshot, and clone.
	${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
	${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
	${ZFS} create -V 100M ${FULL_ZVOL_NAME} || fail 3
	${ZFS} set snapdev=visible ${FULL_ZVOL_NAME} || fail 3
	label /dev/zvol/${FULL_ZVOL_NAME} msdos || fail 4
	partition /dev/zvol/${FULL_ZVOL_NAME} primary 1% 50% || fail 4
	partition /dev/zvol/${FULL_ZVOL_NAME} primary 51% -1 || fail 4
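	# (label/partition above are assumed to be common.sh wrappers around
	# parted, roughly "parted -s <dev> mklabel msdos" and
	# "parted -s <dev> mkpart primary <start> <end>".)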
	${ZFS} snapshot ${FULL_SNAP_NAME} || fail 5
	${ZFS} clone ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 6

	# Verify the devices were created
	zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \
	    ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 7

	# Export the pool
	${ZPOOL} export ${POOL_NAME} || fail 8

	# Verify the devices were removed
	zconfig_zvol_device_stat 0 ${POOL_NAME} ${FULL_ZVOL_NAME} \
	    ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 9

	# Import the pool and let udev settle
	${ZPOOL} import ${POOL_NAME} || fail 10

	# Verify the devices were created
	zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \
	    ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 11

	# Toggle the snapdev property and verify the snapshot device
	# links appear and disappear accordingly
	${ZFS} set snapdev=hidden ${FULL_ZVOL_NAME} || fail 12

	zconfig_zvol_device_stat 7 ${POOL_NAME} ${FULL_ZVOL_NAME} \
	    "invalid" ${FULL_CLONE_NAME} || fail 13

	${ZFS} set snapdev=visible ${FULL_ZVOL_NAME} || fail 14

	zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \
	    ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 15

	# Destroy the pool and consequently the devices
	${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 16

	# Verify the devices were removed
	zconfig_zvol_device_stat 0 ${POOL_NAME} ${FULL_ZVOL_NAME} \
	    ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 17

	${ZFS_SH} -u || fail 18
	rm -f ${TMP_CACHE} || fail 19

	pass
}
run_test 3 "zpool import/export device"

# zpool insmod/rmmod device check (1 volume, 1 snapshot, 1 clone)
test_4() {
	POOL_NAME=tank
	ZVOL_NAME=volume
	SNAP_NAME=snap
	CLONE_NAME=clone
	FULL_ZVOL_NAME=${POOL_NAME}/${ZVOL_NAME}
	FULL_SNAP_NAME=${POOL_NAME}/${ZVOL_NAME}@${SNAP_NAME}
	FULL_CLONE_NAME=${POOL_NAME}/${CLONE_NAME}
	TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

	# Create a pool, volume, snapshot, and clone
	${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
	${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
	${ZFS} create -V 100M ${FULL_ZVOL_NAME} || fail 3
	${ZFS} set snapdev=visible ${FULL_ZVOL_NAME} || fail 3
	label /dev/zvol/${FULL_ZVOL_NAME} msdos || fail 4
	partition /dev/zvol/${FULL_ZVOL_NAME} primary 1% 50% || fail 4
	partition /dev/zvol/${FULL_ZVOL_NAME} primary 51% -1 || fail 4
	${ZFS} snapshot ${FULL_SNAP_NAME} || fail 5
	${ZFS} clone ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 6

	# Verify the devices were created
	zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \
	    ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 7

	# Unload the modules
	${ZFS_SH} -u || fail 8

	# Verify the devices were removed
	zconfig_zvol_device_stat 0 ${POOL_NAME} ${FULL_ZVOL_NAME} \
	    ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 9

	# Load the modules and list the pools to ensure they are opened
	${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 10
	${ZPOOL} import -c ${TMP_CACHE} ${POOL_NAME} || fail 10
	${ZPOOL} list &>/dev/null

	# Verify the devices were created
	zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \
	    ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 11

	# Destroy the pool and consequently the devices
	${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 12

	# Verify the devices were removed
	zconfig_zvol_device_stat 0 ${POOL_NAME} ${FULL_ZVOL_NAME} \
	    ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 13

	${ZFS_SH} -u || fail 14
	rm -f ${TMP_CACHE} || fail 15

	pass
}
run_test 4 "zpool insmod/rmmod device"

# ZVOL volume sanity check
test_5() {
	local POOL_NAME=tank
	local ZVOL_NAME=fish
	local FULL_NAME=${POOL_NAME}/${ZVOL_NAME}
	local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

	# Create a pool and volume.
	${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
	${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raid0 || fail 2
	${ZFS} create -V 800M ${FULL_NAME} || fail 3
	label /dev/zvol/${FULL_NAME} msdos || fail 4
	partition /dev/zvol/${FULL_NAME} primary 1 -1 || fail 4
	format /dev/zvol/${FULL_NAME}-part1 ext2 || fail 5

	# Mount the ext2 filesystem and copy some data to it.
	mkdir -p /tmp/${ZVOL_NAME}-part1 || fail 6
	mount /dev/zvol/${FULL_NAME}-part1 /tmp/${ZVOL_NAME}-part1 || fail 7
	cp -RL ${SRC_DIR} /tmp/${ZVOL_NAME}-part1 || fail 8
	sync

	# Verify the copied files match the original files.
	diff -ur ${SRC_DIR} /tmp/${ZVOL_NAME}-part1/${SRC_DIR##*/} \
	    &>/dev/null || fail 9

	# Remove the files, umount, destroy the volume and pool.
	rm -Rf /tmp/${ZVOL_NAME}-part1/${SRC_DIR##*/} || fail 10
	umount /tmp/${ZVOL_NAME}-part1 || fail 11
	rmdir /tmp/${ZVOL_NAME}-part1 || fail 12

	${ZFS} destroy ${FULL_NAME} || fail 13
	${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 14
	${ZFS_SH} -u || fail 15
	rm -f ${TMP_CACHE} || fail 16

	pass
}
run_test 5 "zvol+ext2 volume"

# ZVOL snapshot sanity check
test_6() {
	local POOL_NAME=tank
	local ZVOL_NAME=fish
	local SNAP_NAME=pristine
	local FULL_ZVOL_NAME=${POOL_NAME}/${ZVOL_NAME}
	local FULL_SNAP_NAME=${POOL_NAME}/${ZVOL_NAME}@${SNAP_NAME}
	local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

	# Create a pool and volume.
	${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
	${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raid0 || fail 2
	${ZFS} create -V 800M ${FULL_ZVOL_NAME} || fail 3
	${ZFS} set snapdev=visible ${FULL_ZVOL_NAME} || fail 3
	label /dev/zvol/${FULL_ZVOL_NAME} msdos || fail 4
	partition /dev/zvol/${FULL_ZVOL_NAME} primary 1 -1 || fail 4
	format /dev/zvol/${FULL_ZVOL_NAME}-part1 ext2 || fail 5

	# Mount the ext2 filesystem and copy some data to it.
	mkdir -p /tmp/${ZVOL_NAME}-part1 || fail 6
	mount /dev/zvol/${FULL_ZVOL_NAME}-part1 /tmp/${ZVOL_NAME}-part1 \
	    || fail 7

	# Snapshot the pristine ext2 filesystem and mount it read-only.
	${ZFS} snapshot ${FULL_SNAP_NAME} || fail 8
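	# wait_udev (assumed from common.sh) polls up to the given number
	# of seconds for the device node to appear.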
	wait_udev /dev/zvol/${FULL_SNAP_NAME}-part1 30 || fail 8
	mkdir -p /tmp/${SNAP_NAME}-part1 || fail 9
	mount /dev/zvol/${FULL_SNAP_NAME}-part1 /tmp/${SNAP_NAME}-part1 \
	    &>/dev/null || fail 10

	# Copy to original volume
	cp -RL ${SRC_DIR} /tmp/${ZVOL_NAME}-part1 || fail 11
	sync

	# Verify the copied files match the original files,
	# and the copied files do NOT appear in the snapshot.
	diff -ur ${SRC_DIR} /tmp/${ZVOL_NAME}-part1/${SRC_DIR##*/} \
	    &>/dev/null || fail 12
	diff -ur ${SRC_DIR} /tmp/${SNAP_NAME}-part1/${SRC_DIR##*/} \
	    &>/dev/null && fail 13

	# Umount, destroy the snapshot, volume, and pool.
	umount /tmp/${SNAP_NAME}-part1 || fail 14
	rmdir /tmp/${SNAP_NAME}-part1 || fail 15
	${ZFS} destroy ${FULL_SNAP_NAME} || fail 16

	umount /tmp/${ZVOL_NAME}-part1 || fail 17
	rmdir /tmp/${ZVOL_NAME}-part1 || fail 18
	${ZFS} destroy ${FULL_ZVOL_NAME} || fail 19

	${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 20
	${ZFS_SH} -u || fail 21
	rm -f ${TMP_CACHE} || fail 22

	pass
}
run_test 6 "zvol+ext2 snapshot"

# ZVOL clone sanity check
test_7() {
	local POOL_NAME=tank
	local ZVOL_NAME=fish
	local SNAP_NAME=pristine
	local CLONE_NAME=clone
	local FULL_ZVOL_NAME=${POOL_NAME}/${ZVOL_NAME}
	local FULL_SNAP_NAME=${POOL_NAME}/${ZVOL_NAME}@${SNAP_NAME}
	local FULL_CLONE_NAME=${POOL_NAME}/${CLONE_NAME}
	local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

	# Create a pool and volume.
	${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
	${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
	${ZFS} create -V 300M ${FULL_ZVOL_NAME} || fail 3
	${ZFS} set snapdev=visible ${FULL_ZVOL_NAME} || fail 3
	label /dev/zvol/${FULL_ZVOL_NAME} msdos || fail 4
	partition /dev/zvol/${FULL_ZVOL_NAME} primary 1 -1 || fail 4
	format /dev/zvol/${FULL_ZVOL_NAME}-part1 ext2 || fail 5

	# Snapshot the pristine ext2 filesystem.
	${ZFS} snapshot ${FULL_SNAP_NAME} || fail 6
	wait_udev /dev/zvol/${FULL_SNAP_NAME}-part1 30 || fail 7

	# Mount the ext2 filesystem so some data can be copied to it.
	mkdir -p /tmp/${ZVOL_NAME}-part1 || fail 7
	mount /dev/zvol/${FULL_ZVOL_NAME}-part1 \
	    /tmp/${ZVOL_NAME}-part1 || fail 8

	# Mount the pristine ext2 snapshot.
	mkdir -p /tmp/${SNAP_NAME}-part1 || fail 9
	mount /dev/zvol/${FULL_SNAP_NAME}-part1 \
	    /tmp/${SNAP_NAME}-part1 &>/dev/null || fail 10

	# Copy to original volume.
	cp -RL ${SRC_DIR} /tmp/${ZVOL_NAME}-part1 || fail 11
	sync

	# Verify the copied files match the original files,
	# and the copied files do NOT appear in the snapshot.
	diff -ur ${SRC_DIR} /tmp/${ZVOL_NAME}-part1/${SRC_DIR##*/} \
	    &>/dev/null || fail 12
	diff -ur ${SRC_DIR} /tmp/${SNAP_NAME}-part1/${SRC_DIR##*/} \
	    &>/dev/null && fail 13

	# Clone from the original pristine snapshot
	${ZFS} clone ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 14
	wait_udev /dev/zvol/${FULL_CLONE_NAME}-part1 30 || fail 14
	mkdir -p /tmp/${CLONE_NAME}-part1 || fail 15
	mount /dev/zvol/${FULL_CLONE_NAME}-part1 \
	    /tmp/${CLONE_NAME}-part1 || fail 16

	# Verify the clone matches the pristine snapshot,
	# and the files copied to the original volume are NOT there.
	diff -ur /tmp/${SNAP_NAME}-part1 /tmp/${CLONE_NAME}-part1 \
	    &>/dev/null || fail 17
	diff -ur /tmp/${ZVOL_NAME}-part1 /tmp/${CLONE_NAME}-part1 \
	    &>/dev/null && fail 18

	# Copy to cloned volume.
	cp -RL ${SRC_DIR} /tmp/${CLONE_NAME}-part1 || fail 19
	sync

	# Verify the clone matches the modified original volume.
	diff -ur /tmp/${ZVOL_NAME}-part1 /tmp/${CLONE_NAME}-part1 \
	    &>/dev/null || fail 20

	# Umount, destroy the clone, snapshot, volume, and pool.
	umount /tmp/${CLONE_NAME}-part1 || fail 21
	rmdir /tmp/${CLONE_NAME}-part1 || fail 22
	${ZFS} destroy ${FULL_CLONE_NAME} || fail 23

	umount /tmp/${SNAP_NAME}-part1 || fail 24
	rmdir /tmp/${SNAP_NAME}-part1 || fail 25
	${ZFS} destroy ${FULL_SNAP_NAME} || fail 26

	umount /tmp/${ZVOL_NAME}-part1 || fail 27
	rmdir /tmp/${ZVOL_NAME}-part1 || fail 28
	${ZFS} destroy ${FULL_ZVOL_NAME} || fail 29

	${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 30
	${ZFS_SH} -u || fail 31
	rm -f ${TMP_CACHE} || fail 32

	pass
}
run_test 7 "zvol+ext2 clone"

# Send/Receive sanity check
test_8() {
	local POOL_NAME1=tank1
	local POOL_NAME2=tank2
	local ZVOL_NAME=fish
	local SNAP_NAME=snap
	local FULL_ZVOL_NAME1=${POOL_NAME1}/${ZVOL_NAME}
	local FULL_ZVOL_NAME2=${POOL_NAME2}/${ZVOL_NAME}
	local FULL_SNAP_NAME1=${POOL_NAME1}/${ZVOL_NAME}@${SNAP_NAME}
	local FULL_SNAP_NAME2=${POOL_NAME2}/${ZVOL_NAME}@${SNAP_NAME}
	local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

	# Create two pools and a volume
	${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
	${ZPOOL_CREATE_SH} -p ${POOL_NAME1} -c lo-raidz2 || fail 2
	${ZPOOL_CREATE_SH} -p ${POOL_NAME2} -c lo-raidz2 || fail 2
	${ZFS} create -V 300M ${FULL_ZVOL_NAME1} || fail 3
	${ZFS} set snapdev=visible ${FULL_ZVOL_NAME1} || fail 3
	label /dev/zvol/${FULL_ZVOL_NAME1} msdos || fail 4
	partition /dev/zvol/${FULL_ZVOL_NAME1} primary 1 -1 || fail 4
	format /dev/zvol/${FULL_ZVOL_NAME1}-part1 ext2 || fail 5

	# Mount the ext2 filesystem and copy some data to it.
	mkdir -p /tmp/${FULL_ZVOL_NAME1}-part1 || fail 6
	mount /dev/zvol/${FULL_ZVOL_NAME1}-part1 \
	    /tmp/${FULL_ZVOL_NAME1}-part1 || fail 7
	cp -RL ${SRC_DIR} /tmp/${FULL_ZVOL_NAME1}-part1 || fail 8

	# Unmount, snapshot, and remount the ext2 filesystem so it may be
	# sent.  We only unmount to ensure the ext2 filesystem is clean.
	umount /tmp/${FULL_ZVOL_NAME1}-part1 || fail 9
	${ZFS} snapshot ${FULL_SNAP_NAME1} || fail 10
	wait_udev /dev/zvol/${FULL_SNAP_NAME1} 30 || fail 10
	mount /dev/zvol/${FULL_ZVOL_NAME1}-part1 \
	    /tmp/${FULL_ZVOL_NAME1}-part1 || fail 11

	# Send/receive the snapshot from POOL_NAME1 to POOL_NAME2
	(${ZFS} send ${FULL_SNAP_NAME1} | \
	    ${ZFS} receive ${FULL_ZVOL_NAME2}) || fail 12
	wait_udev /dev/zvol/${FULL_ZVOL_NAME2}-part1 30 || fail 12

	# Mount the sent ext2 filesystem.
	mkdir -p /tmp/${FULL_ZVOL_NAME2}-part1 || fail 13
	mount /dev/zvol/${FULL_ZVOL_NAME2}-part1 \
	    /tmp/${FULL_ZVOL_NAME2}-part1 || fail 14

	# Verify the contents of the volumes match
	diff -ur /tmp/${FULL_ZVOL_NAME1}-part1 /tmp/${FULL_ZVOL_NAME2}-part1 \
	    &>/dev/null || fail 15

	# Umount, destroy the volumes and pools.
	umount /tmp/${FULL_ZVOL_NAME1}-part1 || fail 16
	umount /tmp/${FULL_ZVOL_NAME2}-part1 || fail 17
	rmdir /tmp/${FULL_ZVOL_NAME1}-part1 || fail 18
	rmdir /tmp/${FULL_ZVOL_NAME2}-part1 || fail 19
	rmdir /tmp/${POOL_NAME1} || fail 20
	rmdir /tmp/${POOL_NAME2} || fail 21

	${ZFS} destroy ${FULL_SNAP_NAME1} || fail 22
	${ZFS} destroy ${FULL_SNAP_NAME2} || fail 23
	${ZFS} destroy ${FULL_ZVOL_NAME1} || fail 24
	${ZFS} destroy ${FULL_ZVOL_NAME2} || fail 25
	${ZPOOL_CREATE_SH} -p ${POOL_NAME1} -c lo-raidz2 -d || fail 26
	${ZPOOL_CREATE_SH} -p ${POOL_NAME2} -c lo-raidz2 -d || fail 27
	${ZFS_SH} -u || fail 28
	rm -f ${TMP_CACHE} || fail 29

	pass
}
run_test 8 "zfs send/receive"

# zpool event sanity check
test_9() {
	local POOL_NAME=tank
	local ZVOL_NAME=fish
	local FULL_NAME=${POOL_NAME}/${ZVOL_NAME}
	local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
	local TMP_EVENTS=`mktemp -p /tmp zpool.events.XXXXXXXX`

	# Create a pool and volume.
	${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
	${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
	${ZFS} create -V 300M ${FULL_NAME} || fail 3
	udev_trigger

	# Dump the events; there should be at least 5 lines.
	${ZPOOL} events >${TMP_EVENTS} || fail 4
	EVENTS=`wc -l ${TMP_EVENTS} | cut -f1 -d' '`
	[ $EVENTS -lt 5 ] && fail 5
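
	# The dumped events typically include sysevent.fs.zfs.* entries
	# generated by the pool and volume creation; the exact set varies
	# by ZFS version, so only a minimum line count is checked.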

	# Clear the events and ensure there are none.
	${ZPOOL} events -c >/dev/null || fail 6
	${ZPOOL} events >${TMP_EVENTS} || fail 7
	EVENTS=`wc -l ${TMP_EVENTS} | cut -f1 -d' '`
	[ $EVENTS -gt 1 ] && fail 8

	${ZFS} destroy ${FULL_NAME} || fail 9
	${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 10
	${ZFS_SH} -u || fail 11
	rm -f ${TMP_CACHE} || fail 12
	rm -f ${TMP_EVENTS} || fail 13

	pass
}
run_test 9 "zpool events"

zconfig_add_vdev() {
	local POOL_NAME=$1
	local TYPE=$2
	local DEVICE=$3
	local TMP_FILE1=`mktemp`
	local TMP_FILE2=`mktemp`
	local TMP_FILE3=`mktemp`

	BASE_DEVICE=`basename ${DEVICE}`

	${ZPOOL} status ${POOL_NAME} >${TMP_FILE1}
	${ZPOOL} add -f ${POOL_NAME} ${TYPE} ${DEVICE} 2>/dev/null || return 1
	${ZPOOL} status ${POOL_NAME} >${TMP_FILE2}
	diff ${TMP_FILE1} ${TMP_FILE2} > ${TMP_FILE3}

	[ `wc -l ${TMP_FILE3} | ${AWK} '{print $1}'` -eq 3 ] || return 1

	PARENT_VDEV=`tail -2 ${TMP_FILE3} | head -1 | ${AWK} '{print $NF}'`
	case $TYPE in
	cache)
		[ "${PARENT_VDEV}" = "${TYPE}" ] || return 1
		;;
	log)
		[ "${PARENT_VDEV}" = "logs" ] || return 1
		;;
	esac

	if ! tail -1 ${TMP_FILE3} |
	    egrep -q "^>[[:space:]]+${BASE_DEVICE}[[:space:]]+ONLINE" ; then
		return 1
	fi
	rm -f ${TMP_FILE1} ${TMP_FILE2} ${TMP_FILE3}

	return 0
}
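
# Example of the helper above (device name hypothetical):
#   zconfig_add_vdev tank cache /dev/sdc
# would add /dev/sdc as a cache vdev and verify, via the zpool status diff,
# that it appears ONLINE under the expected parent vdev.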

# zpool add and remove sanity check
test_10() {
	local POOL_NAME=tank
	local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
	local TMP_FILE1=`mktemp`
	local TMP_FILE2=`mktemp`

	if [ ${SCSI_DEBUG} -eq 0 ] || [ ${HAVE_LSSCSI} -eq 0 ] ; then
		skip
		return
	fi

	test `${LSMOD} | grep -c scsi_debug` -gt 0 && \
	    (${RMMOD} scsi_debug || exit 1)

	/sbin/modprobe scsi_debug dev_size_mb=128 ||
	    die "Error $? creating scsi_debug device"
	udev_trigger

	SDDEVICE=`${LSSCSI} | ${AWK} '/scsi_debug/ { print $6; exit }'`
	BASE_SDDEVICE=`basename $SDDEVICE`

	# Create a pool
	${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
	${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
	${ZPOOL} status ${POOL_NAME} >${TMP_FILE1} || fail 3

	# Add and remove a cache vdev by full path
	zconfig_add_vdev ${POOL_NAME} cache ${SDDEVICE} || fail 4
	${ZPOOL} remove ${POOL_NAME} ${SDDEVICE} || fail 5
	${ZPOOL} status ${POOL_NAME} >${TMP_FILE2} || fail 6
	cmp ${TMP_FILE1} ${TMP_FILE2} || fail 7
	sleep 1

	# Add and remove a cache vdev by shorthand path
	zconfig_add_vdev ${POOL_NAME} cache ${BASE_SDDEVICE} || fail 8
	${ZPOOL} remove ${POOL_NAME} ${BASE_SDDEVICE} || fail 9
	${ZPOOL} status ${POOL_NAME} >${TMP_FILE2} || fail 10
	cmp ${TMP_FILE1} ${TMP_FILE2} || fail 11
	sleep 1

	# Add and remove a log vdev
	zconfig_add_vdev ${POOL_NAME} log ${BASE_SDDEVICE} || fail 12
	${ZPOOL} remove ${POOL_NAME} ${BASE_SDDEVICE} || fail 13
	${ZPOOL} status ${POOL_NAME} >${TMP_FILE2} || fail 14
	cmp ${TMP_FILE1} ${TMP_FILE2} || fail 15

	${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 16
	${ZFS_SH} -u || fail 17
	${RMMOD} scsi_debug || fail 18

	rm -f ${TMP_FILE1} ${TMP_FILE2} ${TMP_CACHE} || fail 19

	pass
}
run_test 10 "zpool add/remove vdev"

exit 0