#!/bin/bash
#
# ZFS/ZPOOL configuration test script.

basedir="$(dirname $0)"

SCRIPT_COMMON=common.sh
if [ -f "${basedir}/${SCRIPT_COMMON}" ]; then
. "${basedir}/${SCRIPT_COMMON}"
else
echo "Missing helper script ${SCRIPT_COMMON}" && exit 1
fi

PROG=zconfig.sh
usage() {
cat << EOF
USAGE:
$0 [-hvc] [-t <#>] [-s <#>]

DESCRIPTION:
        ZFS/ZPOOL configuration tests

OPTIONS:
        -h      Show this message
        -v      Verbose
        -c      Cleanup lo+file devices at start
        -t <#>  Run listed tests
        -s <#>  Skip listed tests

EOF
}

while getopts 'hvct:s:?' OPTION; do
        case $OPTION in
        h)
                usage
                exit 1
                ;;
        v)
                VERBOSE=1
                ;;
        c)
                CLEANUP=1
                ;;
        t)
                TESTS_RUN=($OPTARG)
                ;;
        s)
                TESTS_SKIP=($OPTARG)
                ;;
        ?)
                usage
                exit
                ;;
        esac
done

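# Example invocations (illustrative):
#   zconfig.sh -v              # run all tests verbosely
#   zconfig.sh -t "1 2 3"      # run only tests 1, 2, and 3
#   zconfig.sh -c -s "10"      # pre-clean devices and skip test 10
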
if [ $(id -u) != 0 ]; then
        die "Must run as root"
fi

# Initialize the test suite
init

# Disable the udev rule 90-zfs.rules to prevent the zfs module
# stack from being loaded due to the detection of a zfs device.
# This is important because this test script requires full control
# over when and how the modules are loaded/unloaded. A trap is
# set to ensure the udev rule is correctly restored on exit.
RULE=${udevruledir}/90-zfs.rules
if test -e ${RULE}; then
        trap "mv ${RULE}.disabled ${RULE}" INT TERM EXIT
        mv ${RULE} ${RULE}.disabled
fi

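# Note: the trap fires on INT, TERM, and EXIT, so the rule is restored
# even if a test aborts or the script is interrupted.
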
# Perform pre-cleanup if requested
if [ ${CLEANUP} ]; then
        ${ZFS_SH} -u
        cleanup_md_devices
        cleanup_loop_devices
        rm -f /tmp/zpool.cache.*
fi

# Check if we need to skip the tests that require scsi_debug and lsscsi.
SCSI_DEBUG=0
${INFOMOD} scsi_debug &>/dev/null && SCSI_DEBUG=1
HAVE_LSSCSI=0
test -f ${LSSCSI} && HAVE_LSSCSI=1
if [ ${SCSI_DEBUG} -eq 0 ] || [ ${HAVE_LSSCSI} -eq 0 ]; then
        echo "Skipping test 10 which requires the scsi_debug" \
            "module and the ${LSSCSI} utility"
fi

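# (INFOMOD and LSSCSI are expected to be set by common.sh, presumably
# to modinfo(8) and the lsscsi(8) utility respectively.)
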
# Validate persistent zpool.cache configuration.
test_1() {
        local POOL_NAME=test1
        local TMP_FILE1=`mktemp`
        local TMP_FILE2=`mktemp`
        local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

        # Create a pool and save its status for comparison.
        ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
        ${ZPOOL} status ${POOL_NAME} >${TMP_FILE1} || fail 3

        # Unload/load the module stack and verify the pool persists.
        ${ZFS_SH} -u || fail 4
        ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 5
        ${ZPOOL} import -c ${TMP_CACHE} ${POOL_NAME} || fail 5
        ${ZPOOL} status ${POOL_NAME} >${TMP_FILE2} || fail 6
        cmp ${TMP_FILE1} ${TMP_FILE2} || fail 7

        # Cleanup the test pool and temporary files
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 8
        rm -f ${TMP_FILE1} ${TMP_FILE2} ${TMP_CACHE} || fail 9
        ${ZFS_SH} -u || fail 10

        pass
}
run_test 1 "persistent zpool.cache"
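
# Each test points the kernel at a private cache file by passing
# zfs="spa_config_path=..." through ${ZFS_SH} when the modules are
# loaded; roughly equivalent to the following (illustrative):
#   modprobe zfs spa_config_path=/tmp/zpool.cache.XXXXXXXX
# This keeps test runs from disturbing the system's /etc/zfs/zpool.cache.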

# Validate ZFS disk scanning and import w/out zpool.cache configuration.
test_2() {
        local POOL_NAME=test2
        local TMP_FILE1=`mktemp`
        local TMP_FILE2=`mktemp`
        local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

        # Create a pool and save its status for comparison.
        ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
        ${ZPOOL} status ${POOL_NAME} >${TMP_FILE1} || fail 3

        # Unload the module stack, remove the cache file, load the module
        # stack, and attempt to probe the disks to import the pool.  As
        # a cross-check, verify the old pool state against the imported.
        ${ZFS_SH} -u || fail 4
        rm -f ${TMP_CACHE} || fail 5
        ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 6
        ${ZPOOL} import -d /dev ${POOL_NAME} || fail 8
        ${ZPOOL} status ${POOL_NAME} >${TMP_FILE2} || fail 9
        cmp ${TMP_FILE1} ${TMP_FILE2} || fail 10

        # Cleanup the test pool and temporary files
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 11
        rm -f ${TMP_FILE1} ${TMP_FILE2} || fail 12
        ${ZFS_SH} -u || fail 13

        pass
}
run_test 2 "scan disks for pools to import"

zconfig_zvol_device_stat() {
        local EXPECT=$1
        local POOL_NAME=/dev/zvol/$2
        local ZVOL_NAME=/dev/zvol/$3
        local SNAP_NAME=/dev/zvol/$4
        local CLONE_NAME=/dev/zvol/$5
        local COUNT=0

        # Briefly delay for udev
        udev_trigger

        # Pool exists
        stat ${POOL_NAME} &>/dev/null && let COUNT=$COUNT+1

        # Volume and partitions
        stat ${ZVOL_NAME} &>/dev/null && let COUNT=$COUNT+1
        stat ${ZVOL_NAME}-part1 &>/dev/null && let COUNT=$COUNT+1
        stat ${ZVOL_NAME}-part2 &>/dev/null && let COUNT=$COUNT+1

        # Snapshot with partitions
        stat ${SNAP_NAME} &>/dev/null && let COUNT=$COUNT+1
        stat ${SNAP_NAME}-part1 &>/dev/null && let COUNT=$COUNT+1
        stat ${SNAP_NAME}-part2 &>/dev/null && let COUNT=$COUNT+1

        # Clone with partitions
        stat ${CLONE_NAME} &>/dev/null && let COUNT=$COUNT+1
        stat ${CLONE_NAME}-part1 &>/dev/null && let COUNT=$COUNT+1
        stat ${CLONE_NAME}-part2 &>/dev/null && let COUNT=$COUNT+1

        if [ $EXPECT -ne $COUNT ]; then
                return 1
        fi

        return 0
}

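# Device count arithmetic used by the tests below: a fully visible
# configuration yields 10 nodes (the pool directory plus 3 each for the
# volume, snapshot, and clone: the device and its two partitions).
# Hiding snapshot devices drops the expected count to 7, and 0 means
# every node has been removed.
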
# zpool import/export device check
# (1 volume, 2 partitions, 1 snapshot, 1 clone)
test_3() {
        local POOL_NAME=tank
        local ZVOL_NAME=volume
        local SNAP_NAME=snap
        local CLONE_NAME=clone
        local FULL_ZVOL_NAME=${POOL_NAME}/${ZVOL_NAME}
        local FULL_SNAP_NAME=${POOL_NAME}/${ZVOL_NAME}@${SNAP_NAME}
        local FULL_CLONE_NAME=${POOL_NAME}/${CLONE_NAME}
        local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

        # Create a pool, volume, partitions, snapshot, and clone.
        ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
        ${ZFS} create -V 100M ${FULL_ZVOL_NAME} || fail 3
        ${ZFS} set snapdev=visible ${FULL_ZVOL_NAME} || fail 3
        label /dev/zvol/${FULL_ZVOL_NAME} msdos || fail 4
        partition /dev/zvol/${FULL_ZVOL_NAME} primary 1% 50% || fail 4
        partition /dev/zvol/${FULL_ZVOL_NAME} primary 51% -1 || fail 4
        ${ZFS} snapshot ${FULL_SNAP_NAME} || fail 5
        ${ZFS} clone ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 6

        # Verify the devices were created
        zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \
            ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 7

        # Export the pool
        ${ZPOOL} export ${POOL_NAME} || fail 8

        # Verify the devices were removed
        zconfig_zvol_device_stat 0 ${POOL_NAME} ${FULL_ZVOL_NAME} \
            ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 9

        # Import the pool and let udev recreate the devices
        ${ZPOOL} import ${POOL_NAME} || fail 10

        # Verify the devices were created
        zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \
            ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 11

        # Toggle the snapdev property and observe the snapshot device
        # links appear and disappear accordingly.
        ${ZFS} set snapdev=hidden ${FULL_ZVOL_NAME} || fail 12

        zconfig_zvol_device_stat 7 ${POOL_NAME} ${FULL_ZVOL_NAME} \
            "invalid" ${FULL_CLONE_NAME} || fail 13

        ${ZFS} set snapdev=visible ${FULL_ZVOL_NAME} || fail 14

        zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \
            ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 15

        # Destroy the pool and consequently the devices
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 16

        # Verify the devices were removed
        zconfig_zvol_device_stat 0 ${POOL_NAME} ${FULL_ZVOL_NAME} \
            ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 17

        ${ZFS_SH} -u || fail 18
        rm -f ${TMP_CACHE} || fail 19

        pass
}
run_test 3 "zpool import/export device"
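
# With snapdev=hidden the snapshot contributes no device nodes, so the
# expected count drops from 10 to 7; the literal "invalid" argument
# stands in for the snapshot path so those stat calls cannot match.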

# zpool insmod/rmmod device check (1 volume, 1 snapshot, 1 clone)
test_4() {
        local POOL_NAME=tank
        local ZVOL_NAME=volume
        local SNAP_NAME=snap
        local CLONE_NAME=clone
        local FULL_ZVOL_NAME=${POOL_NAME}/${ZVOL_NAME}
        local FULL_SNAP_NAME=${POOL_NAME}/${ZVOL_NAME}@${SNAP_NAME}
        local FULL_CLONE_NAME=${POOL_NAME}/${CLONE_NAME}
        local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

        # Create a pool, volume, snapshot, and clone
        ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
        ${ZFS} create -V 100M ${FULL_ZVOL_NAME} || fail 3
        ${ZFS} set snapdev=visible ${FULL_ZVOL_NAME} || fail 3
        label /dev/zvol/${FULL_ZVOL_NAME} msdos || fail 4
        partition /dev/zvol/${FULL_ZVOL_NAME} primary 1% 50% || fail 4
        partition /dev/zvol/${FULL_ZVOL_NAME} primary 51% -1 || fail 4
        ${ZFS} snapshot ${FULL_SNAP_NAME} || fail 5
        ${ZFS} clone ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 6

        # Verify the devices were created
        zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \
            ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 7

        # Unload the modules
        ${ZFS_SH} -u || fail 8

        # Verify the devices were removed
        zconfig_zvol_device_stat 0 ${POOL_NAME} ${FULL_ZVOL_NAME} \
            ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 9

        # Load the modules and list the pools to ensure they are opened
        ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 10
        ${ZPOOL} import -c ${TMP_CACHE} ${POOL_NAME} || fail 10
        ${ZPOOL} list &>/dev/null

        # Verify the devices were created
        zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \
            ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 11

        # Destroy the pool and consequently the devices
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 12

        # Verify the devices were removed
        zconfig_zvol_device_stat 0 ${POOL_NAME} ${FULL_ZVOL_NAME} \
            ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 13

        ${ZFS_SH} -u || fail 14
        rm -f ${TMP_CACHE} || fail 15

        pass
}
run_test 4 "zpool insmod/rmmod device"

# ZVOL volume sanity check
test_5() {
        local POOL_NAME=tank
        local ZVOL_NAME=fish
        local FULL_NAME=${POOL_NAME}/${ZVOL_NAME}
        local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

        # Create a pool and volume.
        ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raid0 || fail 2
        ${ZFS} create -V 800M ${FULL_NAME} || fail 3
        label /dev/zvol/${FULL_NAME} msdos || fail 4
        partition /dev/zvol/${FULL_NAME} primary 1 -1 || fail 4
        format /dev/zvol/${FULL_NAME}-part1 ext2 || fail 5

        # Mount the ext2 filesystem and copy some data to it.
        mkdir -p /tmp/${ZVOL_NAME}-part1 || fail 6
        mount /dev/zvol/${FULL_NAME}-part1 /tmp/${ZVOL_NAME}-part1 || fail 7
        cp -RL ${SRC_DIR} /tmp/${ZVOL_NAME}-part1 || fail 8
        sync

        # Verify the copied files match the original files.
        diff -ur ${SRC_DIR} /tmp/${ZVOL_NAME}-part1/${SRC_DIR##*/} \
            &>/dev/null || fail 9

        # Remove the files, umount, destroy the volume and pool.
        rm -Rf /tmp/${ZVOL_NAME}-part1/${SRC_DIR##*/} || fail 10
        umount /tmp/${ZVOL_NAME}-part1 || fail 11
        rmdir /tmp/${ZVOL_NAME}-part1 || fail 12

        ${ZFS} destroy ${FULL_NAME} || fail 13
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raid0 -d || fail 14
        ${ZFS_SH} -u || fail 15
        rm -f ${TMP_CACHE} || fail 16

        pass
}
run_test 5 "zvol+ext2 volume"
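
# The label/partition/format helpers come from common.sh and are
# assumed to wrap parted(8) and mkfs; e.g. the sequence above is
# roughly (illustrative):
#   parted --script /dev/zvol/tank/fish mklabel msdos
#   parted --script /dev/zvol/tank/fish mkpart primary 1 -1
#   mkfs.ext2 -q /dev/zvol/tank/fish-part1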

# ZVOL snapshot sanity check
test_6() {
        local POOL_NAME=tank
        local ZVOL_NAME=fish
        local SNAP_NAME=pristine
        local FULL_ZVOL_NAME=${POOL_NAME}/${ZVOL_NAME}
        local FULL_SNAP_NAME=${POOL_NAME}/${ZVOL_NAME}@${SNAP_NAME}
        local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

        # Create a pool and volume.
        ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raid0 || fail 2
        ${ZFS} create -s -V 800M ${FULL_ZVOL_NAME} || fail 3
        ${ZFS} set snapdev=visible ${FULL_ZVOL_NAME} || fail 3
        label /dev/zvol/${FULL_ZVOL_NAME} msdos || fail 4
        partition /dev/zvol/${FULL_ZVOL_NAME} primary 1 -1 || fail 4
        format /dev/zvol/${FULL_ZVOL_NAME}-part1 ext2 || fail 5

        # Mount the ext2 filesystem and copy some data to it.
        mkdir -p /tmp/${ZVOL_NAME}-part1 || fail 6
        mount /dev/zvol/${FULL_ZVOL_NAME}-part1 /tmp/${ZVOL_NAME}-part1 \
            || fail 7

        # Snapshot the pristine ext2 filesystem and mount it read-only.
        ${ZFS} snapshot ${FULL_SNAP_NAME} || fail 8
        wait_udev /dev/zvol/${FULL_SNAP_NAME}-part1 30 || fail 8
        mkdir -p /tmp/${SNAP_NAME}-part1 || fail 9
        mount /dev/zvol/${FULL_SNAP_NAME}-part1 /tmp/${SNAP_NAME}-part1 \
            &>/dev/null || fail 10

        # Copy to original volume
        cp -RL ${SRC_DIR} /tmp/${ZVOL_NAME}-part1 || fail 11
        sync

        # Verify the copied files match the original files,
        # and the copied files do NOT appear in the snapshot.
        diff -ur ${SRC_DIR} /tmp/${ZVOL_NAME}-part1/${SRC_DIR##*/} \
            &>/dev/null || fail 12
        diff -ur ${SRC_DIR} /tmp/${SNAP_NAME}-part1/${SRC_DIR##*/} \
            &>/dev/null && fail 13

        # Umount, destroy the snapshot, volume, and pool.
        umount /tmp/${SNAP_NAME}-part1 || fail 14
        rmdir /tmp/${SNAP_NAME}-part1 || fail 15
        ${ZFS} destroy ${FULL_SNAP_NAME} || fail 16

        umount /tmp/${ZVOL_NAME}-part1 || fail 17
        rmdir /tmp/${ZVOL_NAME}-part1 || fail 18
        ${ZFS} destroy ${FULL_ZVOL_NAME} || fail 19

        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raid0 -d || fail 20
        ${ZFS_SH} -u || fail 21
        rm -f ${TMP_CACHE} || fail 22

        pass
}
run_test 6 "zvol+ext2 snapshot"
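
# The -s flag above creates a sparse (thin provisioned) volume, so the
# 800M zvol does not reserve space up front in the small loopback pool.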

# ZVOL clone sanity check
test_7() {
        local POOL_NAME=tank
        local ZVOL_NAME=fish
        local SNAP_NAME=pristine
        local CLONE_NAME=clone
        local FULL_ZVOL_NAME=${POOL_NAME}/${ZVOL_NAME}
        local FULL_SNAP_NAME=${POOL_NAME}/${ZVOL_NAME}@${SNAP_NAME}
        local FULL_CLONE_NAME=${POOL_NAME}/${CLONE_NAME}
        local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

        # Create a pool and volume.
        ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
        ${ZFS} create -V 300M ${FULL_ZVOL_NAME} || fail 3
        ${ZFS} set snapdev=visible ${FULL_ZVOL_NAME} || fail 3
        label /dev/zvol/${FULL_ZVOL_NAME} msdos || fail 4
        partition /dev/zvol/${FULL_ZVOL_NAME} primary 1 -1 || fail 4
        format /dev/zvol/${FULL_ZVOL_NAME}-part1 ext2 || fail 5

        # Snapshot the pristine ext2 filesystem.
        ${ZFS} snapshot ${FULL_SNAP_NAME} || fail 6
        wait_udev /dev/zvol/${FULL_SNAP_NAME}-part1 30 || fail 7

        # Mount the ext2 filesystem so some data can be copied to it.
        mkdir -p /tmp/${ZVOL_NAME}-part1 || fail 7
        mount /dev/zvol/${FULL_ZVOL_NAME}-part1 \
            /tmp/${ZVOL_NAME}-part1 || fail 8

        # Mount the pristine ext2 snapshot.
        mkdir -p /tmp/${SNAP_NAME}-part1 || fail 9
        mount /dev/zvol/${FULL_SNAP_NAME}-part1 \
            /tmp/${SNAP_NAME}-part1 &>/dev/null || fail 10

        # Copy to original volume.
        cp -RL ${SRC_DIR} /tmp/${ZVOL_NAME}-part1 || fail 11
        sync

        # Verify the copied files match the original files,
        # and the copied files do NOT appear in the snapshot.
        diff -ur ${SRC_DIR} /tmp/${ZVOL_NAME}-part1/${SRC_DIR##*/} \
            &>/dev/null || fail 12
        diff -ur ${SRC_DIR} /tmp/${SNAP_NAME}-part1/${SRC_DIR##*/} \
            &>/dev/null && fail 13

        # Clone from the original pristine snapshot
        ${ZFS} clone ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 14
        wait_udev /dev/zvol/${FULL_CLONE_NAME}-part1 30 || fail 14
        mkdir -p /tmp/${CLONE_NAME}-part1 || fail 15
        mount /dev/zvol/${FULL_CLONE_NAME}-part1 \
            /tmp/${CLONE_NAME}-part1 || fail 16

        # Verify the clone matches the pristine snapshot,
        # and the files copied to the original volume are NOT there.
        diff -ur /tmp/${SNAP_NAME}-part1 /tmp/${CLONE_NAME}-part1 \
            &>/dev/null || fail 17
        diff -ur /tmp/${ZVOL_NAME}-part1 /tmp/${CLONE_NAME}-part1 \
            &>/dev/null && fail 18

        # Copy to cloned volume.
        cp -RL ${SRC_DIR} /tmp/${CLONE_NAME}-part1 || fail 19
        sync

        # Verify the clone matches the modified original volume.
        diff -ur /tmp/${ZVOL_NAME}-part1 /tmp/${CLONE_NAME}-part1 \
            &>/dev/null || fail 20

        # Umount, destroy the clone, snapshot, volume, and pool.
        umount /tmp/${CLONE_NAME}-part1 || fail 21
        rmdir /tmp/${CLONE_NAME}-part1 || fail 22
        ${ZFS} destroy ${FULL_CLONE_NAME} || fail 23

        umount /tmp/${SNAP_NAME}-part1 || fail 24
        rmdir /tmp/${SNAP_NAME}-part1 || fail 25
        ${ZFS} destroy ${FULL_SNAP_NAME} || fail 26

        umount /tmp/${ZVOL_NAME}-part1 || fail 27
        rmdir /tmp/${ZVOL_NAME}-part1 || fail 28
        ${ZFS} destroy ${FULL_ZVOL_NAME} || fail 29

        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 30
        ${ZFS_SH} -u || fail 31
        rm -f ${TMP_CACHE} || fail 32

        pass
}
run_test 7 "zvol+ext2 clone"
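
# A clone initially shares all of its blocks with the origin snapshot,
# which is why it first matches the pristine snapshot exactly and only
# diverges once new data is written to it.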

# Send/Receive sanity check
test_8() {
        local POOL_NAME1=tank1
        local POOL_NAME2=tank2
        local ZVOL_NAME=fish
        local SNAP_NAME=snap
        local FULL_ZVOL_NAME1=${POOL_NAME1}/${ZVOL_NAME}
        local FULL_ZVOL_NAME2=${POOL_NAME2}/${ZVOL_NAME}
        local FULL_SNAP_NAME1=${POOL_NAME1}/${ZVOL_NAME}@${SNAP_NAME}
        local FULL_SNAP_NAME2=${POOL_NAME2}/${ZVOL_NAME}@${SNAP_NAME}
        local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

        # Create two pools and a volume
        ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME1} -c lo-raidz2 || fail 2
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME2} -c lo-raidz2 || fail 2
        ${ZFS} create -V 300M ${FULL_ZVOL_NAME1} || fail 3
        ${ZFS} set snapdev=visible ${FULL_ZVOL_NAME1} || fail 3
        label /dev/zvol/${FULL_ZVOL_NAME1} msdos || fail 4
        partition /dev/zvol/${FULL_ZVOL_NAME1} primary 1 -1 || fail 4
        format /dev/zvol/${FULL_ZVOL_NAME1}-part1 ext2 || fail 5

        # Mount the ext2 filesystem and copy some data to it.
        mkdir -p /tmp/${FULL_ZVOL_NAME1}-part1 || fail 6
        mount /dev/zvol/${FULL_ZVOL_NAME1}-part1 \
            /tmp/${FULL_ZVOL_NAME1}-part1 || fail 7
        cp -RL ${SRC_DIR} /tmp/${FULL_ZVOL_NAME1}-part1 || fail 8

        # Unmount, snapshot, then remount the ext2 filesystem so it may
        # be sent.  We unmount only to ensure the ext2 filesystem is clean.
        umount /tmp/${FULL_ZVOL_NAME1}-part1 || fail 9
        ${ZFS} snapshot ${FULL_SNAP_NAME1} || fail 10
        wait_udev /dev/zvol/${FULL_SNAP_NAME1} 30 || fail 10
        mount /dev/zvol/${FULL_ZVOL_NAME1}-part1 \
            /tmp/${FULL_ZVOL_NAME1}-part1 || fail 11

        # Send/receive the snapshot from POOL_NAME1 to POOL_NAME2
        (${ZFS} send ${FULL_SNAP_NAME1} | \
            ${ZFS} receive ${FULL_ZVOL_NAME2}) || fail 12
        wait_udev /dev/zvol/${FULL_ZVOL_NAME2}-part1 30 || fail 12

        # Mount the sent ext2 filesystem.
        mkdir -p /tmp/${FULL_ZVOL_NAME2}-part1 || fail 13
        mount /dev/zvol/${FULL_ZVOL_NAME2}-part1 \
            /tmp/${FULL_ZVOL_NAME2}-part1 || fail 14

        # Verify the contents of the volumes match
        diff -ur /tmp/${FULL_ZVOL_NAME1}-part1 /tmp/${FULL_ZVOL_NAME2}-part1 \
            &>/dev/null || fail 15

        # Umount, destroy the volumes, snapshots, and pools.
        umount /tmp/${FULL_ZVOL_NAME1}-part1 || fail 16
        umount /tmp/${FULL_ZVOL_NAME2}-part1 || fail 17
        rmdir /tmp/${FULL_ZVOL_NAME1}-part1 || fail 18
        rmdir /tmp/${FULL_ZVOL_NAME2}-part1 || fail 19
        rmdir /tmp/${POOL_NAME1} || fail 20
        rmdir /tmp/${POOL_NAME2} || fail 21

        ${ZFS} destroy ${FULL_SNAP_NAME1} || fail 22
        ${ZFS} destroy ${FULL_SNAP_NAME2} || fail 23
        ${ZFS} destroy ${FULL_ZVOL_NAME1} || fail 24
        ${ZFS} destroy ${FULL_ZVOL_NAME2} || fail 25
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME1} -c lo-raidz2 -d || fail 26
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME2} -c lo-raidz2 -d || fail 27
        ${ZFS_SH} -u || fail 28
        rm -f ${TMP_CACHE} || fail 29

        pass
}
run_test 8 "zfs send/receive"
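
# Receiving a full stream implicitly creates both the target volume and
# its @snap snapshot on the second pool, which is why ${FULL_SNAP_NAME2}
# and ${FULL_ZVOL_NAME2} must each be destroyed above.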

# zpool event sanity check
test_9() {
        local POOL_NAME=tank
        local ZVOL_NAME=fish
        local FULL_NAME=${POOL_NAME}/${ZVOL_NAME}
        local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
        local TMP_EVENTS=`mktemp -p /tmp zpool.events.XXXXXXXX`

        # Create a pool and volume.
        ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
        ${ZFS} create -V 300M ${FULL_NAME} || fail 3
        udev_trigger

        # Dump the events; there should be exactly one pool create event
        ${ZPOOL} events >${TMP_EVENTS} || fail 4
        MATCHES=`grep -c sysevent\.fs\.zfs\.pool_create ${TMP_EVENTS}`
        [ $MATCHES -eq 1 ] || fail 5

        # Clear the events and ensure there are none.
        ${ZPOOL} events -c >/dev/null || fail 6
        ${ZPOOL} events >${TMP_EVENTS} || fail 7
        EVENTS=`wc -l ${TMP_EVENTS} | cut -f1 -d' '`
        [ $EVENTS -gt 1 ] && fail 8

        ${ZFS} destroy ${FULL_NAME} || fail 9
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 10
        ${ZFS_SH} -u || fail 11
        rm -f ${TMP_CACHE} || fail 12
        rm -f ${TMP_EVENTS} || fail 13

        pass
}
run_test 9 "zpool events"
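
# The -gt 1 test above tolerates the single header line which
# "zpool events" is assumed to print even when the event list is empty.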

zconfig_add_vdev() {
        local POOL_NAME=$1
        local TYPE=$2
        local DEVICE=$3
        local TMP_FILE1=`mktemp`
        local TMP_FILE2=`mktemp`
        local TMP_FILE3=`mktemp`

        BASE_DEVICE=`basename ${DEVICE}`

        # Capture the pool status before and after adding the vdev and
        # diff the two; a successful add appends exactly two lines (the
        # parent vdev and the new device), a three-line default diff.
        ${ZPOOL} status ${POOL_NAME} >${TMP_FILE1}
        ${ZPOOL} add -f ${POOL_NAME} ${TYPE} ${DEVICE} 2>/dev/null || return 1
        ${ZPOOL} status ${POOL_NAME} >${TMP_FILE2}
        diff ${TMP_FILE1} ${TMP_FILE2} > ${TMP_FILE3}

        [ `wc -l ${TMP_FILE3}|${AWK} '{print $1}'` -eq 3 ] || return 1

        # The second-to-last line names the parent vdev ("cache" or "logs").
        PARENT_VDEV=`tail -2 ${TMP_FILE3} | head -1 | ${AWK} '{print $NF}'`
        case $TYPE in
        cache)
                [ "${PARENT_VDEV}" = "${TYPE}" ] || return 1
                ;;
        log)
                [ "${PARENT_VDEV}" = "logs" ] || return 1
                ;;
        esac

        # The last line must show the new device ONLINE.
        if ! tail -1 ${TMP_FILE3} |
            egrep -q "^>[[:space:]]+${BASE_DEVICE}[[:space:]]+ONLINE" ; then
                return 1
        fi
        rm -f ${TMP_FILE1} ${TMP_FILE2} ${TMP_FILE3}

        return 0
}

# zpool add and remove sanity check
test_10() {
        local POOL_NAME=tank
        local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
        local TMP_FILE1=`mktemp`
        local TMP_FILE2=`mktemp`

        if [ ${SCSI_DEBUG} -eq 0 ] || [ ${HAVE_LSSCSI} -eq 0 ] ; then
                skip
                return
        fi

        test `${LSMOD} | grep -c scsi_debug` -gt 0 && \
            (${RMMOD} scsi_debug || exit 1)

        /sbin/modprobe scsi_debug dev_size_mb=128 ||
            die "Error $? creating scsi_debug device"
        udev_trigger

        SDDEVICE=`${LSSCSI}|${AWK} '/scsi_debug/ { print $6; exit }'`
        BASE_SDDEVICE=`basename $SDDEVICE`

        # Create a pool
        ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
        ${ZPOOL} status ${POOL_NAME} >${TMP_FILE1} || fail 3

        # Add and remove a cache vdev by full path
        zconfig_add_vdev ${POOL_NAME} cache ${SDDEVICE} || fail 4
        ${ZPOOL} remove ${POOL_NAME} ${SDDEVICE} || fail 5
        ${ZPOOL} status ${POOL_NAME} >${TMP_FILE2} || fail 6
        cmp ${TMP_FILE1} ${TMP_FILE2} || fail 7
        sleep 1

        # Add and remove a cache vdev by shorthand path
        zconfig_add_vdev ${POOL_NAME} cache ${BASE_SDDEVICE} || fail 8
        ${ZPOOL} remove ${POOL_NAME} ${BASE_SDDEVICE} || fail 9
        ${ZPOOL} status ${POOL_NAME} >${TMP_FILE2} || fail 10
        cmp ${TMP_FILE1} ${TMP_FILE2} || fail 11
        sleep 1

        # Add and remove a log vdev
        zconfig_add_vdev ${POOL_NAME} log ${BASE_SDDEVICE} || fail 12
        ${ZPOOL} remove ${POOL_NAME} ${BASE_SDDEVICE} || fail 13
        ${ZPOOL} status ${POOL_NAME} >${TMP_FILE2} || fail 14
        cmp ${TMP_FILE1} ${TMP_FILE2} || fail 15

        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 16
        ${ZFS_SH} -u || fail 17
        ${RMMOD} scsi_debug || fail 18

        rm -f ${TMP_FILE1} ${TMP_FILE2} ${TMP_CACHE} || fail 19

        pass
}
run_test 10 "zpool add/remove vdev"
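
# scsi_debug provides a small RAM-backed virtual SCSI disk (128 MB
# here), giving the add/remove test a real block device to exercise
# without touching physical storage.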

exit 0