#!/bin/bash
#
# ZFS/ZPOOL configuration test script.

basedir="$(dirname $0)"

SCRIPT_COMMON=common.sh
if [ -f "${basedir}/${SCRIPT_COMMON}" ]; then
. "${basedir}/${SCRIPT_COMMON}"
else
echo "Missing helper script ${SCRIPT_COMMON}" && exit 1
fi

PROG=zconfig.sh

usage() {
cat << EOF
USAGE:
$0 [-hvc] [-t <#>] [-s <#>]

DESCRIPTION:
	ZFS/ZPOOL configuration tests

OPTIONS:
	-h      Show this message
	-v      Verbose
	-c      Cleanup lo+file devices at start
	-t <#>  Run listed tests
	-s <#>  Skip listed tests

EOF
}

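# Example: run only tests 3 and 4 with verbose output (-t and -s take a
# space-separated list of test numbers):
#
#   ./zconfig.sh -v -t "3 4"
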
while getopts 'hvct:s:?' OPTION; do
	case $OPTION in
	h)
		usage
		exit 1
		;;
	v)
		VERBOSE=1
		;;
	c)
		CLEANUP=1
		;;
	t)
		TESTS_RUN=($OPTARG)
		;;
	s)
		TESTS_SKIP=($OPTARG)
		;;
	?)
		usage
		exit
		;;
	esac
done

if [ $(id -u) != 0 ]; then
	die "Must run as root"
fi

# Perform pre-cleanup if requested
if [ ${CLEANUP} ]; then
	${ZFS_SH} -u
	cleanup_md_devices
	cleanup_loop_devices
	rm -f /tmp/zpool.cache.*
fi

# Check if we need to skip the tests that require scsi_debug and lsscsi.
SCSI_DEBUG=0
${INFOMOD} scsi_debug &>/dev/null && SCSI_DEBUG=1
HAVE_LSSCSI=0
test -f ${LSSCSI} && HAVE_LSSCSI=1
if [ ${SCSI_DEBUG} -eq 0 ] || [ ${HAVE_LSSCSI} -eq 0 ]; then
	echo "Skipping test 10 which requires the scsi_debug" \
		"module and the ${LSSCSI} utility"
fi

zconfig_partition() {
	local DEVICE=$1
	local START=$2
	local END=$3
	local TMP_FILE=`mktemp`

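	# Feed sfdisk one partition definition per line: the first line
	# creates a primary partition spanning cylinders START..END, and
	# the bare ";" lines take the defaults for the remaining slots.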
	/sbin/sfdisk -q ${DEVICE} << EOF &>${TMP_FILE} || fail 4
${START},${END}
;
;
;
EOF

	rm ${TMP_FILE}
}

# Validate persistent zpool.cache configuration.
test_1() {
	local POOL_NAME=test1
	local TMP_FILE1=`mktemp`
	local TMP_FILE2=`mktemp`
	local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

	# Create a pool and save its status for comparison.
	${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
	${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
	${ZPOOL} status ${POOL_NAME} >${TMP_FILE1} || fail 3

	# Unload/load the module stack and verify the pool persists.
	${ZFS_SH} -u || fail 4
	${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 5
	${ZPOOL} status ${POOL_NAME} >${TMP_FILE2} || fail 6
	cmp ${TMP_FILE1} ${TMP_FILE2} || fail 7

	# Cleanup the test pool and temporary files.
	${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 8
	rm -f ${TMP_FILE1} ${TMP_FILE2} ${TMP_CACHE} || fail 9
	${ZFS_SH} -u || fail 10

	pass
}
run_test 1 "persistent zpool.cache"

# Validate ZFS disk scanning and import w/out zpool.cache configuration.
test_2() {
	local POOL_NAME=test2
	local TMP_FILE1=`mktemp`
	local TMP_FILE2=`mktemp`
	local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

	# Create a pool and save its status for comparison.
	${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
	${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
	${ZPOOL} status ${POOL_NAME} >${TMP_FILE1} || fail 3

	# Unload the module stack, remove the cache file, reload the
	# module stack, and probe the disks to import the pool.  As a
	# cross-check, verify the imported pool state matches the original.
	${ZFS_SH} -u || fail 4
	rm -f ${TMP_CACHE} || fail 5
	${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 6
	${ZPOOL} import | grep ${POOL_NAME} >/dev/null || fail 7
	${ZPOOL} import ${POOL_NAME} || fail 8
	${ZPOOL} status ${POOL_NAME} >${TMP_FILE2} || fail 9
	cmp ${TMP_FILE1} ${TMP_FILE2} || fail 10

	# Cleanup the test pool and temporary files.
	${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 11
	rm -f ${TMP_FILE1} ${TMP_FILE2} || fail 12
	${ZFS_SH} -u || fail 13

	pass
}
run_test 2 "scan disks for pools to import"

zconfig_zvol_device_stat() {
	local EXPECT=$1
	local POOL_NAME=/dev/$2
	local ZVOL_NAME=/dev/$3
	local SNAP_NAME=/dev/$4
	local CLONE_NAME=/dev/$5
	local COUNT=0

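	# Count how many of the expected /dev nodes exist: the pool
	# directory plus the volume, snapshot, and clone devices and
	# their first two partitions (10 nodes when all are present).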
	# Briefly delay for udev
	sleep 3

	# Pool exists
	stat ${POOL_NAME} &>/dev/null && let COUNT=$COUNT+1

	# Volume and partitions
	stat ${ZVOL_NAME} &>/dev/null && let COUNT=$COUNT+1
	stat ${ZVOL_NAME}1 &>/dev/null && let COUNT=$COUNT+1
	stat ${ZVOL_NAME}2 &>/dev/null && let COUNT=$COUNT+1

	# Snapshot with partitions
	stat ${SNAP_NAME} &>/dev/null && let COUNT=$COUNT+1
	stat ${SNAP_NAME}1 &>/dev/null && let COUNT=$COUNT+1
	stat ${SNAP_NAME}2 &>/dev/null && let COUNT=$COUNT+1

	# Clone with partitions
	stat ${CLONE_NAME} &>/dev/null && let COUNT=$COUNT+1
	stat ${CLONE_NAME}1 &>/dev/null && let COUNT=$COUNT+1
	stat ${CLONE_NAME}2 &>/dev/null && let COUNT=$COUNT+1

	if [ $EXPECT -ne $COUNT ]; then
		return 1
	fi

	return 0
}
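
# Example (as used by test 3 below): expect all 10 device nodes to exist.
#
#   zconfig_zvol_device_stat 10 tank tank/volume tank/volume@snap tank/clone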

# zpool import/export device check
# (1 volume, 2 partitions, 1 snapshot, 1 clone)
test_3() {
	local POOL_NAME=tank
	local ZVOL_NAME=volume
	local SNAP_NAME=snap
	local CLONE_NAME=clone
	local FULL_ZVOL_NAME=${POOL_NAME}/${ZVOL_NAME}
	local FULL_SNAP_NAME=${POOL_NAME}/${ZVOL_NAME}@${SNAP_NAME}
	local FULL_CLONE_NAME=${POOL_NAME}/${CLONE_NAME}
	local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

	# Create a pool, volume, partition, snapshot, and clone.
	${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
	${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
	${ZFS} create -V 100M ${FULL_ZVOL_NAME} || fail 3
	zconfig_partition /dev/${FULL_ZVOL_NAME} 0 64 || fail 4
	${ZFS} snapshot ${FULL_SNAP_NAME} || fail 5
	${ZFS} clone ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 6

	# Verify the devices were created
	zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \
	    ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 7

	# Export the pool
	${ZPOOL} export ${POOL_NAME} || fail 8

	# Verify the devices were removed
	zconfig_zvol_device_stat 0 ${POOL_NAME} ${FULL_ZVOL_NAME} \
	    ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 9

	# Import the pool (zconfig_zvol_device_stat waits for udev)
	${ZPOOL} import ${POOL_NAME} || fail 10

	# Verify the devices were created
	zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \
	    ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 11

	# Destroy the pool and consequently the devices
	${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 12

	# Verify the devices were removed
	zconfig_zvol_device_stat 0 ${POOL_NAME} ${FULL_ZVOL_NAME} \
	    ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 13

	${ZFS_SH} -u || fail 14
	rm -f ${TMP_CACHE} || fail 15

	pass
}
run_test 3 "zpool import/export device"

# zpool insmod/rmmod device check (1 volume, 1 snapshot, 1 clone)
test_4() {
	local POOL_NAME=tank
	local ZVOL_NAME=volume
	local SNAP_NAME=snap
	local CLONE_NAME=clone
	local FULL_ZVOL_NAME=${POOL_NAME}/${ZVOL_NAME}
	local FULL_SNAP_NAME=${POOL_NAME}/${ZVOL_NAME}@${SNAP_NAME}
	local FULL_CLONE_NAME=${POOL_NAME}/${CLONE_NAME}
	local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

	# Create a pool, volume, snapshot, and clone.
	${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
	${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
	${ZFS} create -V 100M ${FULL_ZVOL_NAME} || fail 3
	zconfig_partition /dev/${FULL_ZVOL_NAME} 0 64 || fail 4
	${ZFS} snapshot ${FULL_SNAP_NAME} || fail 5
	${ZFS} clone ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 6

	# Verify the devices were created
	zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \
	    ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 7

	# Unload the modules
	${ZFS_SH} -u || fail 8

	# Verify the devices were removed
	zconfig_zvol_device_stat 0 ${POOL_NAME} ${FULL_ZVOL_NAME} \
	    ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 9

	# Load the modules (zconfig_zvol_device_stat waits for udev)
	${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 10

	# Verify the devices were created
	zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \
	    ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 11

	# Destroy the pool and consequently the devices
	${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 12

	# Verify the devices were removed
	zconfig_zvol_device_stat 0 ${POOL_NAME} ${FULL_ZVOL_NAME} \
	    ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 13

	${ZFS_SH} -u || fail 14
	rm -f ${TMP_CACHE} || fail 15

	pass
}
run_test 4 "zpool insmod/rmmod device"

# ZVOL volume sanity check
test_5() {
	local POOL_NAME=tank
	local ZVOL_NAME=fish
	local FULL_NAME=${POOL_NAME}/${ZVOL_NAME}
	local SRC_DIR=/bin/
	local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

	# Create a pool and volume.
	${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
	${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
	${ZFS} create -V 400M ${FULL_NAME} || fail 3

	# Partition the volume; a 400M volume has 812 cylinders,
	# 16 heads, and 63 sectors per track.
	zconfig_partition /dev/${FULL_NAME} 0 812 || fail 4

	# Format the partition with ext3.
	/sbin/mkfs.ext3 -q /dev/${FULL_NAME}1 || fail 5

	# Mount the ext3 filesystem and copy some data to it.
	mkdir -p /tmp/${ZVOL_NAME}1 || fail 6
	mount /dev/${FULL_NAME}1 /tmp/${ZVOL_NAME}1 || fail 7
	cp -RL ${SRC_DIR} /tmp/${ZVOL_NAME}1 || fail 8
	sync

	# Verify the copied files match the original files.
	diff -ur ${SRC_DIR} /tmp/${ZVOL_NAME}1${SRC_DIR} &>/dev/null || fail 9

	# Remove the files, umount, destroy the volume and pool.
	rm -Rf /tmp/${ZVOL_NAME}1${SRC_DIR}* || fail 10
	umount /tmp/${ZVOL_NAME}1 || fail 11
	rmdir /tmp/${ZVOL_NAME}1 || fail 12

	${ZFS} destroy ${FULL_NAME} || fail 13
	${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 14
	${ZFS_SH} -u || fail 15
	rm -f ${TMP_CACHE} || fail 16

	pass
}
run_test 5 "zvol+ext3 volume"

# ZVOL snapshot sanity check
test_6() {
	local POOL_NAME=tank
	local ZVOL_NAME=fish
	local SNAP_NAME=pristine
	local FULL_ZVOL_NAME=${POOL_NAME}/${ZVOL_NAME}
	local FULL_SNAP_NAME=${POOL_NAME}/${ZVOL_NAME}@${SNAP_NAME}
	local SRC_DIR=/bin/
	local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

	# Create a pool and volume.
	${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
	${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
	${ZFS} create -V 400M ${FULL_ZVOL_NAME} || fail 3

	# Partition the volume; a 400M volume has 812 cylinders,
	# 16 heads, and 63 sectors per track.
	zconfig_partition /dev/${FULL_ZVOL_NAME} 0 812 || fail 4

	# Format the partition with ext2 (no journal).
	/sbin/mkfs.ext2 -q /dev/${FULL_ZVOL_NAME}1 || fail 5

	# Mount the ext2 filesystem.
	mkdir -p /tmp/${ZVOL_NAME}1 || fail 6
	mount /dev/${FULL_ZVOL_NAME}1 /tmp/${ZVOL_NAME}1 || fail 7

	# Snapshot the pristine ext2 filesystem and mount it read-only.
	${ZFS} snapshot ${FULL_SNAP_NAME} || fail 8
	wait_udev /dev/${FULL_SNAP_NAME}1 30 || fail 8
	mkdir -p /tmp/${SNAP_NAME}1 || fail 9
	mount /dev/${FULL_SNAP_NAME}1 /tmp/${SNAP_NAME}1 &>/dev/null || fail 10

	# Copy to original volume
	cp -RL ${SRC_DIR} /tmp/${ZVOL_NAME}1 || fail 11
	sync

	# Verify the copied files match the original files,
	# and the copied files do NOT appear in the snapshot.
	diff -ur ${SRC_DIR} /tmp/${ZVOL_NAME}1${SRC_DIR} &>/dev/null || fail 12
	diff -ur ${SRC_DIR} /tmp/${SNAP_NAME}1${SRC_DIR} &>/dev/null && fail 13

	# Umount; destroy the snapshot, volume, and pool.
	umount /tmp/${SNAP_NAME}1 || fail 14
	rmdir /tmp/${SNAP_NAME}1 || fail 15
	${ZFS} destroy ${FULL_SNAP_NAME} || fail 16

	umount /tmp/${ZVOL_NAME}1 || fail 17
	rmdir /tmp/${ZVOL_NAME}1 || fail 18
	${ZFS} destroy ${FULL_ZVOL_NAME} || fail 19

	${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 20
	${ZFS_SH} -u || fail 21
	rm -f ${TMP_CACHE} || fail 22

	pass
}
run_test 6 "zvol+ext2 snapshot"

# ZVOL clone sanity check
test_7() {
	local POOL_NAME=tank
	local ZVOL_NAME=fish
	local SNAP_NAME=pristine
	local CLONE_NAME=clone
	local FULL_ZVOL_NAME=${POOL_NAME}/${ZVOL_NAME}
	local FULL_SNAP_NAME=${POOL_NAME}/${ZVOL_NAME}@${SNAP_NAME}
	local FULL_CLONE_NAME=${POOL_NAME}/${CLONE_NAME}
	local SRC_DIR=/bin/
	local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

	# Create a pool and volume.
	${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
	${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
	${ZFS} create -V 300M ${FULL_ZVOL_NAME} || fail 3

	# Partition the volume; a 300M volume has 609 cylinders,
	# 16 heads, and 63 sectors per track.
	zconfig_partition /dev/${FULL_ZVOL_NAME} 0 609 || fail 4

	# Format the partition with ext2 (no journal).
	/sbin/mkfs.ext2 -q /dev/${FULL_ZVOL_NAME}1 || fail 5

	# Mount the ext2 filesystem.
	mkdir -p /tmp/${ZVOL_NAME}1 || fail 6
	mount /dev/${FULL_ZVOL_NAME}1 /tmp/${ZVOL_NAME}1 || fail 7

	# Snapshot the pristine ext2 filesystem and mount it read-only.
	${ZFS} snapshot ${FULL_SNAP_NAME} || fail 8
	wait_udev /dev/${FULL_SNAP_NAME}1 30 || fail 8
	mkdir -p /tmp/${SNAP_NAME}1 || fail 9
	mount /dev/${FULL_SNAP_NAME}1 /tmp/${SNAP_NAME}1 &>/dev/null || fail 10

	# Copy to original volume.
	cp -RL ${SRC_DIR} /tmp/${ZVOL_NAME}1 || fail 11
	sync

	# Verify the copied files match the original files,
	# and the copied files do NOT appear in the snapshot.
	diff -ur ${SRC_DIR} /tmp/${ZVOL_NAME}1${SRC_DIR} &>/dev/null || fail 12
	diff -ur ${SRC_DIR} /tmp/${SNAP_NAME}1${SRC_DIR} &>/dev/null && fail 13

	# Clone from the original pristine snapshot
	${ZFS} clone ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 14
	wait_udev /dev/${FULL_CLONE_NAME}1 30 || fail 14
	mkdir -p /tmp/${CLONE_NAME}1 || fail 15
	mount /dev/${FULL_CLONE_NAME}1 /tmp/${CLONE_NAME}1 || fail 16

	# Verify the clone matches the pristine snapshot,
	# and the files copied to the original volume are NOT there.
	diff -ur /tmp/${SNAP_NAME}1 /tmp/${CLONE_NAME}1 &>/dev/null || fail 17
	diff -ur /tmp/${ZVOL_NAME}1 /tmp/${CLONE_NAME}1 &>/dev/null && fail 18

	# Copy to cloned volume.
	cp -RL ${SRC_DIR} /tmp/${CLONE_NAME}1 || fail 19
	sync

	# Verify the clone now matches the modified original volume.
	diff -ur /tmp/${ZVOL_NAME}1 /tmp/${CLONE_NAME}1 &>/dev/null || fail 20

	# Umount; destroy the clone, snapshot, volume, and pool.
	umount /tmp/${CLONE_NAME}1 || fail 21
	rmdir /tmp/${CLONE_NAME}1 || fail 22
	${ZFS} destroy ${FULL_CLONE_NAME} || fail 23

	umount /tmp/${SNAP_NAME}1 || fail 24
	rmdir /tmp/${SNAP_NAME}1 || fail 25
	${ZFS} destroy ${FULL_SNAP_NAME} || fail 26

	umount /tmp/${ZVOL_NAME}1 || fail 27
	rmdir /tmp/${ZVOL_NAME}1 || fail 28
	${ZFS} destroy ${FULL_ZVOL_NAME} || fail 29

	${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 30
	${ZFS_SH} -u || fail 31
	rm -f ${TMP_CACHE} || fail 32

	pass
}
run_test 7 "zvol+ext2 clone"

# Send/Receive sanity check
test_8() {
	local POOL_NAME1=tank1
	local POOL_NAME2=tank2
	local ZVOL_NAME=fish
	local SNAP_NAME=snap
	local FULL_ZVOL_NAME1=${POOL_NAME1}/${ZVOL_NAME}
	local FULL_ZVOL_NAME2=${POOL_NAME2}/${ZVOL_NAME}
	local FULL_SNAP_NAME1=${POOL_NAME1}/${ZVOL_NAME}@${SNAP_NAME}
	local FULL_SNAP_NAME2=${POOL_NAME2}/${ZVOL_NAME}@${SNAP_NAME}
	local SRC_DIR=/bin/
	local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

	# Create two pools and a volume.
	${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
	${ZPOOL_CREATE_SH} -p ${POOL_NAME1} -c lo-raidz2 || fail 2
	${ZPOOL_CREATE_SH} -p ${POOL_NAME2} -c lo-raidz2 || fail 3
	${ZFS} create -V 300M ${FULL_ZVOL_NAME1} || fail 4

	# Partition the volume; a 300M volume has 609 cylinders,
	# 16 heads, and 63 sectors per track.
	zconfig_partition /dev/${FULL_ZVOL_NAME1} 0 609

	# Format the partition with ext2.
	/sbin/mkfs.ext2 -q /dev/${FULL_ZVOL_NAME1}1 || fail 5

	# Mount the ext2 filesystem and copy some data to it.
	mkdir -p /tmp/${FULL_ZVOL_NAME1}1 || fail 6
	mount /dev/${FULL_ZVOL_NAME1}1 /tmp/${FULL_ZVOL_NAME1}1 || fail 7
	cp -RL ${SRC_DIR} /tmp/${FULL_ZVOL_NAME1}1 || fail 8
	sync || fail 9

	# Snapshot the ext2 filesystem so it may be sent.
	${ZFS} snapshot ${FULL_SNAP_NAME1} || fail 11
	wait_udev /dev/${FULL_SNAP_NAME1} 30 || fail 11

	# Send/receive the snapshot from POOL_NAME1 to POOL_NAME2.
	(${ZFS} send ${FULL_SNAP_NAME1} | \
	    ${ZFS} receive ${FULL_ZVOL_NAME2}) || fail 12
	wait_udev /dev/${FULL_ZVOL_NAME2}1 30 || fail 12

	# Mount the received ext2 filesystem.
	mkdir -p /tmp/${FULL_ZVOL_NAME2}1 || fail 13
	mount /dev/${FULL_ZVOL_NAME2}1 /tmp/${FULL_ZVOL_NAME2}1 || fail 14

	# Verify the contents of the volumes match.
	diff -ur /tmp/${FULL_ZVOL_NAME1}1 /tmp/${FULL_ZVOL_NAME2}1 \
	    &>/dev/null || fail 15

	# Umount; destroy the volumes and pools.
	umount /tmp/${FULL_ZVOL_NAME1}1 || fail 16
	umount /tmp/${FULL_ZVOL_NAME2}1 || fail 17
	rmdir /tmp/${FULL_ZVOL_NAME1}1 || fail 18
	rmdir /tmp/${FULL_ZVOL_NAME2}1 || fail 19
	rmdir /tmp/${POOL_NAME1} || fail 20
	rmdir /tmp/${POOL_NAME2} || fail 21

	${ZFS} destroy ${FULL_SNAP_NAME1} || fail 22
	${ZFS} destroy ${FULL_SNAP_NAME2} || fail 23
	${ZFS} destroy ${FULL_ZVOL_NAME1} || fail 24
	${ZFS} destroy ${FULL_ZVOL_NAME2} || fail 25
	${ZPOOL_CREATE_SH} -p ${POOL_NAME1} -c lo-raidz2 -d || fail 26
	${ZPOOL_CREATE_SH} -p ${POOL_NAME2} -c lo-raidz2 -d || fail 27
	${ZFS_SH} -u || fail 28
	rm -f ${TMP_CACHE} || fail 29

	pass
}
run_test 8 "zfs send/receive"

# zpool event sanity check
test_9() {
	local POOL_NAME=tank
	local ZVOL_NAME=fish
	local FULL_NAME=${POOL_NAME}/${ZVOL_NAME}
	local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
	local TMP_EVENTS=`mktemp -p /tmp zpool.events.XXXXXXXX`

	# Create a pool and volume.
	${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
	${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
	${ZFS} create -V 300M ${FULL_NAME} || fail 3

	# Dump the events; there should be at least 5 lines.
	${ZPOOL} events >${TMP_EVENTS} || fail 4
	EVENTS=`wc -l ${TMP_EVENTS} | cut -f1 -d' '`
	[ $EVENTS -lt 5 ] && fail 5

	# Clear the events and ensure there are none.
	${ZPOOL} events -c >/dev/null || fail 6
	${ZPOOL} events >${TMP_EVENTS} || fail 7
	EVENTS=`wc -l ${TMP_EVENTS} | cut -f1 -d' '`
	[ $EVENTS -gt 1 ] && fail 8

	${ZFS} destroy ${FULL_NAME} || fail 9
	${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 10
	${ZFS_SH} -u || fail 11
	rm -f ${TMP_CACHE} || fail 12
	rm -f ${TMP_EVENTS} || fail 13

	pass
}
run_test 9 "zpool events"

zconfig_add_vdev() {
	local POOL_NAME=$1
	local TYPE=$2
	local DEVICE=$3
	local TMP_FILE1=`mktemp`
	local TMP_FILE2=`mktemp`
	local TMP_FILE3=`mktemp`

	BASE_DEVICE=`basename ${DEVICE}`

	${ZPOOL} status ${POOL_NAME} >${TMP_FILE1}
	${ZPOOL} add -f ${POOL_NAME} ${TYPE} ${DEVICE} 2>/dev/null || return 1
	${ZPOOL} status ${POOL_NAME} >${TMP_FILE2}
	diff ${TMP_FILE1} ${TMP_FILE2} > ${TMP_FILE3}

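	# A successful add should change the status output by exactly
	# three diff lines: the hunk address, the new parent vdev line,
	# and the new device line.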
	[ `wc -l ${TMP_FILE3}|${AWK} '{print $1}'` -eq 3 ] || return 1

	PARENT_VDEV=`tail -2 ${TMP_FILE3} | head -1 | ${AWK} '{print $NF}'`
	case $TYPE in
	cache)
		[ "${PARENT_VDEV}" = "${TYPE}" ] || return 1
		;;
	log)
		[ "${PARENT_VDEV}" = "logs" ] || return 1
		;;
	esac

	if ! tail -1 ${TMP_FILE3} |
	    egrep -q "^>[[:space:]]+${BASE_DEVICE}[[:space:]]+ONLINE" ; then
		return 1
	fi
	rm -f ${TMP_FILE1} ${TMP_FILE2} ${TMP_FILE3}

	return 0
}

# zpool add and remove sanity check
test_10() {
	local POOL_NAME=tank
	local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
	local TMP_FILE1=`mktemp`
	local TMP_FILE2=`mktemp`

	if [ ${SCSI_DEBUG} -eq 0 ] || [ ${HAVE_LSSCSI} -eq 0 ] ; then
		skip
		return
	fi

	# Remove any stale scsi_debug module, then load a fresh 128M device.
	if ${LSMOD} | grep -q scsi_debug; then
		${RMMOD} scsi_debug || die "Error $? removing scsi_debug"
	fi

	/sbin/modprobe scsi_debug dev_size_mb=128 ||
	    die "Error $? creating scsi_debug device"
	udev_trigger

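	# Find the block device node lsscsi reports for the scsi_debug
	# disk; the /dev path is the sixth whitespace-delimited field.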
	SDDEVICE=`${LSSCSI}|${AWK} '/scsi_debug/ { print $6; exit }'`
	BASE_SDDEVICE=`basename $SDDEVICE`

	# Create a pool
	${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
	${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
	${ZPOOL} status ${POOL_NAME} >${TMP_FILE1} || fail 3

	# Add and remove a cache vdev by full path
	zconfig_add_vdev ${POOL_NAME} cache ${SDDEVICE} || fail 4
	${ZPOOL} remove ${POOL_NAME} ${SDDEVICE} || fail 5
	${ZPOOL} status ${POOL_NAME} >${TMP_FILE2} || fail 6
	cmp ${TMP_FILE1} ${TMP_FILE2} || fail 7

	# Add and remove a cache vdev by shorthand path
	zconfig_add_vdev ${POOL_NAME} cache ${BASE_SDDEVICE} || fail 8
	${ZPOOL} remove ${POOL_NAME} ${BASE_SDDEVICE} || fail 9
	${ZPOOL} status ${POOL_NAME} >${TMP_FILE2} || fail 10
	cmp ${TMP_FILE1} ${TMP_FILE2} || fail 11

	# Add and remove a log vdev
	zconfig_add_vdev ${POOL_NAME} log ${BASE_SDDEVICE} || fail 12
	${ZPOOL} remove ${POOL_NAME} ${BASE_SDDEVICE} || fail 13
	${ZPOOL} status ${POOL_NAME} >${TMP_FILE2} || fail 14
	cmp ${TMP_FILE1} ${TMP_FILE2} || fail 15

	${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 16
	${ZFS_SH} -u || fail 17
	${RMMOD} scsi_debug || fail 18

	rm -f ${TMP_FILE1} ${TMP_FILE2} ${TMP_CACHE} || fail 19

	pass
}
run_test 10 "zpool add/remove vdev"

exit 0