#!/bin/bash
#
# ZFS/ZPOOL configuration test script.

basedir="$(dirname $0)"

SCRIPT_COMMON=common.sh
if [ -f "${basedir}/${SCRIPT_COMMON}" ]; then
        . "${basedir}/${SCRIPT_COMMON}"
else
        echo "Missing helper script ${SCRIPT_COMMON}" && exit 1
fi

PROG=zconfig.sh

usage() {
cat << EOF
USAGE:
$0 [hvcts]

DESCRIPTION:
        ZFS/ZPOOL configuration tests

OPTIONS:
        -h      Show this message
        -v      Verbose
        -c      Cleanup lo+file devices at start
        -t <#>  Run listed tests
        -s <#>  Skip listed tests

EOF
}

while getopts 'hvct:s:?' OPTION; do
        case $OPTION in
        h)
                usage
                exit 1
                ;;
        v)
                VERBOSE=1
                ;;
        c)
                CLEANUP=1
                ;;
        t)
                TESTS_RUN=($OPTARG)
                ;;
        s)
                TESTS_SKIP=($OPTARG)
                ;;
        ?)
                usage
                exit
                ;;
        esac
done

if [ $(id -u) != 0 ]; then
        die "Must run as root"
fi

# Initialize the test suite
init

# Perform pre-cleanup if requested
if [ ${CLEANUP} ]; then
        ${ZFS_SH} -u
        cleanup_md_devices
        cleanup_loop_devices
        rm -f /tmp/zpool.cache.*
fi

# Check if we need to skip the tests that require scsi_debug and lsscsi.
SCSI_DEBUG=0
${INFOMOD} scsi_debug &>/dev/null && SCSI_DEBUG=1
HAVE_LSSCSI=0
test -f ${LSSCSI} && HAVE_LSSCSI=1
if [ ${SCSI_DEBUG} -eq 0 ] || [ ${HAVE_LSSCSI} -eq 0 ]; then
        echo "Skipping test 10 which requires the scsi_debug" \
            "module and the ${LSSCSI} utility"
fi

# Validate persistent zpool.cache configuration.
test_1() {
        local POOL_NAME=test1
        local TMP_FILE1=`mktemp`
        local TMP_FILE2=`mktemp`
        local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

        # Create a pool and save its status for comparison.
        ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
        ${ZPOOL} status ${POOL_NAME} >${TMP_FILE1} || fail 3

        # Unload/load the module stack and verify the pool persists.
        ${ZFS_SH} -u || fail 4
        ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 5
        ${ZPOOL} import -c ${TMP_CACHE} ${POOL_NAME} || fail 5
        ${ZPOOL} status ${POOL_NAME} >${TMP_FILE2} || fail 6
        cmp ${TMP_FILE1} ${TMP_FILE2} || fail 7

        # Cleanup the test pool and temporary files
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 8
        rm -f ${TMP_FILE1} ${TMP_FILE2} ${TMP_CACHE} || fail 9
        ${ZFS_SH} -u || fail 10

        pass
}
run_test 1 "persistent zpool.cache"

# Validate ZFS disk scanning and import w/out zpool.cache configuration.
test_2() {
        local POOL_NAME=test2
        local TMP_FILE1=`mktemp`
        local TMP_FILE2=`mktemp`
        local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

        # Create a pool and save its status for comparison.
        ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
        ${ZPOOL} status ${POOL_NAME} >${TMP_FILE1} || fail 3

        # Unload the module stack, remove the cache file, load the module
        # stack and attempt to probe the disks to import the pool. As
        # a cross check verify the old pool state against the imported.
        ${ZFS_SH} -u || fail 4
        rm -f ${TMP_CACHE} || fail 5
        ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 6
        ${ZPOOL} import -d /dev ${POOL_NAME} || fail 8
        ${ZPOOL} status ${POOL_NAME} >${TMP_FILE2} || fail 9
        cmp ${TMP_FILE1} ${TMP_FILE2} || fail 10

        # Cleanup the test pool and temporary files
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 11
        rm -f ${TMP_FILE1} ${TMP_FILE2} || fail 12
        ${ZFS_SH} -u || fail 13

        pass
}
run_test 2 "scan disks for pools to import"

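# Count the zvol device nodes udev has created under /dev/zvol for a pool,
# volume, snapshot, and clone (plus two partitions each) and compare the
# total against the expected count passed as the first argument.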
zconfig_zvol_device_stat() {
        local EXPECT=$1
        local POOL_NAME=/dev/zvol/$2
        local ZVOL_NAME=/dev/zvol/$3
        local SNAP_NAME=/dev/zvol/$4
        local CLONE_NAME=/dev/zvol/$5
        local COUNT=0

        # Briefly delay for udev
        udev_trigger

        # Pool exists
        stat ${POOL_NAME} &>/dev/null && let COUNT=$COUNT+1

        # Volume and partitions
        stat ${ZVOL_NAME} &>/dev/null && let COUNT=$COUNT+1
        stat ${ZVOL_NAME}-part1 &>/dev/null && let COUNT=$COUNT+1
        stat ${ZVOL_NAME}-part2 &>/dev/null && let COUNT=$COUNT+1

        # Snapshot with partitions
        stat ${SNAP_NAME} &>/dev/null && let COUNT=$COUNT+1
        stat ${SNAP_NAME}-part1 &>/dev/null && let COUNT=$COUNT+1
        stat ${SNAP_NAME}-part2 &>/dev/null && let COUNT=$COUNT+1

        # Clone with partitions
        stat ${CLONE_NAME} &>/dev/null && let COUNT=$COUNT+1
        stat ${CLONE_NAME}-part1 &>/dev/null && let COUNT=$COUNT+1
        stat ${CLONE_NAME}-part2 &>/dev/null && let COUNT=$COUNT+1

        if [ $EXPECT -ne $COUNT ]; then
                return 1
        fi

        return 0
}

# zpool import/export device check
# (1 volume, 2 partitions, 1 snapshot, 1 clone)
test_3() {
        local POOL_NAME=tank
        local ZVOL_NAME=volume
        local SNAP_NAME=snap
        local CLONE_NAME=clone
        local FULL_ZVOL_NAME=${POOL_NAME}/${ZVOL_NAME}
        local FULL_SNAP_NAME=${POOL_NAME}/${ZVOL_NAME}@${SNAP_NAME}
        local FULL_CLONE_NAME=${POOL_NAME}/${CLONE_NAME}
        local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

        # Create a pool, volume, partition, snapshot, and clone.
        ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
        ${ZFS} create -V 100M ${FULL_ZVOL_NAME} || fail 3
        ${ZFS} set snapdev=visible ${FULL_ZVOL_NAME} || fail 3
        label /dev/zvol/${FULL_ZVOL_NAME} msdos || fail 4
        partition /dev/zvol/${FULL_ZVOL_NAME} primary 1% 50% || fail 4
        partition /dev/zvol/${FULL_ZVOL_NAME} primary 51% -1 || fail 4
        ${ZFS} snapshot ${FULL_SNAP_NAME} || fail 5
        ${ZFS} clone ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 6

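        # Ten device nodes are expected: the pool directory, plus the
        # volume, snapshot, and clone, each with two partitions.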
        # Verify the devices were created
        zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \
            ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 7

        # Export the pool
        ${ZPOOL} export ${POOL_NAME} || fail 8

        # Verify the devices were removed
        zconfig_zvol_device_stat 0 ${POOL_NAME} ${FULL_ZVOL_NAME} \
            ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 9

        # Import the pool; the device nodes should reappear
        ${ZPOOL} import ${POOL_NAME} || fail 10

        # Verify the devices were created
        zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \
            ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 11

        # Destroy the pool and consequently the devices
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 12

        # Verify the devices were removed
        zconfig_zvol_device_stat 0 ${POOL_NAME} ${FULL_ZVOL_NAME} \
            ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 13

        ${ZFS_SH} -u || fail 14
        rm -f ${TMP_CACHE} || fail 15

        pass
}
run_test 3 "zpool import/export device"

# zpool insmod/rmmod device check (1 volume, 1 snapshot, 1 clone)
test_4() {
        POOL_NAME=tank
        ZVOL_NAME=volume
        SNAP_NAME=snap
        CLONE_NAME=clone
        FULL_ZVOL_NAME=${POOL_NAME}/${ZVOL_NAME}
        FULL_SNAP_NAME=${POOL_NAME}/${ZVOL_NAME}@${SNAP_NAME}
        FULL_CLONE_NAME=${POOL_NAME}/${CLONE_NAME}
        TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

        # Create a pool, volume, snapshot, and clone
        ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
        ${ZFS} create -V 100M ${FULL_ZVOL_NAME} || fail 3
        ${ZFS} set snapdev=visible ${FULL_ZVOL_NAME} || fail 3
        label /dev/zvol/${FULL_ZVOL_NAME} msdos || fail 4
        partition /dev/zvol/${FULL_ZVOL_NAME} primary 1% 50% || fail 4
        partition /dev/zvol/${FULL_ZVOL_NAME} primary 51% -1 || fail 4
        ${ZFS} snapshot ${FULL_SNAP_NAME} || fail 5
        ${ZFS} clone ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 6

        # Verify the devices were created
        zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \
            ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 7

        # Unload the modules
        ${ZFS_SH} -u || fail 8

        # Verify the devices were removed
        zconfig_zvol_device_stat 0 ${POOL_NAME} ${FULL_ZVOL_NAME} \
            ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 9

        # Load the modules, list the pools to ensure they are opened
        ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 10
        ${ZPOOL} import -c ${TMP_CACHE} ${POOL_NAME} || fail 10
        ${ZPOOL} list &>/dev/null

        # Verify the devices were created
        zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \
            ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 11

        # Destroy the pool and consequently the devices
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 12

        # Verify the devices were removed
        zconfig_zvol_device_stat 0 ${POOL_NAME} ${FULL_ZVOL_NAME} \
            ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 13

        ${ZFS_SH} -u || fail 14
        rm -f ${TMP_CACHE} || fail 15

        pass
}
run_test 4 "zpool insmod/rmmod device"

# ZVOL volume sanity check
test_5() {
        local POOL_NAME=tank
        local ZVOL_NAME=fish
        local FULL_NAME=${POOL_NAME}/${ZVOL_NAME}
        local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

        # Create a pool and volume.
        ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raid0 || fail 2
        ${ZFS} create -V 800M ${FULL_NAME} || fail 3
        label /dev/zvol/${FULL_NAME} msdos || fail 4
        partition /dev/zvol/${FULL_NAME} primary 1 -1 || fail 4
        format /dev/zvol/${FULL_NAME}-part1 ext2 || fail 5

        # Mount the ext2 filesystem and copy some data to it.
        mkdir -p /tmp/${ZVOL_NAME}-part1 || fail 6
        mount /dev/zvol/${FULL_NAME}-part1 /tmp/${ZVOL_NAME}-part1 || fail 7
        cp -RL ${SRC_DIR} /tmp/${ZVOL_NAME}-part1 || fail 8
        sync

        # Verify the copied files match the original files.
        diff -ur ${SRC_DIR} /tmp/${ZVOL_NAME}-part1/${SRC_DIR##*/} \
            &>/dev/null || fail 9

        # Remove the files, umount, destroy the volume and pool.
        rm -Rf /tmp/${ZVOL_NAME}-part1/${SRC_DIR##*/} || fail 10
        umount /tmp/${ZVOL_NAME}-part1 || fail 11
        rmdir /tmp/${ZVOL_NAME}-part1 || fail 12

        ${ZFS} destroy ${FULL_NAME} || fail 13
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 14
        ${ZFS_SH} -u || fail 15
        rm -f ${TMP_CACHE} || fail 16

        pass
}
run_test 5 "zvol+ext2 volume"

# ZVOL snapshot sanity check
test_6() {
        local POOL_NAME=tank
        local ZVOL_NAME=fish
        local SNAP_NAME=pristine
        local FULL_ZVOL_NAME=${POOL_NAME}/${ZVOL_NAME}
        local FULL_SNAP_NAME=${POOL_NAME}/${ZVOL_NAME}@${SNAP_NAME}
        local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

        # Create a pool and volume.
        ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raid0 || fail 2
        ${ZFS} create -V 800M ${FULL_ZVOL_NAME} || fail 3
        ${ZFS} set snapdev=visible ${FULL_ZVOL_NAME} || fail 3
        label /dev/zvol/${FULL_ZVOL_NAME} msdos || fail 4
        partition /dev/zvol/${FULL_ZVOL_NAME} primary 1 -1 || fail 4
        format /dev/zvol/${FULL_ZVOL_NAME}-part1 ext2 || fail 5

        # Mount the ext2 filesystem and copy some data to it.
        mkdir -p /tmp/${ZVOL_NAME}-part1 || fail 6
        mount /dev/zvol/${FULL_ZVOL_NAME}-part1 /tmp/${ZVOL_NAME}-part1 \
            || fail 7

        # Snapshot the pristine ext2 filesystem and mount it read-only.
        ${ZFS} snapshot ${FULL_SNAP_NAME} || fail 8
        wait_udev /dev/zvol/${FULL_SNAP_NAME}-part1 30 || fail 8
        mkdir -p /tmp/${SNAP_NAME}-part1 || fail 9
        mount /dev/zvol/${FULL_SNAP_NAME}-part1 /tmp/${SNAP_NAME}-part1 \
            &>/dev/null || fail 10

        # Copy to original volume
        cp -RL ${SRC_DIR} /tmp/${ZVOL_NAME}-part1 || fail 11
        sync

        # Verify the copied files match the original files,
        # and the copied files do NOT appear in the snapshot.
        diff -ur ${SRC_DIR} /tmp/${ZVOL_NAME}-part1/${SRC_DIR##*/} \
            &>/dev/null || fail 12
        diff -ur ${SRC_DIR} /tmp/${SNAP_NAME}-part1/${SRC_DIR##*/} \
            &>/dev/null && fail 13

        # Umount, destroy the snapshot, volume, and pool.
        umount /tmp/${SNAP_NAME}-part1 || fail 14
        rmdir /tmp/${SNAP_NAME}-part1 || fail 15
        ${ZFS} destroy ${FULL_SNAP_NAME} || fail 16

        umount /tmp/${ZVOL_NAME}-part1 || fail 17
        rmdir /tmp/${ZVOL_NAME}-part1 || fail 18
        ${ZFS} destroy ${FULL_ZVOL_NAME} || fail 19

        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 20
        ${ZFS_SH} -u || fail 21
        rm -f ${TMP_CACHE} || fail 22

        pass
}
run_test 6 "zvol+ext2 snapshot"

# ZVOL clone sanity check
test_7() {
        local POOL_NAME=tank
        local ZVOL_NAME=fish
        local SNAP_NAME=pristine
        local CLONE_NAME=clone
        local FULL_ZVOL_NAME=${POOL_NAME}/${ZVOL_NAME}
        local FULL_SNAP_NAME=${POOL_NAME}/${ZVOL_NAME}@${SNAP_NAME}
        local FULL_CLONE_NAME=${POOL_NAME}/${CLONE_NAME}
        local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

        # Create a pool and volume.
        ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
        ${ZFS} create -V 300M ${FULL_ZVOL_NAME} || fail 3
        ${ZFS} set snapdev=visible ${FULL_ZVOL_NAME} || fail 3
        label /dev/zvol/${FULL_ZVOL_NAME} msdos || fail 4
        partition /dev/zvol/${FULL_ZVOL_NAME} primary 1 -1 || fail 4
        format /dev/zvol/${FULL_ZVOL_NAME}-part1 ext2 || fail 5

        # Snapshot the pristine ext2 filesystem.
        ${ZFS} snapshot ${FULL_SNAP_NAME} || fail 6
        wait_udev /dev/zvol/${FULL_SNAP_NAME}-part1 30 || fail 7

        # Mount the ext2 filesystem so some data can be copied to it.
        mkdir -p /tmp/${ZVOL_NAME}-part1 || fail 7
        mount /dev/zvol/${FULL_ZVOL_NAME}-part1 \
            /tmp/${ZVOL_NAME}-part1 || fail 8

        # Mount the pristine ext2 snapshot.
        mkdir -p /tmp/${SNAP_NAME}-part1 || fail 9
        mount /dev/zvol/${FULL_SNAP_NAME}-part1 \
            /tmp/${SNAP_NAME}-part1 &>/dev/null || fail 10

        # Copy to original volume.
        cp -RL ${SRC_DIR} /tmp/${ZVOL_NAME}-part1 || fail 11
        sync

        # Verify the copied files match the original files,
        # and the copied files do NOT appear in the snapshot.
        diff -ur ${SRC_DIR} /tmp/${ZVOL_NAME}-part1/${SRC_DIR##*/} \
            &>/dev/null || fail 12
        diff -ur ${SRC_DIR} /tmp/${SNAP_NAME}-part1/${SRC_DIR##*/} \
            &>/dev/null && fail 13

        # Clone from the original pristine snapshot
        ${ZFS} clone ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 14
        wait_udev /dev/zvol/${FULL_CLONE_NAME}-part1 30 || fail 14
        mkdir -p /tmp/${CLONE_NAME}-part1 || fail 15
        mount /dev/zvol/${FULL_CLONE_NAME}-part1 \
            /tmp/${CLONE_NAME}-part1 || fail 16

        # Verify the clone matches the pristine snapshot,
        # and the files copied to the original volume are NOT there.
        diff -ur /tmp/${SNAP_NAME}-part1 /tmp/${CLONE_NAME}-part1 \
            &>/dev/null || fail 17
        diff -ur /tmp/${ZVOL_NAME}-part1 /tmp/${CLONE_NAME}-part1 \
            &>/dev/null && fail 18

        # Copy to cloned volume.
        cp -RL ${SRC_DIR} /tmp/${CLONE_NAME}-part1 || fail 19
        sync

        # Verify the clone matches the modified original volume.
        diff -ur /tmp/${ZVOL_NAME}-part1 /tmp/${CLONE_NAME}-part1 \
            &>/dev/null || fail 20

        # Umount, destroy the clone, snapshot, volume, and pool.
        umount /tmp/${CLONE_NAME}-part1 || fail 21
        rmdir /tmp/${CLONE_NAME}-part1 || fail 22
        ${ZFS} destroy ${FULL_CLONE_NAME} || fail 23

        umount /tmp/${SNAP_NAME}-part1 || fail 24
        rmdir /tmp/${SNAP_NAME}-part1 || fail 25
        ${ZFS} destroy ${FULL_SNAP_NAME} || fail 26

        umount /tmp/${ZVOL_NAME}-part1 || fail 27
        rmdir /tmp/${ZVOL_NAME}-part1 || fail 28
        ${ZFS} destroy ${FULL_ZVOL_NAME} || fail 29

        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 30
        ${ZFS_SH} -u || fail 31
        rm -f ${TMP_CACHE} || fail 32

        pass
}
run_test 7 "zvol+ext2 clone"

# Send/Receive sanity check
test_8() {
        local POOL_NAME1=tank1
        local POOL_NAME2=tank2
        local ZVOL_NAME=fish
        local SNAP_NAME=snap
        local FULL_ZVOL_NAME1=${POOL_NAME1}/${ZVOL_NAME}
        local FULL_ZVOL_NAME2=${POOL_NAME2}/${ZVOL_NAME}
        local FULL_SNAP_NAME1=${POOL_NAME1}/${ZVOL_NAME}@${SNAP_NAME}
        local FULL_SNAP_NAME2=${POOL_NAME2}/${ZVOL_NAME}@${SNAP_NAME}
        local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

        # Create two pools and a volume
        ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME1} -c lo-raidz2 || fail 2
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME2} -c lo-raidz2 || fail 2
        ${ZFS} create -V 300M ${FULL_ZVOL_NAME1} || fail 3
        ${ZFS} set snapdev=visible ${FULL_ZVOL_NAME1} || fail 3
        label /dev/zvol/${FULL_ZVOL_NAME1} msdos || fail 4
        partition /dev/zvol/${FULL_ZVOL_NAME1} primary 1 -1 || fail 4
        format /dev/zvol/${FULL_ZVOL_NAME1}-part1 ext2 || fail 5

        # Mount the ext2 filesystem and copy some data to it.
        mkdir -p /tmp/${FULL_ZVOL_NAME1}-part1 || fail 6
        mount /dev/zvol/${FULL_ZVOL_NAME1}-part1 \
            /tmp/${FULL_ZVOL_NAME1}-part1 || fail 7
        cp -RL ${SRC_DIR} /tmp/${FULL_ZVOL_NAME1}-part1 || fail 8

        # Unmount, snapshot, mount the ext2 filesystem so it may be sent.
        # We only unmount to ensure the ext2 filesystem is clean.
        umount /tmp/${FULL_ZVOL_NAME1}-part1 || fail 9
        ${ZFS} snapshot ${FULL_SNAP_NAME1} || fail 10
        wait_udev /dev/zvol/${FULL_SNAP_NAME1} 30 || fail 10
        mount /dev/zvol/${FULL_ZVOL_NAME1}-part1 \
            /tmp/${FULL_ZVOL_NAME1}-part1 || fail 11

        # Send/receive the snapshot from POOL_NAME1 to POOL_NAME2
        (${ZFS} send ${FULL_SNAP_NAME1} | \
            ${ZFS} receive ${FULL_ZVOL_NAME2}) || fail 12
        wait_udev /dev/zvol/${FULL_ZVOL_NAME2}-part1 30 || fail 12

        # Mount the sent ext2 filesystem.
        mkdir -p /tmp/${FULL_ZVOL_NAME2}-part1 || fail 13
        mount /dev/zvol/${FULL_ZVOL_NAME2}-part1 \
            /tmp/${FULL_ZVOL_NAME2}-part1 || fail 14

        # Verify the contents of the volumes match
        diff -ur /tmp/${FULL_ZVOL_NAME1}-part1 /tmp/${FULL_ZVOL_NAME2}-part1 \
            &>/dev/null || fail 15

        # Umount, destroy the volumes and pools.
        umount /tmp/${FULL_ZVOL_NAME1}-part1 || fail 16
        umount /tmp/${FULL_ZVOL_NAME2}-part1 || fail 17
        rmdir /tmp/${FULL_ZVOL_NAME1}-part1 || fail 18
        rmdir /tmp/${FULL_ZVOL_NAME2}-part1 || fail 19
        rmdir /tmp/${POOL_NAME1} || fail 20
        rmdir /tmp/${POOL_NAME2} || fail 21

        ${ZFS} destroy ${FULL_SNAP_NAME1} || fail 22
        ${ZFS} destroy ${FULL_SNAP_NAME2} || fail 23
        ${ZFS} destroy ${FULL_ZVOL_NAME1} || fail 24
        ${ZFS} destroy ${FULL_ZVOL_NAME2} || fail 25
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME1} -c lo-raidz2 -d || fail 26
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME2} -c lo-raidz2 -d || fail 27
        ${ZFS_SH} -u || fail 28
        rm -f ${TMP_CACHE} || fail 29

        pass
}
run_test 8 "zfs send/receive"

# zpool event sanity check
test_9() {
        local POOL_NAME=tank
        local ZVOL_NAME=fish
        local FULL_NAME=${POOL_NAME}/${ZVOL_NAME}
        local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
        local TMP_EVENTS=`mktemp -p /tmp zpool.events.XXXXXXXX`

        # Create a pool and volume.
        ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
        ${ZFS} create -V 300M ${FULL_NAME} || fail 3
        udev_trigger

        # Dump the events; there should be at least 5 lines.
        ${ZPOOL} events >${TMP_EVENTS} || fail 4
        EVENTS=`wc -l ${TMP_EVENTS} | cut -f1 -d' '`
        [ $EVENTS -lt 5 ] && fail 5

        # Clear the events and ensure there are none.
        ${ZPOOL} events -c >/dev/null || fail 6
        ${ZPOOL} events >${TMP_EVENTS} || fail 7
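        # The events listing includes a header line, so any count greater
        # than one means events remain after the clear.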
        EVENTS=`wc -l ${TMP_EVENTS} | cut -f1 -d' '`
        [ $EVENTS -gt 1 ] && fail 8

        ${ZFS} destroy ${FULL_NAME} || fail 9
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 10
        ${ZFS_SH} -u || fail 11
        rm -f ${TMP_CACHE} || fail 12
        rm -f ${TMP_EVENTS} || fail 13

        pass
}
run_test 9 "zpool events"

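# Add a cache or log vdev to a pool and verify it shows up by diffing the
# zpool status output from before and after the add.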
zconfig_add_vdev() {
        local POOL_NAME=$1
        local TYPE=$2
        local DEVICE=$3
        local TMP_FILE1=`mktemp`
        local TMP_FILE2=`mktemp`
        local TMP_FILE3=`mktemp`

        BASE_DEVICE=`basename ${DEVICE}`

        ${ZPOOL} status ${POOL_NAME} >${TMP_FILE1}
        ${ZPOOL} add -f ${POOL_NAME} ${TYPE} ${DEVICE} 2>/dev/null || return 1
        ${ZPOOL} status ${POOL_NAME} >${TMP_FILE2}
        diff ${TMP_FILE1} ${TMP_FILE2} > ${TMP_FILE3}

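        # Adding a single vdev should add exactly two lines to the status
        # output, so the diff should be three lines: the hunk header plus
        # the parent vdev line and the new device line.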
        [ `wc -l ${TMP_FILE3}|${AWK} '{print $1}'` -eq 3 ] || return 1

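        # The second to last diff line names the parent vdev ("cache" or
        # "logs") and the last line must show the new device ONLINE.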
        PARENT_VDEV=`tail -2 ${TMP_FILE3} | head -1 | ${AWK} '{print $NF}'`
        case $TYPE in
        cache)
                [ "${PARENT_VDEV}" = "${TYPE}" ] || return 1
                ;;
        log)
                [ "${PARENT_VDEV}" = "logs" ] || return 1
                ;;
        esac

        if ! tail -1 ${TMP_FILE3} |
            egrep -q "^>[[:space:]]+${BASE_DEVICE}[[:space:]]+ONLINE" ; then
                return 1
        fi
        rm -f ${TMP_FILE1} ${TMP_FILE2} ${TMP_FILE3}

        return 0
}

# zpool add and remove sanity check
test_10() {
        local POOL_NAME=tank
        local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
        local TMP_FILE1=`mktemp`
        local TMP_FILE2=`mktemp`

        if [ ${SCSI_DEBUG} -eq 0 ] || [ ${HAVE_LSSCSI} -eq 0 ] ; then
                skip
                return
        fi

        test `${LSMOD} | grep -c scsi_debug` -gt 0 && \
            (${RMMOD} scsi_debug || exit 1)

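        # Load scsi_debug to provide a small (128 MB) virtual SCSI disk
        # which is used below as the cache and log vdev.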
        /sbin/modprobe scsi_debug dev_size_mb=128 ||
            die "Error $? creating scsi_debug device"
        udev_trigger

        SDDEVICE=`${LSSCSI}|${AWK} '/scsi_debug/ { print $6; exit }'`
        BASE_SDDEVICE=`basename $SDDEVICE`

        # Create a pool
        ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
        ${ZPOOL} status ${POOL_NAME} >${TMP_FILE1} || fail 3

        # Add and remove a cache vdev by full path
        zconfig_add_vdev ${POOL_NAME} cache ${SDDEVICE} || fail 4
        ${ZPOOL} remove ${POOL_NAME} ${SDDEVICE} || fail 5
        ${ZPOOL} status ${POOL_NAME} >${TMP_FILE2} || fail 6
        cmp ${TMP_FILE1} ${TMP_FILE2} || fail 7
        sleep 1

        # Add and remove a cache vdev by shorthand path
        zconfig_add_vdev ${POOL_NAME} cache ${BASE_SDDEVICE} || fail 8
        ${ZPOOL} remove ${POOL_NAME} ${BASE_SDDEVICE} || fail 9
        ${ZPOOL} status ${POOL_NAME} >${TMP_FILE2} || fail 10
        cmp ${TMP_FILE1} ${TMP_FILE2} || fail 11
        sleep 1

        # Add and remove a log vdev
        zconfig_add_vdev ${POOL_NAME} log ${BASE_SDDEVICE} || fail 12
        ${ZPOOL} remove ${POOL_NAME} ${BASE_SDDEVICE} || fail 13
        ${ZPOOL} status ${POOL_NAME} >${TMP_FILE2} || fail 14
        cmp ${TMP_FILE1} ${TMP_FILE2} || fail 15

        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 16
        ${ZFS_SH} -u || fail 17
        ${RMMOD} scsi_debug || fail 18

        rm -f ${TMP_FILE1} ${TMP_FILE2} ${TMP_CACHE} || fail 19

        pass
}
run_test 10 "zpool add/remove vdev"

exit 0