#!/bin/bash
#
# ZFS/ZPOOL configuration test script.

basedir="$(dirname $0)"

SCRIPT_COMMON=common.sh
if [ -f "${basedir}/${SCRIPT_COMMON}" ]; then
        . "${basedir}/${SCRIPT_COMMON}"
else
        echo "Missing helper script ${SCRIPT_COMMON}" && exit 1
fi

PROG=zconfig.sh

usage() {
cat << EOF
USAGE:
$0 [hvcts]

DESCRIPTION:
        ZFS/ZPOOL configuration tests

OPTIONS:
        -h      Show this message
        -v      Verbose
        -c      Cleanup lo+file devices at start
        -t <#>  Run listed tests
        -s <#>  Skip listed tests

EOF
}

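# Example invocation (illustrative): "./zconfig.sh -c -v -t '1 2 3'"
# cleans up leftover lo+file devices, then runs only tests 1, 2, and 3
# verbosely.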
while getopts 'hvct:s:?' OPTION; do
        case $OPTION in
        h)
                usage
                exit 1
                ;;
        v)
                VERBOSE=1
                ;;
        c)
                CLEANUP=1
                ;;
        t)
                TESTS_RUN=($OPTARG)
                ;;
        s)
                TESTS_SKIP=($OPTARG)
                ;;
        ?)
                usage
                exit
                ;;
        esac
done

if [ $(id -u) != 0 ]; then
        die "Must run as root"
fi

# Perform pre-cleanup if requested
if [ ${CLEANUP} ]; then
        ${ZFS_SH} -u
        cleanup_md_devices
        cleanup_loop_devices
        rm -f /tmp/zpool.cache.*
fi

# Check if we need to skip the tests that require scsi_debug and lsscsi.
SCSI_DEBUG=0
${INFOMOD} scsi_debug &>/dev/null && SCSI_DEBUG=1
HAVE_LSSCSI=0
test -f ${LSSCSI} && HAVE_LSSCSI=1
if [ ${SCSI_DEBUG} -eq 0 ] || [ ${HAVE_LSSCSI} -eq 0 ]; then
        echo "Skipping test 10 which requires the scsi_debug" \
            "module and the ${LSSCSI} utility"
fi

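# Partition DEVICE using sfdisk's classic input format: the first line
# creates partition 1 spanning cylinders START through END, the first
# ';' gives partition 2 the remaining space by default, and the trailing
# ';' lines leave the last two primary slots empty.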
zconfig_partition() {
        local DEVICE=$1
        local START=$2
        local END=$3
        local TMP_FILE=`mktemp`

        /sbin/sfdisk -q ${DEVICE} << EOF &>${TMP_FILE} || fail 4
${START},${END}
;
;
;
EOF

        rm ${TMP_FILE}
        udev_trigger
}

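# Note: fail, pass, skip, and run_test are assumed to be provided by the
# common.sh helper sourced above; each step's "|| fail N" tags it with a
# step number so a failure reports which step broke.
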
# Validate persistent zpool.cache configuration.
test_1() {
        local POOL_NAME=test1
        local TMP_FILE1=`mktemp`
        local TMP_FILE2=`mktemp`
        local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

        # Create a pool and save its status for comparison.
        ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
        ${ZPOOL} status ${POOL_NAME} >${TMP_FILE1} || fail 3

        # Unload/load the module stack and verify the pool persists.
        ${ZFS_SH} -u || fail 4
        ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 5
        ${ZPOOL} status ${POOL_NAME} >${TMP_FILE2} || fail 6
        cmp ${TMP_FILE1} ${TMP_FILE2} || fail 7

        # Cleanup the test pool and temporary files.
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 8
        rm -f ${TMP_FILE1} ${TMP_FILE2} ${TMP_CACHE} || fail 9
        ${ZFS_SH} -u || fail 10

        pass
}
run_test 1 "persistent zpool.cache"

# Validate ZFS disk scanning and import w/out zpool.cache configuration.
test_2() {
        local POOL_NAME=test2
        local TMP_FILE1=`mktemp`
        local TMP_FILE2=`mktemp`
        local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

        # Create a pool and save its status for comparison.
        ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
        ${ZPOOL} status ${POOL_NAME} >${TMP_FILE1} || fail 3

        # Unload the module stack, remove the cache file, load the module
        # stack, and attempt to probe the disks to import the pool.  As
        # a cross check, verify the old pool state against the imported.
        ${ZFS_SH} -u || fail 4
        rm -f ${TMP_CACHE} || fail 5
        ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 6
        ${ZPOOL} import | grep ${POOL_NAME} >/dev/null || fail 7
        ${ZPOOL} import ${POOL_NAME} || fail 8
        ${ZPOOL} status ${POOL_NAME} >${TMP_FILE2} || fail 9
        cmp ${TMP_FILE1} ${TMP_FILE2} || fail 10

        # Cleanup the test pool and temporary files.
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 11
        rm -f ${TMP_FILE1} ${TMP_FILE2} || fail 12
        ${ZFS_SH} -u || fail 13

        pass
}
run_test 2 "scan disks for pools to import"

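# Count the device nodes visible under /dev/zvol: the pool directory
# plus the volume, snapshot, and clone, each with two partitions (up to
# 10 nodes total), and compare the count against EXPECT.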
zconfig_zvol_device_stat() {
        local EXPECT=$1
        local POOL_NAME=/dev/zvol/$2
        local ZVOL_NAME=/dev/zvol/$3
        local SNAP_NAME=/dev/zvol/$4
        local CLONE_NAME=/dev/zvol/$5
        local COUNT=0

        # Briefly delay for udev.
        udev_trigger

        # Pool exists.
        stat ${POOL_NAME} &>/dev/null && let COUNT=$COUNT+1

        # Volume and partitions.
        stat ${ZVOL_NAME} &>/dev/null && let COUNT=$COUNT+1
        stat ${ZVOL_NAME}-part1 &>/dev/null && let COUNT=$COUNT+1
        stat ${ZVOL_NAME}-part2 &>/dev/null && let COUNT=$COUNT+1

        # Snapshot with partitions.
        stat ${SNAP_NAME} &>/dev/null && let COUNT=$COUNT+1
        stat ${SNAP_NAME}-part1 &>/dev/null && let COUNT=$COUNT+1
        stat ${SNAP_NAME}-part2 &>/dev/null && let COUNT=$COUNT+1

        # Clone with partitions.
        stat ${CLONE_NAME} &>/dev/null && let COUNT=$COUNT+1
        stat ${CLONE_NAME}-part1 &>/dev/null && let COUNT=$COUNT+1
        stat ${CLONE_NAME}-part2 &>/dev/null && let COUNT=$COUNT+1

        if [ $EXPECT -ne $COUNT ]; then
                return 1
        fi

        return 0
}

# zpool import/export device check
# (1 volume, 2 partitions, 1 snapshot, 1 clone)
test_3() {
        local POOL_NAME=tank
        local ZVOL_NAME=volume
        local SNAP_NAME=snap
        local CLONE_NAME=clone
        local FULL_ZVOL_NAME=${POOL_NAME}/${ZVOL_NAME}
        local FULL_SNAP_NAME=${POOL_NAME}/${ZVOL_NAME}@${SNAP_NAME}
        local FULL_CLONE_NAME=${POOL_NAME}/${CLONE_NAME}
        local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

        # Create a pool, volume, partitions, snapshot, and clone.
        ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
        ${ZFS} create -V 100M ${FULL_ZVOL_NAME} || fail 3
        zconfig_partition /dev/zvol/${FULL_ZVOL_NAME} 0 64 || fail 4
        ${ZFS} snapshot ${FULL_SNAP_NAME} || fail 5
        ${ZFS} clone ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 6

        # Verify the devices were created.
        zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \
            ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 7

        # Export the pool.
        ${ZPOOL} export ${POOL_NAME} || fail 8

        # Verify the devices were removed.
        zconfig_zvol_device_stat 0 ${POOL_NAME} ${FULL_ZVOL_NAME} \
            ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 9

        # Import the pool; the device check below retriggers udev.
        ${ZPOOL} import ${POOL_NAME} || fail 10

        # Verify the devices were created.
        zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \
            ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 11

        # Destroy the pool and consequently the devices.
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 12

        # Verify the devices were removed.
        zconfig_zvol_device_stat 0 ${POOL_NAME} ${FULL_ZVOL_NAME} \
            ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 13

        ${ZFS_SH} -u || fail 14
        rm -f ${TMP_CACHE} || fail 15

        pass
}
run_test 3 "zpool import/export device"

# zpool insmod/rmmod device check (1 volume, 1 snapshot, 1 clone)
test_4() {
        local POOL_NAME=tank
        local ZVOL_NAME=volume
        local SNAP_NAME=snap
        local CLONE_NAME=clone
        local FULL_ZVOL_NAME=${POOL_NAME}/${ZVOL_NAME}
        local FULL_SNAP_NAME=${POOL_NAME}/${ZVOL_NAME}@${SNAP_NAME}
        local FULL_CLONE_NAME=${POOL_NAME}/${CLONE_NAME}
        local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

        # Create a pool, volume, snapshot, and clone.
        ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
        ${ZFS} create -V 100M ${FULL_ZVOL_NAME} || fail 3
        zconfig_partition /dev/zvol/${FULL_ZVOL_NAME} 0 64 || fail 4
        ${ZFS} snapshot ${FULL_SNAP_NAME} || fail 5
        ${ZFS} clone ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 6

        # Verify the devices were created.
        zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \
            ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 7

        # Unload the modules.
        ${ZFS_SH} -u || fail 8

        # Verify the devices were removed.
        zconfig_zvol_device_stat 0 ${POOL_NAME} ${FULL_ZVOL_NAME} \
            ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 9

        # Load the modules; the device check below retriggers udev.
        ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 10

        # Verify the devices were created.
        zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \
            ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 11

        # Destroy the pool and consequently the devices.
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 12

        # Verify the devices were removed.
        zconfig_zvol_device_stat 0 ${POOL_NAME} ${FULL_ZVOL_NAME} \
            ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 13

        ${ZFS_SH} -u || fail 14
        rm -f ${TMP_CACHE} || fail 15

        pass
}
run_test 4 "zpool insmod/rmmod device"

# ZVOL volume sanity check
test_5() {
        local POOL_NAME=tank
        local ZVOL_NAME=fish
        local FULL_NAME=${POOL_NAME}/${ZVOL_NAME}
        local SRC_DIR=/bin/
        local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

        # Create a pool and volume.
        ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raid0 || fail 2
        ${ZFS} create -V 800M ${FULL_NAME} || fail 3

        # Partition the volume; for an 800M volume there will be
        # 1624 cylinders, 16 heads, and 63 sectors per track.
        zconfig_partition /dev/zvol/${FULL_NAME} 0 1624

        # Format the partition with ext3.
        /sbin/mkfs.ext3 -q /dev/zvol/${FULL_NAME}-part1 || fail 5

        # Mount the ext3 filesystem and copy some data to it.
        mkdir -p /tmp/${ZVOL_NAME}-part1 || fail 6
        mount /dev/zvol/${FULL_NAME}-part1 /tmp/${ZVOL_NAME}-part1 || fail 7
        cp -RL ${SRC_DIR} /tmp/${ZVOL_NAME}-part1 || fail 8
        sync

        # Verify the copied files match the original files.
        diff -ur ${SRC_DIR} /tmp/${ZVOL_NAME}-part1${SRC_DIR} \
            &>/dev/null || fail 9

        # Remove the files, umount, destroy the volume and pool.
        rm -Rf /tmp/${ZVOL_NAME}-part1${SRC_DIR}* || fail 10
        umount /tmp/${ZVOL_NAME}-part1 || fail 11
        rmdir /tmp/${ZVOL_NAME}-part1 || fail 12

        ${ZFS} destroy ${FULL_NAME} || fail 13
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raid0 -d || fail 14
        ${ZFS_SH} -u || fail 15
        rm -f ${TMP_CACHE} || fail 16

        pass
}
run_test 5 "zvol+ext3 volume"

# ZVOL snapshot sanity check
test_6() {
        local POOL_NAME=tank
        local ZVOL_NAME=fish
        local SNAP_NAME=pristine
        local FULL_ZVOL_NAME=${POOL_NAME}/${ZVOL_NAME}
        local FULL_SNAP_NAME=${POOL_NAME}/${ZVOL_NAME}@${SNAP_NAME}
        local SRC_DIR=/bin/
        local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

        # Create a pool and volume.
        ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raid0 || fail 2
        ${ZFS} create -V 800M ${FULL_ZVOL_NAME} || fail 3

        # Partition the volume; for an 800M volume there will be
        # 1624 cylinders, 16 heads, and 63 sectors per track.
        zconfig_partition /dev/zvol/${FULL_ZVOL_NAME} 0 1624

        # Format the partition with ext2 (no journal).
        /sbin/mkfs.ext2 -q /dev/zvol/${FULL_ZVOL_NAME}-part1 || fail 5

        # Mount the ext2 filesystem.
        mkdir -p /tmp/${ZVOL_NAME}-part1 || fail 6
        mount /dev/zvol/${FULL_ZVOL_NAME}-part1 /tmp/${ZVOL_NAME}-part1 \
            || fail 7

        # Snapshot the pristine ext2 filesystem and mount it read-only.
        ${ZFS} snapshot ${FULL_SNAP_NAME} || fail 8
        wait_udev /dev/zvol/${FULL_SNAP_NAME}-part1 30 || fail 8
        mkdir -p /tmp/${SNAP_NAME}-part1 || fail 9
        mount /dev/zvol/${FULL_SNAP_NAME}-part1 /tmp/${SNAP_NAME}-part1 \
            &>/dev/null || fail 10

        # Copy to the original volume.
        cp -RL ${SRC_DIR} /tmp/${ZVOL_NAME}-part1 || fail 11
        sync

        # Verify the copied files match the original files,
        # and the copied files do NOT appear in the snapshot.
        diff -ur ${SRC_DIR} /tmp/${ZVOL_NAME}-part1${SRC_DIR} \
            &>/dev/null || fail 12
        diff -ur ${SRC_DIR} /tmp/${SNAP_NAME}-part1${SRC_DIR} \
            &>/dev/null && fail 13

        # Umount and destroy the snapshot, volume, and pool.
        umount /tmp/${SNAP_NAME}-part1 || fail 14
        rmdir /tmp/${SNAP_NAME}-part1 || fail 15
        ${ZFS} destroy ${FULL_SNAP_NAME} || fail 16

        umount /tmp/${ZVOL_NAME}-part1 || fail 17
        rmdir /tmp/${ZVOL_NAME}-part1 || fail 18
        ${ZFS} destroy ${FULL_ZVOL_NAME} || fail 19

        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raid0 -d || fail 20
        ${ZFS_SH} -u || fail 21
        rm -f ${TMP_CACHE} || fail 22

        pass
}
run_test 6 "zvol+ext2 snapshot"

# ZVOL clone sanity check
test_7() {
        local POOL_NAME=tank
        local ZVOL_NAME=fish
        local SNAP_NAME=pristine
        local CLONE_NAME=clone
        local FULL_ZVOL_NAME=${POOL_NAME}/${ZVOL_NAME}
        local FULL_SNAP_NAME=${POOL_NAME}/${ZVOL_NAME}@${SNAP_NAME}
        local FULL_CLONE_NAME=${POOL_NAME}/${CLONE_NAME}
        local SRC_DIR=/bin/
        local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

        # Create a pool and volume.
        ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
        ${ZFS} create -V 300M ${FULL_ZVOL_NAME} || fail 3

        # Partition the volume; for a 300M volume there will be
        # 609 cylinders, 16 heads, and 63 sectors per track.
        zconfig_partition /dev/zvol/${FULL_ZVOL_NAME} 0 609

        # Format the partition with ext2 (no journal).
        /sbin/mkfs.ext2 -q /dev/zvol/${FULL_ZVOL_NAME}-part1 || fail 5

        # Mount the ext2 filesystem.
        mkdir -p /tmp/${ZVOL_NAME}-part1 || fail 6
        mount /dev/zvol/${FULL_ZVOL_NAME}-part1 /tmp/${ZVOL_NAME}-part1 \
            || fail 7

        # Snapshot the pristine ext2 filesystem and mount it read-only.
        ${ZFS} snapshot ${FULL_SNAP_NAME} || fail 8
        wait_udev /dev/zvol/${FULL_SNAP_NAME}-part1 30 || fail 8
        mkdir -p /tmp/${SNAP_NAME}-part1 || fail 9
        mount /dev/zvol/${FULL_SNAP_NAME}-part1 \
            /tmp/${SNAP_NAME}-part1 &>/dev/null || fail 10

        # Copy to the original volume.
        cp -RL ${SRC_DIR} /tmp/${ZVOL_NAME}-part1 || fail 11
        sync

        # Verify the copied files match the original files,
        # and the copied files do NOT appear in the snapshot.
        diff -ur ${SRC_DIR} /tmp/${ZVOL_NAME}-part1${SRC_DIR} \
            &>/dev/null || fail 12
        diff -ur ${SRC_DIR} /tmp/${SNAP_NAME}-part1${SRC_DIR} \
            &>/dev/null && fail 13

        # Clone from the original pristine snapshot.
        ${ZFS} clone ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 14
        wait_udev /dev/zvol/${FULL_CLONE_NAME}-part1 30 || fail 14
        mkdir -p /tmp/${CLONE_NAME}-part1 || fail 15
        mount /dev/zvol/${FULL_CLONE_NAME}-part1 \
            /tmp/${CLONE_NAME}-part1 || fail 16

        # Verify the clone matches the pristine snapshot,
        # and the files copied to the original volume are NOT there.
        diff -ur /tmp/${SNAP_NAME}-part1 /tmp/${CLONE_NAME}-part1 \
            &>/dev/null || fail 17
        diff -ur /tmp/${ZVOL_NAME}-part1 /tmp/${CLONE_NAME}-part1 \
            &>/dev/null && fail 18

        # Copy to the cloned volume.
        cp -RL ${SRC_DIR} /tmp/${CLONE_NAME}-part1 || fail 19
        sync

        # Verify the clone matches the modified original volume.
        diff -ur /tmp/${ZVOL_NAME}-part1 /tmp/${CLONE_NAME}-part1 \
            &>/dev/null || fail 20

        # Umount and destroy the clone, snapshot, volume, and pool.
        umount /tmp/${CLONE_NAME}-part1 || fail 21
        rmdir /tmp/${CLONE_NAME}-part1 || fail 22
        ${ZFS} destroy ${FULL_CLONE_NAME} || fail 23

        umount /tmp/${SNAP_NAME}-part1 || fail 24
        rmdir /tmp/${SNAP_NAME}-part1 || fail 25
        ${ZFS} destroy ${FULL_SNAP_NAME} || fail 26

        umount /tmp/${ZVOL_NAME}-part1 || fail 27
        rmdir /tmp/${ZVOL_NAME}-part1 || fail 28
        ${ZFS} destroy ${FULL_ZVOL_NAME} || fail 29

        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 30
        ${ZFS_SH} -u || fail 31
        rm -f ${TMP_CACHE} || fail 32

        pass
}
run_test 7 "zvol+ext2 clone"

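# Note: 'zfs send' writes a serialized snapshot stream to stdout and
# 'zfs receive' recreates the dataset from that stream, so piping one
# into the other copies the volume (partition table included) between
# pools.
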
# Send/Receive sanity check
test_8() {
        local POOL_NAME1=tank1
        local POOL_NAME2=tank2
        local ZVOL_NAME=fish
        local SNAP_NAME=snap
        local FULL_ZVOL_NAME1=${POOL_NAME1}/${ZVOL_NAME}
        local FULL_ZVOL_NAME2=${POOL_NAME2}/${ZVOL_NAME}
        local FULL_SNAP_NAME1=${POOL_NAME1}/${ZVOL_NAME}@${SNAP_NAME}
        local FULL_SNAP_NAME2=${POOL_NAME2}/${ZVOL_NAME}@${SNAP_NAME}
        local SRC_DIR=/bin/
        local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`

        # Create two pools and a volume.
        ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME1} -c lo-raidz2 || fail 2
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME2} -c lo-raidz2 || fail 3
        ${ZFS} create -V 300M ${FULL_ZVOL_NAME1} || fail 4

        # Partition the volume; for a 300M volume there will be
        # 609 cylinders, 16 heads, and 63 sectors per track.
        zconfig_partition /dev/zvol/${FULL_ZVOL_NAME1} 0 609

        # Format the partition with ext2.
        /sbin/mkfs.ext2 -q /dev/zvol/${FULL_ZVOL_NAME1}-part1 || fail 5

        # Mount the ext2 filesystem and copy some data to it.
        mkdir -p /tmp/${FULL_ZVOL_NAME1}-part1 || fail 6
        mount /dev/zvol/${FULL_ZVOL_NAME1}-part1 \
            /tmp/${FULL_ZVOL_NAME1}-part1 || fail 7
        cp -RL ${SRC_DIR} /tmp/${FULL_ZVOL_NAME1}-part1 || fail 8
        sync || fail 9

        # Snapshot the ext2 filesystem so it may be sent.
        ${ZFS} snapshot ${FULL_SNAP_NAME1} || fail 11
        wait_udev /dev/zvol/${FULL_SNAP_NAME1} 30 || fail 11

        # Send/receive the snapshot from POOL_NAME1 to POOL_NAME2.
        (${ZFS} send ${FULL_SNAP_NAME1} | \
            ${ZFS} receive ${FULL_ZVOL_NAME2}) || fail 12
        wait_udev /dev/zvol/${FULL_ZVOL_NAME2}-part1 30 || fail 12

        # Mount the received ext2 filesystem.
        mkdir -p /tmp/${FULL_ZVOL_NAME2}-part1 || fail 13
        mount /dev/zvol/${FULL_ZVOL_NAME2}-part1 \
            /tmp/${FULL_ZVOL_NAME2}-part1 || fail 14

        # Verify the contents of the volumes match.
        diff -ur /tmp/${FULL_ZVOL_NAME1}-part1 /tmp/${FULL_ZVOL_NAME2}-part1 \
            &>/dev/null || fail 15

        # Umount and destroy the volumes and pools.
        umount /tmp/${FULL_ZVOL_NAME1}-part1 || fail 16
        umount /tmp/${FULL_ZVOL_NAME2}-part1 || fail 17
        rmdir /tmp/${FULL_ZVOL_NAME1}-part1 || fail 18
        rmdir /tmp/${FULL_ZVOL_NAME2}-part1 || fail 19
        rmdir /tmp/${POOL_NAME1} || fail 20
        rmdir /tmp/${POOL_NAME2} || fail 21

        ${ZFS} destroy ${FULL_SNAP_NAME1} || fail 22
        ${ZFS} destroy ${FULL_SNAP_NAME2} || fail 23
        ${ZFS} destroy ${FULL_ZVOL_NAME1} || fail 24
        ${ZFS} destroy ${FULL_ZVOL_NAME2} || fail 25
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME1} -c lo-raidz2 -d || fail 26
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME2} -c lo-raidz2 -d || fail 27
        ${ZFS_SH} -u || fail 28
        rm -f ${TMP_CACHE} || fail 29

        pass
}
run_test 8 "zfs send/receive"

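# Note: 'zpool events' reports kernel-generated pool events (pool
# creation, config syncs, vdev changes, etc.), so creating a pool and
# volume is expected to queue several of them.
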
# zpool event sanity check
test_9() {
        local POOL_NAME=tank
        local ZVOL_NAME=fish
        local FULL_NAME=${POOL_NAME}/${ZVOL_NAME}
        local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
        local TMP_EVENTS=`mktemp -p /tmp zpool.events.XXXXXXXX`

        # Create a pool and volume.
        ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
        ${ZFS} create -V 300M ${FULL_NAME} || fail 3

        # Dump the events; there should be at least 5 lines.
        ${ZPOOL} events >${TMP_EVENTS} || fail 4
        EVENTS=`wc -l ${TMP_EVENTS} | cut -f1 -d' '`
        [ $EVENTS -lt 5 ] && fail 5

        # Clear the events and verify at most a header line remains.
        ${ZPOOL} events -c >/dev/null || fail 6
        ${ZPOOL} events >${TMP_EVENTS} || fail 7
        EVENTS=`wc -l ${TMP_EVENTS} | cut -f1 -d' '`
        [ $EVENTS -gt 1 ] && fail 8

        ${ZFS} destroy ${FULL_NAME} || fail 9
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 10
        ${ZFS_SH} -u || fail 11
        rm -f ${TMP_CACHE} || fail 12
        rm -f ${TMP_EVENTS} || fail 13

        pass
}
run_test 9 "zpool events"

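# Add DEVICE to POOL_NAME as a vdev of the given TYPE (cache or log)
# and verify, via a three-line 'zpool status' diff, that the new device
# appears ONLINE under the expected parent vdev.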
zconfig_add_vdev() {
        local POOL_NAME=$1
        local TYPE=$2
        local DEVICE=$3
        local TMP_FILE1=`mktemp`
        local TMP_FILE2=`mktemp`
        local TMP_FILE3=`mktemp`

        BASE_DEVICE=`basename ${DEVICE}`

        ${ZPOOL} status ${POOL_NAME} >${TMP_FILE1}
        ${ZPOOL} add -f ${POOL_NAME} ${TYPE} ${DEVICE} 2>/dev/null || return 1
        ${ZPOOL} status ${POOL_NAME} >${TMP_FILE2}
        diff ${TMP_FILE1} ${TMP_FILE2} > ${TMP_FILE3}

        [ `wc -l ${TMP_FILE3} | ${AWK} '{print $1}'` -eq 3 ] || return 1

        PARENT_VDEV=`tail -2 ${TMP_FILE3} | head -1 | ${AWK} '{print $NF}'`
        case $TYPE in
        cache)
                [ "${PARENT_VDEV}" = "${TYPE}" ] || return 1
                ;;
        log)
                [ "${PARENT_VDEV}" = "logs" ] || return 1
                ;;
        esac

        if ! tail -1 ${TMP_FILE3} |
            egrep -q "^>[[:space:]]+${BASE_DEVICE}[[:space:]]+ONLINE"; then
                return 1
        fi
        rm -f ${TMP_FILE1} ${TMP_FILE2} ${TMP_FILE3}

        return 0
}

# zpool add and remove sanity check
test_10() {
        local POOL_NAME=tank
        local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX`
        local TMP_FILE1=`mktemp`
        local TMP_FILE2=`mktemp`

        if [ ${SCSI_DEBUG} -eq 0 ] || [ ${HAVE_LSSCSI} -eq 0 ]; then
                skip
                return
        fi

        # Remove any stale scsi_debug module before loading a fresh one.
        if ${LSMOD} | grep -q scsi_debug; then
                ${RMMOD} scsi_debug || die "Failed to remove scsi_debug module"
        fi

        /sbin/modprobe scsi_debug dev_size_mb=128 ||
            die "Error $? creating scsi_debug device"
        udev_trigger

        # The device node is the sixth lsscsi column (e.g. /dev/sdb); use
        # the first scsi_debug entry found.
        SDDEVICE=`${LSSCSI} | ${AWK} '/scsi_debug/ { print $6; exit }'`
        BASE_SDDEVICE=`basename $SDDEVICE`

        # Create a pool.
        ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1
        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2
        ${ZPOOL} status ${POOL_NAME} >${TMP_FILE1} || fail 3

        # Add and remove a cache vdev by full path.
        zconfig_add_vdev ${POOL_NAME} cache ${SDDEVICE} || fail 4
        ${ZPOOL} remove ${POOL_NAME} ${SDDEVICE} || fail 5
        ${ZPOOL} status ${POOL_NAME} >${TMP_FILE2} || fail 6
        cmp ${TMP_FILE1} ${TMP_FILE2} || fail 7
        sleep 1

        # Add and remove a cache vdev by shorthand path.
        zconfig_add_vdev ${POOL_NAME} cache ${BASE_SDDEVICE} || fail 8
        ${ZPOOL} remove ${POOL_NAME} ${BASE_SDDEVICE} || fail 9
        ${ZPOOL} status ${POOL_NAME} >${TMP_FILE2} || fail 10
        cmp ${TMP_FILE1} ${TMP_FILE2} || fail 11
        sleep 1

        # Add and remove a log vdev.
        zconfig_add_vdev ${POOL_NAME} log ${BASE_SDDEVICE} || fail 12
        ${ZPOOL} remove ${POOL_NAME} ${BASE_SDDEVICE} || fail 13
        ${ZPOOL} status ${POOL_NAME} >${TMP_FILE2} || fail 14
        cmp ${TMP_FILE1} ${TMP_FILE2} || fail 15

        ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 16
        ${ZFS_SH} -u || fail 17
        ${RMMOD} scsi_debug || fail 18

        rm -f ${TMP_FILE1} ${TMP_FILE2} ${TMP_CACHE} || fail 19

        pass
}
run_test 10 "zpool add/remove vdev"

exit 0