#! /bin/ksh -p
#
# CDDL HEADER START
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
# CDDL HEADER END
#

#
# Copyright (c) 2015, 2016 by Delphix. All rights reserved.
#

. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/functional/removal/removal.kshlib

if is_linux; then
	log_unsupported "ZDB fails during concurrent pool activity."
fi

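#
# On exit, restore the condense tunables and tear down the pools created by
# default_setup_noexit.
#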
function reset
{
	log_must set_tunable64 zfs_condense_indirect_commit_entry_delay_ms 0
	log_must set_tunable64 zfs_condense_min_mapping_bytes 131072
	default_cleanup_noexit
}

default_setup_noexit "$DISKS" "true"
log_onexit reset
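#
# Stretch condensing out (delay each condense commit entry by one second) and
# let even a tiny indirect mapping qualify for condensing, so that a condense
# is still in progress when the pool is exported below.
#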
log_must set_tunable64 zfs_condense_indirect_commit_entry_delay_ms 1000
log_must set_tunable64 zfs_condense_min_mapping_bytes 1

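#
# Use a small recordsize so the test file is made up of many small blocks,
# which gives the removed device a large indirect mapping to condense.
#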
log_must zfs set recordsize=512 $TESTPOOL/$TESTFS

#
# Create a large file so that we know some of the blocks will be on the
# removed device, and hence eligible for remapping.
#
log_must dd if=/dev/urandom of=$TESTDIR/file bs=1024k count=10

#
# Create a file in the other filesystem, which will not be remapped.
#
log_must dd if=/dev/urandom of=$TESTDIR1/file bs=1024k count=10

#
# Randomly rewrite some of the blocks in the file so that there will be holes
# and we will not be able to remap the entire file in a few huge chunks.
#
for i in {1..4096}; do
	#
	# We have to sync periodically so that all the writes don't end up in
	# the same txg. If they were all in the same txg, only the last write
	# would go through and we would not have as many allocations to
	# fragment the file.
	#
	((i % 100 > 0 )) || sync_pool $TESTPOOL || log_fail "Could not sync."
	random_write $TESTDIR/file 512 || \
	    log_fail "Could not random write."
done

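#
# zdb -e searches the directory passed with -p for the pool's device nodes.
# Default to /dev, but if $REMOVEDISK was given as an absolute path, use its
# parent directory instead.
#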
REMOVEDISKPATH=/dev
case $REMOVEDISK in
	/*)
		REMOVEDISKPATH=$(dirname $REMOVEDISK)
		;;
esac

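#
# Remove the disk. Device removal copies its allocated data onto the
# remaining vdevs and leaves behind an indirect vdev whose mapping translates
# old block addresses to the new locations.
#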
log_must zpool remove $TESTPOOL $REMOVEDISK
log_must wait_for_removal $TESTPOOL
log_mustnot vdevs_in_pool $TESTPOOL $REMOVEDISK

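#
# 'zfs remap' rewrites block pointers that still reference the removed
# device's mapping, marking the old entries obsolete. With the tunables set
# above, this kicks off a condense that proceeds slowly enough for the export
# to land in the middle of it; the syncs and the sleep give the condense time
# to start before the pool is exported.
#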
log_must zfs remap $TESTPOOL/$TESTFS
sync_pool $TESTPOOL
sleep 5
sync_pool $TESTPOOL
log_must zpool export $TESTPOOL
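#
# zdb on the exported pool should report the interrupted condense. The second
# run (-cudi) verifies metadata checksums and dumps the uberblock, datasets,
# and intent logs, confirming the on-disk state is still consistent.
#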
zdb -e -p $REMOVEDISKPATH $TESTPOOL | grep 'Condensing indirect vdev' || \
    log_fail "Did not export during a condense."
log_must zdb -e -p $REMOVEDISKPATH -cudi $TESTPOOL
log_must zpool import $TESTPOOL

log_pass "Pool can be exported in the middle of a condense."