#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#

#
# Copyright (c) 2017, 2018 by Delphix. All rights reserved.
#

. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/functional/removal/removal.kshlib

#
# In general all the tests related to the pool checkpoint can
# be divided into two categories: tests that verify features
# provided by the checkpoint (e.g. checkpoint_rewind) and tests
# that stress-test the checkpoint (e.g. checkpoint_big_rewind).
#
# For the first group we don't really care about the size of
# the pool or the individual file sizes within the filesystems.
# This is why these tests run directly on pools that use a
# "real disk vdev" (meaning not a file-based one). These tests
# use the $TESTPOOL pool that is created on top of $TESTDISK.
# This pool is referred to as the "test pool" and thus all
# the tests of this group use the testpool-related functions of
# this file (not the nested_pools ones).
#
# For the second group we generally try to bring the pool to its
# limits by increasing fragmentation, filling all allocatable
# space, attempting to use vdevs that the checkpoint spacemap
# cannot represent, etc. For these tests we need to control
# almost all parameters of the pool and the vdevs that back it,
# so we create them based on file-based vdevs that we carefully
# create within the $TESTPOOL pool. So most of these tests, in
# order to create this nested pool structure, generally start
# like this (see the illustrative sketch after this comment):
# 1] We create the test pool ($TESTPOOL).
# 2] We create a filesystem and we populate it with files of
#    some predetermined size.
# 3] We use those files as vdevs for the pool that the test
#    will use ($NESTEDPOOL).
# 4] Go on and let the test run and operate on $NESTEDPOOL.
#

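#
# Illustrative sketch only (the exact command sequence is an
# assumption and varies per test): a stress test from the second
# group typically drives the helpers defined below roughly like this.
#
#	log_onexit cleanup_nested_pools
#	setup_nested_pools
#	populate_nested_pool
#	log_must zpool checkpoint $NESTEDPOOL
#	nested_change_state_after_checkpoint
#	log_must zpool export $NESTEDPOOL
#	log_must zpool import -d $FILEDISKDIR --rewind-to-checkpoint $NESTEDPOOL
#	nested_verify_pre_checkpoint_state
#
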
#
# These disks are used to back $TESTPOOL
#
TESTDISK="$(echo $DISKS | cut -d' ' -f1)"
EXTRATESTDISK="$(echo $DISKS | cut -d' ' -f2)"

FS0=$TESTPOOL/$TESTFS
FS1=$TESTPOOL/$TESTFS1
FS2=$TESTPOOL/$TESTFS2

FS0FILE=/$FS0/$TESTFILE0
FS1FILE=/$FS1/$TESTFILE1
FS2FILE=/$FS2/$TESTFILE2

#
# The following are created within $TESTPOOL and
# will be used to back $NESTEDPOOL
#
DISKFS=$TESTPOOL/disks
FILEDISKDIR=/$DISKFS
FILEDISK1=/$DISKFS/dsk1
FILEDISK2=/$DISKFS/dsk2
FILEDISKS="$FILEDISK1 $FILEDISK2"

#
# $NESTEDPOOL related variables
#
NESTEDPOOL=nestedpool
NESTEDFS0=$NESTEDPOOL/$TESTFS
NESTEDFS1=$NESTEDPOOL/$TESTFS1
NESTEDFS2=$NESTEDPOOL/$TESTFS2
NESTEDFS0FILE=/$NESTEDFS0/$TESTFILE0
NESTEDFS1FILE=/$NESTEDFS1/$TESTFILE1
NESTEDFS2FILE=/$NESTEDFS2/$TESTFILE2

#
# In the tests that stress-test the pool (second category
# mentioned above), some tests need to bring fragmentation to
# high percentages in a relatively short period of time. In
# order to do that we set the following parameters:
#
# * We use two disks of 1G each, to create a pool of size 2G.
#   The point is that 2G is neither small nor large, and we also
#   want to have 2 disks to introduce indirect vdevs on our
#   setup.
# * We enable compression and set the record size of all
#   filesystems to 8K. The point of compression is to
#   ensure that we are not filling up the whole pool (that's
#   what checkpoint_capacity is for), and the specific
#   record size is set to match the block size of randwritecomp
#   which is used to increase fragmentation by writing on
#   files.
# * We always have 2 big files present of 512M each, which
#   should account for 40%~50% capacity by the end of each
#   test with fragmentation around 50~60%.
# * At each file we attempt to do enough random writes to
#   touch every offset twice on average.
#
# Note that the number of random writes per file is based
# on the following calculation:
#
# ((512M / 8K) * 3) * 2 = ~400000
#
# Given that the file is 512M and one write is 8K, we would
# need (512M / 8K) writes to go through the whole file.
# Assuming though that each write has a compression ratio of
# 3, then we want 3 times that to cover the same amount of
# space. Finally, we multiply that by 2 since our goal is to
# touch each offset twice on average.
#
# Examples of those tests are checkpoint_big_rewind and
# checkpoint_discard_busy.
#
FILEDISKSIZE=1g
DISKSIZE=1g
BIGFILESIZE=512M
RANDOMWRITES=400000
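#
# Quick sanity check of the calculation above (illustrative only,
# using shell arithmetic):
#
#	echo $(( (512 * 1024 / 8) * 3 * 2 ))	# prints 393216
#
# which is rounded up to the RANDOMWRITES value of 400000.
#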


#
# Assumes setup_test_pool has been called beforehand.
#
function setup_nested_pool
{
	log_must zfs create $DISKFS

	log_must truncate -s $DISKSIZE $FILEDISK1
	log_must truncate -s $DISKSIZE $FILEDISK2

	log_must zpool create -O sync=disabled $NESTEDPOOL $FILEDISKS
}

function setup_test_pool
{
	log_must zpool create -O sync=disabled $TESTPOOL "$TESTDISK"
}

function setup_nested_pools
{
	setup_test_pool
	setup_nested_pool
}

function cleanup_nested_pool
{
	log_must zpool destroy $NESTEDPOOL
	log_must rm -f $FILEDISKS
}

function cleanup_test_pool
{
	log_must zpool destroy $TESTPOOL

	#
	# We always clear the labels of all disks
	# between tests so imports from zpool or
	# zdb do not get confused with leftover
	# data from old pools.
	#
	for disk in $DISKS; do
		zpool labelclear -f $disk
	done
}

function cleanup_nested_pools
{
	cleanup_nested_pool
	cleanup_test_pool
}

#
# Remove and re-add each vdev to ensure that data is
# moved between disks and indirect mappings are created
#
function introduce_indirection
{
	for disk in ${FILEDISKS[@]}; do
		log_must zpool remove $NESTEDPOOL $disk
		log_must wait_for_removal $NESTEDPOOL
		log_mustnot vdevs_in_pool $NESTEDPOOL $disk
		log_must zpool add $NESTEDPOOL $disk
	done
}

FILECONTENTS0="Can't wait to be checkpointed!"
FILECONTENTS1="Can't wait to be checkpointed too!"
NEWFILECONTENTS0="I survived after the checkpoint!"
NEWFILECONTENTS2="I was born after the checkpoint!"

function populate_test_pool
{
	log_must zfs create -o compression=lz4 -o recordsize=8k $FS0
	log_must zfs create -o compression=lz4 -o recordsize=8k $FS1

	echo $FILECONTENTS0 > $FS0FILE
	echo $FILECONTENTS1 > $FS1FILE
}

function populate_nested_pool
{
	log_must zfs create -o compression=lz4 -o recordsize=8k $NESTEDFS0
	log_must zfs create -o compression=lz4 -o recordsize=8k $NESTEDFS1

	echo $FILECONTENTS0 > $NESTEDFS0FILE
	echo $FILECONTENTS1 > $NESTEDFS1FILE
}

function test_verify_pre_checkpoint_state
{
	log_must zfs list $FS0
	log_must zfs list $FS1
	log_must [ "$(cat $FS0FILE)" = "$FILECONTENTS0" ]
	log_must [ "$(cat $FS1FILE)" = "$FILECONTENTS1" ]

	#
	# If we've opened the checkpointed state of the
	# pool as read-only without rewinding on-disk we
	# can't really use zdb on it.
	#
	if [[ "$1" != "ro-check" ]] ; then
		log_must zdb $TESTPOOL
	fi

	#
	# Ensure post-checkpoint state is not present
	#
	log_mustnot zfs list $FS2
	log_mustnot [ "$(cat $FS0FILE)" = "$NEWFILECONTENTS0" ]
}

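#
# For reference, an illustrative sketch only (the surrounding
# export/import commands are assumptions about how a test would use
# the "ro-check" mode): the read-only variant corresponds to
# inspecting the checkpointed state without rewinding on-disk, e.g.
#
#	log_must zpool export $TESTPOOL
#	log_must zpool import -o readonly=on --rewind-to-checkpoint $TESTPOOL
#	test_verify_pre_checkpoint_state "ro-check"
#
# while a full (writable) rewind import omits the readonly option and
# lets zdb run against the pool.
#
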
function nested_verify_pre_checkpoint_state
{
	log_must zfs list $NESTEDFS0
	log_must zfs list $NESTEDFS1
	log_must [ "$(cat $NESTEDFS0FILE)" = "$FILECONTENTS0" ]
	log_must [ "$(cat $NESTEDFS1FILE)" = "$FILECONTENTS1" ]

	#
	# If we've opened the checkpointed state of the
	# pool as read-only without rewinding on-disk we
	# can't really use zdb on it.
	#
	if [[ "$1" != "ro-check" ]] ; then
		log_must zdb $NESTEDPOOL
	fi

	#
	# Ensure post-checkpoint state is not present
	#
	log_mustnot zfs list $NESTEDFS2
	log_mustnot [ "$(cat $NESTEDFS0FILE)" = "$NEWFILECONTENTS0" ]
}

function test_change_state_after_checkpoint
{
	log_must zfs destroy $FS1
	log_must zfs create -o compression=lz4 -o recordsize=8k $FS2

	echo $NEWFILECONTENTS0 > $FS0FILE
	echo $NEWFILECONTENTS2 > $FS2FILE
}

function nested_change_state_after_checkpoint
{
	log_must zfs destroy $NESTEDFS1
	log_must zfs create -o compression=lz4 -o recordsize=8k $NESTEDFS2

	echo $NEWFILECONTENTS0 > $NESTEDFS0FILE
	echo $NEWFILECONTENTS2 > $NESTEDFS2FILE
}

function test_verify_post_checkpoint_state
{
	log_must zfs list $FS0
	log_must zfs list $FS2
	log_must [ "$(cat $FS0FILE)" = "$NEWFILECONTENTS0" ]
	log_must [ "$(cat $FS2FILE)" = "$NEWFILECONTENTS2" ]

	log_must zdb $TESTPOOL

	#
	# Ensure pre-checkpointed state that was removed post-checkpoint
	# is not present
	#
	log_mustnot zfs list $FS1
	log_mustnot [ "$(cat $FS0FILE)" = "$FILECONTENTS0" ]
}

function fragment_before_checkpoint
{
	populate_nested_pool
	log_must mkfile -n $BIGFILESIZE $NESTEDFS0FILE
	log_must mkfile -n $BIGFILESIZE $NESTEDFS1FILE
	log_must randwritecomp $NESTEDFS0FILE $RANDOMWRITES
	log_must randwritecomp $NESTEDFS1FILE $RANDOMWRITES

	#
	# Display fragmentation on test log
	#
	log_must zpool list -v
}

function fragment_after_checkpoint_and_verify
{
	log_must zfs destroy $NESTEDFS1
	log_must zfs create -o compression=lz4 -o recordsize=8k $NESTEDFS2
	log_must mkfile -n $BIGFILESIZE $NESTEDFS2FILE
	log_must randwritecomp $NESTEDFS0FILE $RANDOMWRITES
	log_must randwritecomp $NESTEDFS2FILE $RANDOMWRITES

	#
	# Display fragmentation on test log
	#
	log_must zpool list -v

	log_must zdb $NESTEDPOOL
	log_must zdb -kc $NESTEDPOOL
}

function wait_discard_finish
{
	typeset pool="$1"

	typeset status
	status=$(zpool status $pool | grep "checkpoint:")
	while [ "" != "$status" ]; do
		sleep 5
		status=$(zpool status $pool | grep "checkpoint:")
	done
}

function test_wait_discard_finish
{
	wait_discard_finish $TESTPOOL
}

function nested_wait_discard_finish
{
	wait_discard_finish $NESTEDPOOL
}

#
# Creating the setup for the second group of tests mentioned in
# the block comment of this file can take some time as we are doing
# random writes to raise capacity and fragmentation before taking
# the checkpoint. Thus we create this setup once and save the
# disks of the nested pool in a temporary directory, from which we
# can reuse them for each test that requires that setup (see the
# usage sketch below).
#
SAVEDPOOLDIR="$TEST_BASE_DIR/ckpoint_saved_pool"
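#
# Illustrative sketch only (how these helpers are wired into setup
# and cleanup scripts is an assumption, not dictated by this file):
#
#	# test group setup:
#	test_group_premake_nested_pools
#
#	# each stress test:
#	log_onexit cleanup_nested_pools
#	setup_nested_pool_state
#	...
#
#	# test group cleanup:
#	test_group_destroy_saved_pool
#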

function test_group_premake_nested_pools
{
	setup_nested_pools

	#
	# Populate and fragment the pool.
	#
	fragment_before_checkpoint

	#
	# Export and save the pool for other tests.
	#
	log_must zpool export $NESTEDPOOL
	log_must mkdir $SAVEDPOOLDIR
	log_must cp $FILEDISKS $SAVEDPOOLDIR

	#
	# Reimport pool to be destroyed by
	# cleanup_nested_pools function
	#
	log_must zpool import -d $FILEDISKDIR $NESTEDPOOL
}

function test_group_destroy_saved_pool
{
	log_must rm -rf $SAVEDPOOLDIR
}

#
# Recreate nested pool setup from saved pool.
#
function setup_nested_pool_state
{
	setup_test_pool

	log_must zfs create $DISKFS
	log_must cp $SAVEDPOOLDIR/* $FILEDISKDIR

	log_must zpool import -d $FILEDISKDIR $NESTEDPOOL
}