4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright (c) 2011 by Delphix. All rights reserved.
24 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
28 * The objective of this program is to provide a DMU/ZAP/SPA stress test
29 * that runs entirely in userland, is easy to use, and easy to extend.
31 * The overall design of the ztest program is as follows:
33 * (1) For each major functional area (e.g. adding vdevs to a pool,
34 * creating and destroying datasets, reading and writing objects, etc)
35 * we have a simple routine to test that functionality. These
36 * individual routines do not have to do anything "stressful".
38 * (2) We turn these simple functionality tests into a stress test by
39 * running them all in parallel, with as many threads as desired,
40 * and spread across as many datasets, objects, and vdevs as desired.
42 * (3) While all this is happening, we inject faults into the pool to
43 * verify that self-healing data really works.
45 * (4) Every time we open a dataset, we change its checksum and compression
46 * functions. Thus even individual objects vary from block to block
47 * in which checksum they use and whether they're compressed.
49 * (5) To verify that we never lose on-disk consistency after a crash,
50 * we run the entire test in a child of the main process.
51 * At random times, the child self-immolates with a SIGKILL.
52 * This is the software equivalent of pulling the power cord.
53 * The parent then runs the test again, using the existing
54 * storage pool, as many times as desired.
56 * (6) To verify that we don't have future leaks or temporal incursions,
57 * many of the functional tests record the transaction group number
58 * as part of their data. When reading old data, they verify that
59 * the transaction group number is less than the current, open txg.
60 * If you add a new test, please do this if applicable.
62 * (7) Threads are created with a reduced stack size, for sanity checking.
63 * Therefore, it's important not to allocate huge buffers on the stack.
65 * When run with no arguments, ztest runs for about five minutes and
66 * produces no output if successful. To get a little bit of information,
67 * specify -V. To get more information, specify -VV, and so on.
69 * To turn this into an overnight stress test, use -T to specify run time.
71 * You can ask more more vdevs [-v], datasets [-d], or threads [-t]
72 * to increase the pool capacity, fanout, and overall stress level.
74 * The -N(okill) option will suppress kills, so each child runs to completion.
75 * This can be useful when you're trying to distinguish temporal incursions
76 * from plain old race conditions.
79 #include <sys/zfs_context.h>
85 #include <sys/dmu_objset.h>
91 #include <sys/resource.h>
94 #include <sys/zil_impl.h>
95 #include <sys/vdev_impl.h>
96 #include <sys/vdev_file.h>
97 #include <sys/spa_impl.h>
98 #include <sys/metaslab_impl.h>
99 #include <sys/dsl_prop.h>
100 #include <sys/dsl_dataset.h>
101 #include <sys/dsl_scan.h>
102 #include <sys/zio_checksum.h>
103 #include <sys/refcount.h>
105 #include <stdio_ext.h>
113 #include <sys/fs/zfs.h>
114 #include <libnvpair.h>
116 static char cmdname
[] = "ztest";
117 static char *zopt_pool
= cmdname
;
119 static uint64_t zopt_vdevs
= 5;
120 static uint64_t zopt_vdevtime
;
121 static int zopt_ashift
= SPA_MINBLOCKSHIFT
;
122 static int zopt_mirrors
= 2;
123 static int zopt_raidz
= 4;
124 static int zopt_raidz_parity
= 1;
125 static size_t zopt_vdev_size
= SPA_MINDEVSIZE
;
126 static int zopt_datasets
= 7;
127 static int zopt_threads
= 23;
128 static uint64_t zopt_passtime
= 60; /* 60 seconds */
129 static uint64_t zopt_killrate
= 70; /* 70% kill rate */
130 static int zopt_verbose
= 0;
131 static int zopt_init
= 1;
132 static char *zopt_dir
= "/tmp";
133 static uint64_t zopt_time
= 300; /* 5 minutes */
134 static uint64_t zopt_maxloops
= 50; /* max loops during spa_freeze() */
136 #define BT_MAGIC 0x123456789abcdefULL
137 #define MAXFAULTS() (MAX(zs->zs_mirrors, 1) * (zopt_raidz_parity + 1) - 1)
141 ZTEST_IO_WRITE_PATTERN
,
142 ZTEST_IO_WRITE_ZEROES
,
148 typedef struct ztest_block_tag
{
158 typedef struct bufwad
{
165 * XXX -- fix zfs range locks to be generic so we can use them here.
187 #define ZTEST_RANGE_LOCKS 64
188 #define ZTEST_OBJECT_LOCKS 64
191 * Object descriptor. Used as a template for object lookup/create/remove.
193 typedef struct ztest_od
{
196 dmu_object_type_t od_type
;
197 dmu_object_type_t od_crtype
;
198 uint64_t od_blocksize
;
199 uint64_t od_crblocksize
;
202 char od_name
[MAXNAMELEN
];
208 typedef struct ztest_ds
{
210 krwlock_t zd_zilog_lock
;
213 ztest_od_t
*zd_od
; /* debugging aid */
214 char zd_name
[MAXNAMELEN
];
215 kmutex_t zd_dirobj_lock
;
216 rll_t zd_object_lock
[ZTEST_OBJECT_LOCKS
];
217 rll_t zd_range_lock
[ZTEST_RANGE_LOCKS
];
221 * Per-iteration state.
223 typedef void ztest_func_t(ztest_ds_t
*zd
, uint64_t id
);
225 typedef struct ztest_info
{
226 ztest_func_t
*zi_func
; /* test function */
227 uint64_t zi_iters
; /* iterations per execution */
228 uint64_t *zi_interval
; /* execute every <interval> seconds */
229 uint64_t zi_call_count
; /* per-pass count */
230 uint64_t zi_call_time
; /* per-pass time */
231 uint64_t zi_call_next
; /* next time to call this function */
235 * Note: these aren't static because we want dladdr() to work.
237 ztest_func_t ztest_dmu_read_write
;
238 ztest_func_t ztest_dmu_write_parallel
;
239 ztest_func_t ztest_dmu_object_alloc_free
;
240 ztest_func_t ztest_dmu_commit_callbacks
;
241 ztest_func_t ztest_zap
;
242 ztest_func_t ztest_zap_parallel
;
243 ztest_func_t ztest_zil_commit
;
244 ztest_func_t ztest_zil_remount
;
245 ztest_func_t ztest_dmu_read_write_zcopy
;
246 ztest_func_t ztest_dmu_objset_create_destroy
;
247 ztest_func_t ztest_dmu_prealloc
;
248 ztest_func_t ztest_fzap
;
249 ztest_func_t ztest_dmu_snapshot_create_destroy
;
250 ztest_func_t ztest_dsl_prop_get_set
;
251 ztest_func_t ztest_spa_prop_get_set
;
252 ztest_func_t ztest_spa_create_destroy
;
253 ztest_func_t ztest_fault_inject
;
254 ztest_func_t ztest_ddt_repair
;
255 ztest_func_t ztest_dmu_snapshot_hold
;
256 ztest_func_t ztest_spa_rename
;
257 ztest_func_t ztest_scrub
;
258 ztest_func_t ztest_dsl_dataset_promote_busy
;
259 ztest_func_t ztest_vdev_attach_detach
;
260 ztest_func_t ztest_vdev_LUN_growth
;
261 ztest_func_t ztest_vdev_add_remove
;
262 ztest_func_t ztest_vdev_aux_add_remove
;
263 ztest_func_t ztest_split_pool
;
264 ztest_func_t ztest_reguid
;
266 uint64_t zopt_always
= 0ULL * NANOSEC
; /* all the time */
267 uint64_t zopt_incessant
= 1ULL * NANOSEC
/ 10; /* every 1/10 second */
268 uint64_t zopt_often
= 1ULL * NANOSEC
; /* every second */
269 uint64_t zopt_sometimes
= 10ULL * NANOSEC
; /* every 10 seconds */
270 uint64_t zopt_rarely
= 60ULL * NANOSEC
; /* every 60 seconds */
272 ztest_info_t ztest_info
[] = {
273 { ztest_dmu_read_write
, 1, &zopt_always
},
274 { ztest_dmu_write_parallel
, 10, &zopt_always
},
275 { ztest_dmu_object_alloc_free
, 1, &zopt_always
},
276 { ztest_dmu_commit_callbacks
, 1, &zopt_always
},
277 { ztest_zap
, 30, &zopt_always
},
278 { ztest_zap_parallel
, 100, &zopt_always
},
279 { ztest_split_pool
, 1, &zopt_always
},
280 { ztest_zil_commit
, 1, &zopt_incessant
},
281 { ztest_zil_remount
, 1, &zopt_sometimes
},
282 { ztest_dmu_read_write_zcopy
, 1, &zopt_often
},
283 { ztest_dmu_objset_create_destroy
, 1, &zopt_often
},
284 { ztest_dsl_prop_get_set
, 1, &zopt_often
},
285 { ztest_spa_prop_get_set
, 1, &zopt_sometimes
},
287 { ztest_dmu_prealloc
, 1, &zopt_sometimes
},
289 { ztest_fzap
, 1, &zopt_sometimes
},
290 { ztest_dmu_snapshot_create_destroy
, 1, &zopt_sometimes
},
291 { ztest_spa_create_destroy
, 1, &zopt_sometimes
},
292 { ztest_fault_inject
, 1, &zopt_sometimes
},
293 { ztest_ddt_repair
, 1, &zopt_sometimes
},
294 { ztest_dmu_snapshot_hold
, 1, &zopt_sometimes
},
295 { ztest_reguid
, 1, &zopt_sometimes
},
296 { ztest_spa_rename
, 1, &zopt_rarely
},
297 { ztest_scrub
, 1, &zopt_rarely
},
298 { ztest_dsl_dataset_promote_busy
, 1, &zopt_rarely
},
299 { ztest_vdev_attach_detach
, 1, &zopt_rarely
},
300 { ztest_vdev_LUN_growth
, 1, &zopt_rarely
},
301 { ztest_vdev_add_remove
, 1, &zopt_vdevtime
},
302 { ztest_vdev_aux_add_remove
, 1, &zopt_vdevtime
},
305 #define ZTEST_FUNCS (sizeof (ztest_info) / sizeof (ztest_info_t))
308 * The following struct is used to hold a list of uncalled commit callbacks.
309 * The callbacks are ordered by txg number.
311 typedef struct ztest_cb_list
{
312 kmutex_t zcl_callbacks_lock
;
313 list_t zcl_callbacks
;
317 * Stuff we need to share writably between parent and child.
319 typedef struct ztest_shared
{
322 hrtime_t zs_proc_start
;
323 hrtime_t zs_proc_stop
;
324 hrtime_t zs_thread_start
;
325 hrtime_t zs_thread_stop
;
326 hrtime_t zs_thread_kill
;
327 uint64_t zs_enospc_count
;
328 uint64_t zs_vdev_next_leaf
;
329 uint64_t zs_vdev_aux
;
333 kmutex_t zs_vdev_lock
;
334 krwlock_t zs_name_lock
;
335 ztest_info_t zs_info
[ZTEST_FUNCS
];
341 #define ID_PARALLEL -1ULL
343 static char ztest_dev_template
[] = "%s/%s.%llua";
344 static char ztest_aux_template
[] = "%s/%s.%s.%llu";
345 ztest_shared_t
*ztest_shared
;
348 static int ztest_random_fd
;
349 static int ztest_dump_core
= 1;
351 static boolean_t ztest_exiting
;
353 /* Global commit callback list */
354 static ztest_cb_list_t zcl
;
355 /* Commit cb delay */
356 static uint64_t zc_min_txg_delay
= UINT64_MAX
;
357 static int zc_cb_counter
= 0;
360 * Minimum number of commit callbacks that need to be registered for us to check
361 * whether the minimum txg delay is acceptable.
363 #define ZTEST_COMMIT_CB_MIN_REG 100
366 * If a number of txgs equal to this threshold have been created after a commit
367 * callback has been registered but not called, then we assume there is an
368 * implementation bug.
370 #define ZTEST_COMMIT_CB_THRESH (TXG_CONCURRENT_STATES + 1000)
372 extern uint64_t metaslab_gang_bang
;
373 extern uint64_t metaslab_df_alloc_threshold
;
374 static uint64_t metaslab_sz
;
377 ZTEST_META_DNODE
= 0,
382 static void usage(boolean_t
) __NORETURN
;
385 * These libumem hooks provide a reasonable set of defaults for the allocator's
386 * debugging facilities.
389 _umem_debug_init(void)
391 return ("default,verbose"); /* $UMEM_DEBUG setting */
395 _umem_logging_init(void)
397 return ("fail,contents"); /* $UMEM_LOGGING setting */
400 #define FATAL_MSG_SZ 1024
405 fatal(int do_perror
, char *message
, ...)
408 int save_errno
= errno
;
411 (void) fflush(stdout
);
412 buf
= umem_alloc(FATAL_MSG_SZ
, UMEM_NOFAIL
);
414 va_start(args
, message
);
415 (void) sprintf(buf
, "ztest: ");
417 (void) vsprintf(buf
+ strlen(buf
), message
, args
);
420 (void) snprintf(buf
+ strlen(buf
), FATAL_MSG_SZ
- strlen(buf
),
421 ": %s", strerror(save_errno
));
423 (void) fprintf(stderr
, "%s\n", buf
);
424 fatal_msg
= buf
; /* to ease debugging */
431 str2shift(const char *buf
)
433 const char *ends
= "BKMGTPEZ";
438 for (i
= 0; i
< strlen(ends
); i
++) {
439 if (toupper(buf
[0]) == ends
[i
])
442 if (i
== strlen(ends
)) {
443 (void) fprintf(stderr
, "ztest: invalid bytes suffix: %s\n",
447 if (buf
[1] == '\0' || (toupper(buf
[1]) == 'B' && buf
[2] == '\0')) {
450 (void) fprintf(stderr
, "ztest: invalid bytes suffix: %s\n", buf
);
456 nicenumtoull(const char *buf
)
461 val
= strtoull(buf
, &end
, 0);
463 (void) fprintf(stderr
, "ztest: bad numeric value: %s\n", buf
);
465 } else if (end
[0] == '.') {
466 double fval
= strtod(buf
, &end
);
467 fval
*= pow(2, str2shift(end
));
468 if (fval
> UINT64_MAX
) {
469 (void) fprintf(stderr
, "ztest: value too large: %s\n",
473 val
= (uint64_t)fval
;
475 int shift
= str2shift(end
);
476 if (shift
>= 64 || (val
<< shift
) >> shift
!= val
) {
477 (void) fprintf(stderr
, "ztest: value too large: %s\n",
487 usage(boolean_t requested
)
489 char nice_vdev_size
[10];
490 char nice_gang_bang
[10];
491 FILE *fp
= requested
? stdout
: stderr
;
493 nicenum(zopt_vdev_size
, nice_vdev_size
);
494 nicenum(metaslab_gang_bang
, nice_gang_bang
);
496 (void) fprintf(fp
, "Usage: %s\n"
497 "\t[-v vdevs (default: %llu)]\n"
498 "\t[-s size_of_each_vdev (default: %s)]\n"
499 "\t[-a alignment_shift (default: %d)] use 0 for random\n"
500 "\t[-m mirror_copies (default: %d)]\n"
501 "\t[-r raidz_disks (default: %d)]\n"
502 "\t[-R raidz_parity (default: %d)]\n"
503 "\t[-d datasets (default: %d)]\n"
504 "\t[-t threads (default: %d)]\n"
505 "\t[-g gang_block_threshold (default: %s)]\n"
506 "\t[-i init_count (default: %d)] initialize pool i times\n"
507 "\t[-k kill_percentage (default: %llu%%)]\n"
508 "\t[-p pool_name (default: %s)]\n"
509 "\t[-f dir (default: %s)] file directory for vdev files\n"
510 "\t[-V] verbose (use multiple times for ever more blather)\n"
511 "\t[-E] use existing pool instead of creating new one\n"
512 "\t[-T time (default: %llu sec)] total run time\n"
513 "\t[-F freezeloops (default: %llu)] max loops in spa_freeze()\n"
514 "\t[-P passtime (default: %llu sec)] time per pass\n"
515 "\t[-h] (print help)\n"
518 (u_longlong_t
)zopt_vdevs
, /* -v */
519 nice_vdev_size
, /* -s */
520 zopt_ashift
, /* -a */
521 zopt_mirrors
, /* -m */
523 zopt_raidz_parity
, /* -R */
524 zopt_datasets
, /* -d */
525 zopt_threads
, /* -t */
526 nice_gang_bang
, /* -g */
528 (u_longlong_t
)zopt_killrate
, /* -k */
531 (u_longlong_t
)zopt_time
, /* -T */
532 (u_longlong_t
)zopt_maxloops
, /* -F */
533 (u_longlong_t
)zopt_passtime
); /* -P */
534 exit(requested
? 0 : 1);
538 process_options(int argc
, char **argv
)
543 /* By default, test gang blocks for blocks 32K and greater */
544 metaslab_gang_bang
= 32 << 10;
546 while ((opt
= getopt(argc
, argv
,
547 "v:s:a:m:r:R:d:t:g:i:k:p:f:VET:P:hF:")) != EOF
) {
564 value
= nicenumtoull(optarg
);
571 zopt_vdev_size
= MAX(SPA_MINDEVSIZE
, value
);
577 zopt_mirrors
= value
;
580 zopt_raidz
= MAX(1, value
);
583 zopt_raidz_parity
= MIN(MAX(value
, 1), 3);
586 zopt_datasets
= MAX(1, value
);
589 zopt_threads
= MAX(1, value
);
592 metaslab_gang_bang
= MAX(SPA_MINBLOCKSIZE
<< 1, value
);
598 zopt_killrate
= value
;
601 zopt_pool
= strdup(optarg
);
604 zopt_dir
= strdup(optarg
);
616 zopt_passtime
= MAX(1, value
);
619 zopt_maxloops
= MAX(1, value
);
631 zopt_raidz_parity
= MIN(zopt_raidz_parity
, zopt_raidz
- 1);
633 zopt_vdevtime
= (zopt_vdevs
> 0 ? zopt_time
* NANOSEC
/ zopt_vdevs
:
638 ztest_kill(ztest_shared_t
*zs
)
640 zs
->zs_alloc
= metaslab_class_get_alloc(spa_normal_class(zs
->zs_spa
));
641 zs
->zs_space
= metaslab_class_get_space(spa_normal_class(zs
->zs_spa
));
642 (void) kill(getpid(), SIGKILL
);
646 ztest_random(uint64_t range
)
653 if (read(ztest_random_fd
, &r
, sizeof (r
)) != sizeof (r
))
654 fatal(1, "short read from /dev/urandom");
661 ztest_record_enospc(const char *s
)
663 ztest_shared
->zs_enospc_count
++;
667 ztest_get_ashift(void)
669 if (zopt_ashift
== 0)
670 return (SPA_MINBLOCKSHIFT
+ ztest_random(3));
671 return (zopt_ashift
);
675 make_vdev_file(char *path
, char *aux
, size_t size
, uint64_t ashift
)
681 pathbuf
= umem_alloc(MAXPATHLEN
, UMEM_NOFAIL
);
684 ashift
= ztest_get_ashift();
690 vdev
= ztest_shared
->zs_vdev_aux
;
691 (void) sprintf(path
, ztest_aux_template
,
692 zopt_dir
, zopt_pool
, aux
, vdev
);
694 vdev
= ztest_shared
->zs_vdev_next_leaf
++;
695 (void) sprintf(path
, ztest_dev_template
,
696 zopt_dir
, zopt_pool
, vdev
);
701 int fd
= open(path
, O_RDWR
| O_CREAT
| O_TRUNC
, 0666);
703 fatal(1, "can't open %s", path
);
704 if (ftruncate(fd
, size
) != 0)
705 fatal(1, "can't ftruncate %s", path
);
709 VERIFY(nvlist_alloc(&file
, NV_UNIQUE_NAME
, 0) == 0);
710 VERIFY(nvlist_add_string(file
, ZPOOL_CONFIG_TYPE
, VDEV_TYPE_FILE
) == 0);
711 VERIFY(nvlist_add_string(file
, ZPOOL_CONFIG_PATH
, path
) == 0);
712 VERIFY(nvlist_add_uint64(file
, ZPOOL_CONFIG_ASHIFT
, ashift
) == 0);
713 umem_free(pathbuf
, MAXPATHLEN
);
719 make_vdev_raidz(char *path
, char *aux
, size_t size
, uint64_t ashift
, int r
)
721 nvlist_t
*raidz
, **child
;
725 return (make_vdev_file(path
, aux
, size
, ashift
));
726 child
= umem_alloc(r
* sizeof (nvlist_t
*), UMEM_NOFAIL
);
728 for (c
= 0; c
< r
; c
++)
729 child
[c
] = make_vdev_file(path
, aux
, size
, ashift
);
731 VERIFY(nvlist_alloc(&raidz
, NV_UNIQUE_NAME
, 0) == 0);
732 VERIFY(nvlist_add_string(raidz
, ZPOOL_CONFIG_TYPE
,
733 VDEV_TYPE_RAIDZ
) == 0);
734 VERIFY(nvlist_add_uint64(raidz
, ZPOOL_CONFIG_NPARITY
,
735 zopt_raidz_parity
) == 0);
736 VERIFY(nvlist_add_nvlist_array(raidz
, ZPOOL_CONFIG_CHILDREN
,
739 for (c
= 0; c
< r
; c
++)
740 nvlist_free(child
[c
]);
742 umem_free(child
, r
* sizeof (nvlist_t
*));
748 make_vdev_mirror(char *path
, char *aux
, size_t size
, uint64_t ashift
,
751 nvlist_t
*mirror
, **child
;
755 return (make_vdev_raidz(path
, aux
, size
, ashift
, r
));
757 child
= umem_alloc(m
* sizeof (nvlist_t
*), UMEM_NOFAIL
);
759 for (c
= 0; c
< m
; c
++)
760 child
[c
] = make_vdev_raidz(path
, aux
, size
, ashift
, r
);
762 VERIFY(nvlist_alloc(&mirror
, NV_UNIQUE_NAME
, 0) == 0);
763 VERIFY(nvlist_add_string(mirror
, ZPOOL_CONFIG_TYPE
,
764 VDEV_TYPE_MIRROR
) == 0);
765 VERIFY(nvlist_add_nvlist_array(mirror
, ZPOOL_CONFIG_CHILDREN
,
768 for (c
= 0; c
< m
; c
++)
769 nvlist_free(child
[c
]);
771 umem_free(child
, m
* sizeof (nvlist_t
*));
777 make_vdev_root(char *path
, char *aux
, size_t size
, uint64_t ashift
,
778 int log
, int r
, int m
, int t
)
780 nvlist_t
*root
, **child
;
785 child
= umem_alloc(t
* sizeof (nvlist_t
*), UMEM_NOFAIL
);
787 for (c
= 0; c
< t
; c
++) {
788 child
[c
] = make_vdev_mirror(path
, aux
, size
, ashift
, r
, m
);
789 VERIFY(nvlist_add_uint64(child
[c
], ZPOOL_CONFIG_IS_LOG
,
793 VERIFY(nvlist_alloc(&root
, NV_UNIQUE_NAME
, 0) == 0);
794 VERIFY(nvlist_add_string(root
, ZPOOL_CONFIG_TYPE
, VDEV_TYPE_ROOT
) == 0);
795 VERIFY(nvlist_add_nvlist_array(root
, aux
? aux
: ZPOOL_CONFIG_CHILDREN
,
798 for (c
= 0; c
< t
; c
++)
799 nvlist_free(child
[c
]);
801 umem_free(child
, t
* sizeof (nvlist_t
*));
807 ztest_random_blocksize(void)
809 return (1 << (SPA_MINBLOCKSHIFT
+
810 ztest_random(SPA_MAXBLOCKSHIFT
- SPA_MINBLOCKSHIFT
+ 1)));
814 ztest_random_ibshift(void)
816 return (DN_MIN_INDBLKSHIFT
+
817 ztest_random(DN_MAX_INDBLKSHIFT
- DN_MIN_INDBLKSHIFT
+ 1));
821 ztest_random_vdev_top(spa_t
*spa
, boolean_t log_ok
)
824 vdev_t
*rvd
= spa
->spa_root_vdev
;
827 ASSERT(spa_config_held(spa
, SCL_ALL
, RW_READER
) != 0);
830 top
= ztest_random(rvd
->vdev_children
);
831 tvd
= rvd
->vdev_child
[top
];
832 } while (tvd
->vdev_ishole
|| (tvd
->vdev_islog
&& !log_ok
) ||
833 tvd
->vdev_mg
== NULL
|| tvd
->vdev_mg
->mg_class
== NULL
);
839 ztest_random_dsl_prop(zfs_prop_t prop
)
844 value
= zfs_prop_random_value(prop
, ztest_random(-1ULL));
845 } while (prop
== ZFS_PROP_CHECKSUM
&& value
== ZIO_CHECKSUM_OFF
);
851 ztest_dsl_prop_set_uint64(char *osname
, zfs_prop_t prop
, uint64_t value
,
854 const char *propname
= zfs_prop_to_name(prop
);
860 error
= dsl_prop_set(osname
, propname
,
861 (inherit
? ZPROP_SRC_NONE
: ZPROP_SRC_LOCAL
),
862 sizeof (value
), 1, &value
);
864 if (error
== ENOSPC
) {
865 ztest_record_enospc(FTAG
);
868 ASSERT3U(error
, ==, 0);
870 setpoint
= umem_alloc(MAXPATHLEN
, UMEM_NOFAIL
);
871 VERIFY3U(dsl_prop_get(osname
, propname
, sizeof (curval
),
872 1, &curval
, setpoint
), ==, 0);
874 if (zopt_verbose
>= 6) {
875 VERIFY(zfs_prop_index_to_string(prop
, curval
, &valname
) == 0);
876 (void) printf("%s %s = %s at '%s'\n",
877 osname
, propname
, valname
, setpoint
);
879 umem_free(setpoint
, MAXPATHLEN
);
885 ztest_spa_prop_set_uint64(ztest_shared_t
*zs
, zpool_prop_t prop
, uint64_t value
)
887 spa_t
*spa
= zs
->zs_spa
;
888 nvlist_t
*props
= NULL
;
891 VERIFY(nvlist_alloc(&props
, NV_UNIQUE_NAME
, 0) == 0);
892 VERIFY(nvlist_add_uint64(props
, zpool_prop_to_name(prop
), value
) == 0);
894 error
= spa_prop_set(spa
, props
);
898 if (error
== ENOSPC
) {
899 ztest_record_enospc(FTAG
);
902 ASSERT3U(error
, ==, 0);
908 ztest_rll_init(rll_t
*rll
)
910 rll
->rll_writer
= NULL
;
911 rll
->rll_readers
= 0;
912 mutex_init(&rll
->rll_lock
, NULL
, MUTEX_DEFAULT
, NULL
);
913 cv_init(&rll
->rll_cv
, NULL
, CV_DEFAULT
, NULL
);
917 ztest_rll_destroy(rll_t
*rll
)
919 ASSERT(rll
->rll_writer
== NULL
);
920 ASSERT(rll
->rll_readers
== 0);
921 mutex_destroy(&rll
->rll_lock
);
922 cv_destroy(&rll
->rll_cv
);
926 ztest_rll_lock(rll_t
*rll
, rl_type_t type
)
928 mutex_enter(&rll
->rll_lock
);
930 if (type
== RL_READER
) {
931 while (rll
->rll_writer
!= NULL
)
932 (void) cv_wait(&rll
->rll_cv
, &rll
->rll_lock
);
935 while (rll
->rll_writer
!= NULL
|| rll
->rll_readers
)
936 (void) cv_wait(&rll
->rll_cv
, &rll
->rll_lock
);
937 rll
->rll_writer
= curthread
;
940 mutex_exit(&rll
->rll_lock
);
944 ztest_rll_unlock(rll_t
*rll
)
946 mutex_enter(&rll
->rll_lock
);
948 if (rll
->rll_writer
) {
949 ASSERT(rll
->rll_readers
== 0);
950 rll
->rll_writer
= NULL
;
952 ASSERT(rll
->rll_readers
!= 0);
953 ASSERT(rll
->rll_writer
== NULL
);
957 if (rll
->rll_writer
== NULL
&& rll
->rll_readers
== 0)
958 cv_broadcast(&rll
->rll_cv
);
960 mutex_exit(&rll
->rll_lock
);
964 ztest_object_lock(ztest_ds_t
*zd
, uint64_t object
, rl_type_t type
)
966 rll_t
*rll
= &zd
->zd_object_lock
[object
& (ZTEST_OBJECT_LOCKS
- 1)];
968 ztest_rll_lock(rll
, type
);
972 ztest_object_unlock(ztest_ds_t
*zd
, uint64_t object
)
974 rll_t
*rll
= &zd
->zd_object_lock
[object
& (ZTEST_OBJECT_LOCKS
- 1)];
976 ztest_rll_unlock(rll
);
980 ztest_range_lock(ztest_ds_t
*zd
, uint64_t object
, uint64_t offset
,
981 uint64_t size
, rl_type_t type
)
983 uint64_t hash
= object
^ (offset
% (ZTEST_RANGE_LOCKS
+ 1));
984 rll_t
*rll
= &zd
->zd_range_lock
[hash
& (ZTEST_RANGE_LOCKS
- 1)];
987 rl
= umem_alloc(sizeof (*rl
), UMEM_NOFAIL
);
988 rl
->rl_object
= object
;
989 rl
->rl_offset
= offset
;
993 ztest_rll_lock(rll
, type
);
999 ztest_range_unlock(rl_t
*rl
)
1001 rll_t
*rll
= rl
->rl_lock
;
1003 ztest_rll_unlock(rll
);
1005 umem_free(rl
, sizeof (*rl
));
1009 ztest_zd_init(ztest_ds_t
*zd
, objset_t
*os
)
1012 zd
->zd_zilog
= dmu_objset_zil(os
);
1014 dmu_objset_name(os
, zd
->zd_name
);
1017 rw_init(&zd
->zd_zilog_lock
, NULL
, RW_DEFAULT
, NULL
);
1018 mutex_init(&zd
->zd_dirobj_lock
, NULL
, MUTEX_DEFAULT
, NULL
);
1020 for (l
= 0; l
< ZTEST_OBJECT_LOCKS
; l
++)
1021 ztest_rll_init(&zd
->zd_object_lock
[l
]);
1023 for (l
= 0; l
< ZTEST_RANGE_LOCKS
; l
++)
1024 ztest_rll_init(&zd
->zd_range_lock
[l
]);
1028 ztest_zd_fini(ztest_ds_t
*zd
)
1032 mutex_destroy(&zd
->zd_dirobj_lock
);
1033 rw_destroy(&zd
->zd_zilog_lock
);
1035 for (l
= 0; l
< ZTEST_OBJECT_LOCKS
; l
++)
1036 ztest_rll_destroy(&zd
->zd_object_lock
[l
]);
1038 for (l
= 0; l
< ZTEST_RANGE_LOCKS
; l
++)
1039 ztest_rll_destroy(&zd
->zd_range_lock
[l
]);
1042 #define TXG_MIGHTWAIT (ztest_random(10) == 0 ? TXG_NOWAIT : TXG_WAIT)
1045 ztest_tx_assign(dmu_tx_t
*tx
, uint64_t txg_how
, const char *tag
)
1051 * Attempt to assign tx to some transaction group.
1053 error
= dmu_tx_assign(tx
, txg_how
);
1055 if (error
== ERESTART
) {
1056 ASSERT(txg_how
== TXG_NOWAIT
);
1059 ASSERT3U(error
, ==, ENOSPC
);
1060 ztest_record_enospc(tag
);
1065 txg
= dmu_tx_get_txg(tx
);
1071 ztest_pattern_set(void *buf
, uint64_t size
, uint64_t value
)
1074 uint64_t *ip_end
= (uint64_t *)((uintptr_t)buf
+ (uintptr_t)size
);
1082 ztest_pattern_match(void *buf
, uint64_t size
, uint64_t value
)
1085 uint64_t *ip_end
= (uint64_t *)((uintptr_t)buf
+ (uintptr_t)size
);
1089 diff
|= (value
- *ip
++);
1096 ztest_bt_generate(ztest_block_tag_t
*bt
, objset_t
*os
, uint64_t object
,
1097 uint64_t offset
, uint64_t gen
, uint64_t txg
, uint64_t crtxg
)
1099 bt
->bt_magic
= BT_MAGIC
;
1100 bt
->bt_objset
= dmu_objset_id(os
);
1101 bt
->bt_object
= object
;
1102 bt
->bt_offset
= offset
;
1105 bt
->bt_crtxg
= crtxg
;
1109 ztest_bt_verify(ztest_block_tag_t
*bt
, objset_t
*os
, uint64_t object
,
1110 uint64_t offset
, uint64_t gen
, uint64_t txg
, uint64_t crtxg
)
1112 ASSERT(bt
->bt_magic
== BT_MAGIC
);
1113 ASSERT(bt
->bt_objset
== dmu_objset_id(os
));
1114 ASSERT(bt
->bt_object
== object
);
1115 ASSERT(bt
->bt_offset
== offset
);
1116 ASSERT(bt
->bt_gen
<= gen
);
1117 ASSERT(bt
->bt_txg
<= txg
);
1118 ASSERT(bt
->bt_crtxg
== crtxg
);
1121 static ztest_block_tag_t
*
1122 ztest_bt_bonus(dmu_buf_t
*db
)
1124 dmu_object_info_t doi
;
1125 ztest_block_tag_t
*bt
;
1127 dmu_object_info_from_db(db
, &doi
);
1128 ASSERT3U(doi
.doi_bonus_size
, <=, db
->db_size
);
1129 ASSERT3U(doi
.doi_bonus_size
, >=, sizeof (*bt
));
1130 bt
= (void *)((char *)db
->db_data
+ doi
.doi_bonus_size
- sizeof (*bt
));
1139 #define lrz_type lr_mode
1140 #define lrz_blocksize lr_uid
1141 #define lrz_ibshift lr_gid
1142 #define lrz_bonustype lr_rdev
1143 #define lrz_bonuslen lr_crtime[1]
1146 ztest_log_create(ztest_ds_t
*zd
, dmu_tx_t
*tx
, lr_create_t
*lr
)
1148 char *name
= (void *)(lr
+ 1); /* name follows lr */
1149 size_t namesize
= strlen(name
) + 1;
1152 if (zil_replaying(zd
->zd_zilog
, tx
))
1155 itx
= zil_itx_create(TX_CREATE
, sizeof (*lr
) + namesize
);
1156 bcopy(&lr
->lr_common
+ 1, &itx
->itx_lr
+ 1,
1157 sizeof (*lr
) + namesize
- sizeof (lr_t
));
1159 zil_itx_assign(zd
->zd_zilog
, itx
, tx
);
1163 ztest_log_remove(ztest_ds_t
*zd
, dmu_tx_t
*tx
, lr_remove_t
*lr
, uint64_t object
)
1165 char *name
= (void *)(lr
+ 1); /* name follows lr */
1166 size_t namesize
= strlen(name
) + 1;
1169 if (zil_replaying(zd
->zd_zilog
, tx
))
1172 itx
= zil_itx_create(TX_REMOVE
, sizeof (*lr
) + namesize
);
1173 bcopy(&lr
->lr_common
+ 1, &itx
->itx_lr
+ 1,
1174 sizeof (*lr
) + namesize
- sizeof (lr_t
));
1176 itx
->itx_oid
= object
;
1177 zil_itx_assign(zd
->zd_zilog
, itx
, tx
);
1181 ztest_log_write(ztest_ds_t
*zd
, dmu_tx_t
*tx
, lr_write_t
*lr
)
1184 itx_wr_state_t write_state
= ztest_random(WR_NUM_STATES
);
1186 if (zil_replaying(zd
->zd_zilog
, tx
))
1189 if (lr
->lr_length
> ZIL_MAX_LOG_DATA
)
1190 write_state
= WR_INDIRECT
;
1192 itx
= zil_itx_create(TX_WRITE
,
1193 sizeof (*lr
) + (write_state
== WR_COPIED
? lr
->lr_length
: 0));
1195 if (write_state
== WR_COPIED
&&
1196 dmu_read(zd
->zd_os
, lr
->lr_foid
, lr
->lr_offset
, lr
->lr_length
,
1197 ((lr_write_t
*)&itx
->itx_lr
) + 1, DMU_READ_NO_PREFETCH
) != 0) {
1198 zil_itx_destroy(itx
);
1199 itx
= zil_itx_create(TX_WRITE
, sizeof (*lr
));
1200 write_state
= WR_NEED_COPY
;
1202 itx
->itx_private
= zd
;
1203 itx
->itx_wr_state
= write_state
;
1204 itx
->itx_sync
= (ztest_random(8) == 0);
1205 itx
->itx_sod
+= (write_state
== WR_NEED_COPY
? lr
->lr_length
: 0);
1207 bcopy(&lr
->lr_common
+ 1, &itx
->itx_lr
+ 1,
1208 sizeof (*lr
) - sizeof (lr_t
));
1210 zil_itx_assign(zd
->zd_zilog
, itx
, tx
);
1214 ztest_log_truncate(ztest_ds_t
*zd
, dmu_tx_t
*tx
, lr_truncate_t
*lr
)
1218 if (zil_replaying(zd
->zd_zilog
, tx
))
1221 itx
= zil_itx_create(TX_TRUNCATE
, sizeof (*lr
));
1222 bcopy(&lr
->lr_common
+ 1, &itx
->itx_lr
+ 1,
1223 sizeof (*lr
) - sizeof (lr_t
));
1225 itx
->itx_sync
= B_FALSE
;
1226 zil_itx_assign(zd
->zd_zilog
, itx
, tx
);
1230 ztest_log_setattr(ztest_ds_t
*zd
, dmu_tx_t
*tx
, lr_setattr_t
*lr
)
1234 if (zil_replaying(zd
->zd_zilog
, tx
))
1237 itx
= zil_itx_create(TX_SETATTR
, sizeof (*lr
));
1238 bcopy(&lr
->lr_common
+ 1, &itx
->itx_lr
+ 1,
1239 sizeof (*lr
) - sizeof (lr_t
));
1241 itx
->itx_sync
= B_FALSE
;
1242 zil_itx_assign(zd
->zd_zilog
, itx
, tx
);
1249 ztest_replay_create(ztest_ds_t
*zd
, lr_create_t
*lr
, boolean_t byteswap
)
1251 char *name
= (void *)(lr
+ 1); /* name follows lr */
1252 objset_t
*os
= zd
->zd_os
;
1253 ztest_block_tag_t
*bbt
;
1260 byteswap_uint64_array(lr
, sizeof (*lr
));
1262 ASSERT(lr
->lr_doid
== ZTEST_DIROBJ
);
1263 ASSERT(name
[0] != '\0');
1265 tx
= dmu_tx_create(os
);
1267 dmu_tx_hold_zap(tx
, lr
->lr_doid
, B_TRUE
, name
);
1269 if (lr
->lrz_type
== DMU_OT_ZAP_OTHER
) {
1270 dmu_tx_hold_zap(tx
, DMU_NEW_OBJECT
, B_TRUE
, NULL
);
1272 dmu_tx_hold_bonus(tx
, DMU_NEW_OBJECT
);
1275 txg
= ztest_tx_assign(tx
, TXG_WAIT
, FTAG
);
1279 ASSERT(dmu_objset_zil(os
)->zl_replay
== !!lr
->lr_foid
);
1281 if (lr
->lrz_type
== DMU_OT_ZAP_OTHER
) {
1282 if (lr
->lr_foid
== 0) {
1283 lr
->lr_foid
= zap_create(os
,
1284 lr
->lrz_type
, lr
->lrz_bonustype
,
1285 lr
->lrz_bonuslen
, tx
);
1287 error
= zap_create_claim(os
, lr
->lr_foid
,
1288 lr
->lrz_type
, lr
->lrz_bonustype
,
1289 lr
->lrz_bonuslen
, tx
);
1292 if (lr
->lr_foid
== 0) {
1293 lr
->lr_foid
= dmu_object_alloc(os
,
1294 lr
->lrz_type
, 0, lr
->lrz_bonustype
,
1295 lr
->lrz_bonuslen
, tx
);
1297 error
= dmu_object_claim(os
, lr
->lr_foid
,
1298 lr
->lrz_type
, 0, lr
->lrz_bonustype
,
1299 lr
->lrz_bonuslen
, tx
);
1304 ASSERT3U(error
, ==, EEXIST
);
1305 ASSERT(zd
->zd_zilog
->zl_replay
);
1310 ASSERT(lr
->lr_foid
!= 0);
1312 if (lr
->lrz_type
!= DMU_OT_ZAP_OTHER
)
1313 VERIFY3U(0, ==, dmu_object_set_blocksize(os
, lr
->lr_foid
,
1314 lr
->lrz_blocksize
, lr
->lrz_ibshift
, tx
));
1316 VERIFY3U(0, ==, dmu_bonus_hold(os
, lr
->lr_foid
, FTAG
, &db
));
1317 bbt
= ztest_bt_bonus(db
);
1318 dmu_buf_will_dirty(db
, tx
);
1319 ztest_bt_generate(bbt
, os
, lr
->lr_foid
, -1ULL, lr
->lr_gen
, txg
, txg
);
1320 dmu_buf_rele(db
, FTAG
);
1322 VERIFY3U(0, ==, zap_add(os
, lr
->lr_doid
, name
, sizeof (uint64_t), 1,
1325 (void) ztest_log_create(zd
, tx
, lr
);
1333 ztest_replay_remove(ztest_ds_t
*zd
, lr_remove_t
*lr
, boolean_t byteswap
)
1335 char *name
= (void *)(lr
+ 1); /* name follows lr */
1336 objset_t
*os
= zd
->zd_os
;
1337 dmu_object_info_t doi
;
1339 uint64_t object
, txg
;
1342 byteswap_uint64_array(lr
, sizeof (*lr
));
1344 ASSERT(lr
->lr_doid
== ZTEST_DIROBJ
);
1345 ASSERT(name
[0] != '\0');
1348 zap_lookup(os
, lr
->lr_doid
, name
, sizeof (object
), 1, &object
));
1349 ASSERT(object
!= 0);
1351 ztest_object_lock(zd
, object
, RL_WRITER
);
1353 VERIFY3U(0, ==, dmu_object_info(os
, object
, &doi
));
1355 tx
= dmu_tx_create(os
);
1357 dmu_tx_hold_zap(tx
, lr
->lr_doid
, B_FALSE
, name
);
1358 dmu_tx_hold_free(tx
, object
, 0, DMU_OBJECT_END
);
1360 txg
= ztest_tx_assign(tx
, TXG_WAIT
, FTAG
);
1362 ztest_object_unlock(zd
, object
);
1366 if (doi
.doi_type
== DMU_OT_ZAP_OTHER
) {
1367 VERIFY3U(0, ==, zap_destroy(os
, object
, tx
));
1369 VERIFY3U(0, ==, dmu_object_free(os
, object
, tx
));
1372 VERIFY3U(0, ==, zap_remove(os
, lr
->lr_doid
, name
, tx
));
1374 (void) ztest_log_remove(zd
, tx
, lr
, object
);
1378 ztest_object_unlock(zd
, object
);
1384 ztest_replay_write(ztest_ds_t
*zd
, lr_write_t
*lr
, boolean_t byteswap
)
1386 objset_t
*os
= zd
->zd_os
;
1387 void *data
= lr
+ 1; /* data follows lr */
1388 uint64_t offset
, length
;
1389 ztest_block_tag_t
*bt
= data
;
1390 ztest_block_tag_t
*bbt
;
1391 uint64_t gen
, txg
, lrtxg
, crtxg
;
1392 dmu_object_info_t doi
;
1395 arc_buf_t
*abuf
= NULL
;
1399 byteswap_uint64_array(lr
, sizeof (*lr
));
1401 offset
= lr
->lr_offset
;
1402 length
= lr
->lr_length
;
1404 /* If it's a dmu_sync() block, write the whole block */
1405 if (lr
->lr_common
.lrc_reclen
== sizeof (lr_write_t
)) {
1406 uint64_t blocksize
= BP_GET_LSIZE(&lr
->lr_blkptr
);
1407 if (length
< blocksize
) {
1408 offset
-= offset
% blocksize
;
1413 if (bt
->bt_magic
== BSWAP_64(BT_MAGIC
))
1414 byteswap_uint64_array(bt
, sizeof (*bt
));
1416 if (bt
->bt_magic
!= BT_MAGIC
)
1419 ztest_object_lock(zd
, lr
->lr_foid
, RL_READER
);
1420 rl
= ztest_range_lock(zd
, lr
->lr_foid
, offset
, length
, RL_WRITER
);
1422 VERIFY3U(0, ==, dmu_bonus_hold(os
, lr
->lr_foid
, FTAG
, &db
));
1424 dmu_object_info_from_db(db
, &doi
);
1426 bbt
= ztest_bt_bonus(db
);
1427 ASSERT3U(bbt
->bt_magic
, ==, BT_MAGIC
);
1429 crtxg
= bbt
->bt_crtxg
;
1430 lrtxg
= lr
->lr_common
.lrc_txg
;
1432 tx
= dmu_tx_create(os
);
1434 dmu_tx_hold_write(tx
, lr
->lr_foid
, offset
, length
);
1436 if (ztest_random(8) == 0 && length
== doi
.doi_data_block_size
&&
1437 P2PHASE(offset
, length
) == 0)
1438 abuf
= dmu_request_arcbuf(db
, length
);
1440 txg
= ztest_tx_assign(tx
, TXG_WAIT
, FTAG
);
1443 dmu_return_arcbuf(abuf
);
1444 dmu_buf_rele(db
, FTAG
);
1445 ztest_range_unlock(rl
);
1446 ztest_object_unlock(zd
, lr
->lr_foid
);
1452 * Usually, verify the old data before writing new data --
1453 * but not always, because we also want to verify correct
1454 * behavior when the data was not recently read into cache.
1456 ASSERT(offset
% doi
.doi_data_block_size
== 0);
1457 if (ztest_random(4) != 0) {
1458 int prefetch
= ztest_random(2) ?
1459 DMU_READ_PREFETCH
: DMU_READ_NO_PREFETCH
;
1460 ztest_block_tag_t rbt
;
1462 VERIFY(dmu_read(os
, lr
->lr_foid
, offset
,
1463 sizeof (rbt
), &rbt
, prefetch
) == 0);
1464 if (rbt
.bt_magic
== BT_MAGIC
) {
1465 ztest_bt_verify(&rbt
, os
, lr
->lr_foid
,
1466 offset
, gen
, txg
, crtxg
);
1471 * Writes can appear to be newer than the bonus buffer because
1472 * the ztest_get_data() callback does a dmu_read() of the
1473 * open-context data, which may be different than the data
1474 * as it was when the write was generated.
1476 if (zd
->zd_zilog
->zl_replay
) {
1477 ztest_bt_verify(bt
, os
, lr
->lr_foid
, offset
,
1478 MAX(gen
, bt
->bt_gen
), MAX(txg
, lrtxg
),
1483 * Set the bt's gen/txg to the bonus buffer's gen/txg
1484 * so that all of the usual ASSERTs will work.
1486 ztest_bt_generate(bt
, os
, lr
->lr_foid
, offset
, gen
, txg
, crtxg
);
1490 dmu_write(os
, lr
->lr_foid
, offset
, length
, data
, tx
);
1492 bcopy(data
, abuf
->b_data
, length
);
1493 dmu_assign_arcbuf(db
, offset
, abuf
, tx
);
1496 (void) ztest_log_write(zd
, tx
, lr
);
1498 dmu_buf_rele(db
, FTAG
);
1502 ztest_range_unlock(rl
);
1503 ztest_object_unlock(zd
, lr
->lr_foid
);
1509 ztest_replay_truncate(ztest_ds_t
*zd
, lr_truncate_t
*lr
, boolean_t byteswap
)
1511 objset_t
*os
= zd
->zd_os
;
1517 byteswap_uint64_array(lr
, sizeof (*lr
));
1519 ztest_object_lock(zd
, lr
->lr_foid
, RL_READER
);
1520 rl
= ztest_range_lock(zd
, lr
->lr_foid
, lr
->lr_offset
, lr
->lr_length
,
1523 tx
= dmu_tx_create(os
);
1525 dmu_tx_hold_free(tx
, lr
->lr_foid
, lr
->lr_offset
, lr
->lr_length
);
1527 txg
= ztest_tx_assign(tx
, TXG_WAIT
, FTAG
);
1529 ztest_range_unlock(rl
);
1530 ztest_object_unlock(zd
, lr
->lr_foid
);
1534 VERIFY(dmu_free_range(os
, lr
->lr_foid
, lr
->lr_offset
,
1535 lr
->lr_length
, tx
) == 0);
1537 (void) ztest_log_truncate(zd
, tx
, lr
);
1541 ztest_range_unlock(rl
);
1542 ztest_object_unlock(zd
, lr
->lr_foid
);
1548 ztest_replay_setattr(ztest_ds_t
*zd
, lr_setattr_t
*lr
, boolean_t byteswap
)
1550 objset_t
*os
= zd
->zd_os
;
1553 ztest_block_tag_t
*bbt
;
1554 uint64_t txg
, lrtxg
, crtxg
;
1557 byteswap_uint64_array(lr
, sizeof (*lr
));
1559 ztest_object_lock(zd
, lr
->lr_foid
, RL_WRITER
);
1561 VERIFY3U(0, ==, dmu_bonus_hold(os
, lr
->lr_foid
, FTAG
, &db
));
1563 tx
= dmu_tx_create(os
);
1564 dmu_tx_hold_bonus(tx
, lr
->lr_foid
);
1566 txg
= ztest_tx_assign(tx
, TXG_WAIT
, FTAG
);
1568 dmu_buf_rele(db
, FTAG
);
1569 ztest_object_unlock(zd
, lr
->lr_foid
);
1573 bbt
= ztest_bt_bonus(db
);
1574 ASSERT3U(bbt
->bt_magic
, ==, BT_MAGIC
);
1575 crtxg
= bbt
->bt_crtxg
;
1576 lrtxg
= lr
->lr_common
.lrc_txg
;
1578 if (zd
->zd_zilog
->zl_replay
) {
1579 ASSERT(lr
->lr_size
!= 0);
1580 ASSERT(lr
->lr_mode
!= 0);
1584 * Randomly change the size and increment the generation.
1586 lr
->lr_size
= (ztest_random(db
->db_size
/ sizeof (*bbt
)) + 1) *
1588 lr
->lr_mode
= bbt
->bt_gen
+ 1;
1593 * Verify that the current bonus buffer is not newer than our txg.
1595 ztest_bt_verify(bbt
, os
, lr
->lr_foid
, -1ULL, lr
->lr_mode
,
1596 MAX(txg
, lrtxg
), crtxg
);
1598 dmu_buf_will_dirty(db
, tx
);
1600 ASSERT3U(lr
->lr_size
, >=, sizeof (*bbt
));
1601 ASSERT3U(lr
->lr_size
, <=, db
->db_size
);
1602 VERIFY3U(dmu_set_bonus(db
, lr
->lr_size
, tx
), ==, 0);
1603 bbt
= ztest_bt_bonus(db
);
1605 ztest_bt_generate(bbt
, os
, lr
->lr_foid
, -1ULL, lr
->lr_mode
, txg
, crtxg
);
1607 dmu_buf_rele(db
, FTAG
);
1609 (void) ztest_log_setattr(zd
, tx
, lr
);
1613 ztest_object_unlock(zd
, lr
->lr_foid
);
1618 zil_replay_func_t
*ztest_replay_vector
[TX_MAX_TYPE
] = {
1619 NULL
, /* 0 no such transaction type */
1620 (zil_replay_func_t
*)ztest_replay_create
, /* TX_CREATE */
1621 NULL
, /* TX_MKDIR */
1622 NULL
, /* TX_MKXATTR */
1623 NULL
, /* TX_SYMLINK */
1624 (zil_replay_func_t
*)ztest_replay_remove
, /* TX_REMOVE */
1625 NULL
, /* TX_RMDIR */
1627 NULL
, /* TX_RENAME */
1628 (zil_replay_func_t
*)ztest_replay_write
, /* TX_WRITE */
1629 (zil_replay_func_t
*)ztest_replay_truncate
, /* TX_TRUNCATE */
1630 (zil_replay_func_t
*)ztest_replay_setattr
, /* TX_SETATTR */
1632 NULL
, /* TX_CREATE_ACL */
1633 NULL
, /* TX_CREATE_ATTR */
1634 NULL
, /* TX_CREATE_ACL_ATTR */
1635 NULL
, /* TX_MKDIR_ACL */
1636 NULL
, /* TX_MKDIR_ATTR */
1637 NULL
, /* TX_MKDIR_ACL_ATTR */
1638 NULL
, /* TX_WRITE2 */
1642 * ZIL get_data callbacks
1646 ztest_get_done(zgd_t
*zgd
, int error
)
1648 ztest_ds_t
*zd
= zgd
->zgd_private
;
1649 uint64_t object
= zgd
->zgd_rl
->rl_object
;
1652 dmu_buf_rele(zgd
->zgd_db
, zgd
);
1654 ztest_range_unlock(zgd
->zgd_rl
);
1655 ztest_object_unlock(zd
, object
);
1657 if (error
== 0 && zgd
->zgd_bp
)
1658 zil_add_block(zgd
->zgd_zilog
, zgd
->zgd_bp
);
1660 umem_free(zgd
, sizeof (*zgd
));
1664 ztest_get_data(void *arg
, lr_write_t
*lr
, char *buf
, zio_t
*zio
)
1666 ztest_ds_t
*zd
= arg
;
1667 objset_t
*os
= zd
->zd_os
;
1668 uint64_t object
= lr
->lr_foid
;
1669 uint64_t offset
= lr
->lr_offset
;
1670 uint64_t size
= lr
->lr_length
;
1671 blkptr_t
*bp
= &lr
->lr_blkptr
;
1672 uint64_t txg
= lr
->lr_common
.lrc_txg
;
1674 dmu_object_info_t doi
;
1679 ztest_object_lock(zd
, object
, RL_READER
);
1680 error
= dmu_bonus_hold(os
, object
, FTAG
, &db
);
1682 ztest_object_unlock(zd
, object
);
1686 crtxg
= ztest_bt_bonus(db
)->bt_crtxg
;
1688 if (crtxg
== 0 || crtxg
> txg
) {
1689 dmu_buf_rele(db
, FTAG
);
1690 ztest_object_unlock(zd
, object
);
1694 dmu_object_info_from_db(db
, &doi
);
1695 dmu_buf_rele(db
, FTAG
);
1698 zgd
= umem_zalloc(sizeof (*zgd
), UMEM_NOFAIL
);
1699 zgd
->zgd_zilog
= zd
->zd_zilog
;
1700 zgd
->zgd_private
= zd
;
1702 if (buf
!= NULL
) { /* immediate write */
1703 zgd
->zgd_rl
= ztest_range_lock(zd
, object
, offset
, size
,
1706 error
= dmu_read(os
, object
, offset
, size
, buf
,
1707 DMU_READ_NO_PREFETCH
);
1710 size
= doi
.doi_data_block_size
;
1712 offset
= P2ALIGN(offset
, size
);
1714 ASSERT(offset
< size
);
1718 zgd
->zgd_rl
= ztest_range_lock(zd
, object
, offset
, size
,
1721 error
= dmu_buf_hold(os
, object
, offset
, zgd
, &db
,
1722 DMU_READ_NO_PREFETCH
);
1728 ASSERT(db
->db_offset
== offset
);
1729 ASSERT(db
->db_size
== size
);
1731 error
= dmu_sync(zio
, lr
->lr_common
.lrc_txg
,
1732 ztest_get_done
, zgd
);
1739 ztest_get_done(zgd
, error
);
1745 ztest_lr_alloc(size_t lrsize
, char *name
)
1748 size_t namesize
= name
? strlen(name
) + 1 : 0;
1750 lr
= umem_zalloc(lrsize
+ namesize
, UMEM_NOFAIL
);
1753 bcopy(name
, lr
+ lrsize
, namesize
);
1759 ztest_lr_free(void *lr
, size_t lrsize
, char *name
)
1761 size_t namesize
= name
? strlen(name
) + 1 : 0;
1763 umem_free(lr
, lrsize
+ namesize
);
1767 * Lookup a bunch of objects. Returns the number of objects not found.
1770 ztest_lookup(ztest_ds_t
*zd
, ztest_od_t
*od
, int count
)
1776 ASSERT(mutex_held(&zd
->zd_dirobj_lock
));
1778 for (i
= 0; i
< count
; i
++, od
++) {
1780 error
= zap_lookup(zd
->zd_os
, od
->od_dir
, od
->od_name
,
1781 sizeof (uint64_t), 1, &od
->od_object
);
1783 ASSERT(error
== ENOENT
);
1784 ASSERT(od
->od_object
== 0);
1788 ztest_block_tag_t
*bbt
;
1789 dmu_object_info_t doi
;
1791 ASSERT(od
->od_object
!= 0);
1792 ASSERT(missing
== 0); /* there should be no gaps */
1794 ztest_object_lock(zd
, od
->od_object
, RL_READER
);
1795 VERIFY3U(0, ==, dmu_bonus_hold(zd
->zd_os
,
1796 od
->od_object
, FTAG
, &db
));
1797 dmu_object_info_from_db(db
, &doi
);
1798 bbt
= ztest_bt_bonus(db
);
1799 ASSERT3U(bbt
->bt_magic
, ==, BT_MAGIC
);
1800 od
->od_type
= doi
.doi_type
;
1801 od
->od_blocksize
= doi
.doi_data_block_size
;
1802 od
->od_gen
= bbt
->bt_gen
;
1803 dmu_buf_rele(db
, FTAG
);
1804 ztest_object_unlock(zd
, od
->od_object
);
1812 ztest_create(ztest_ds_t
*zd
, ztest_od_t
*od
, int count
)
1817 ASSERT(mutex_held(&zd
->zd_dirobj_lock
));
1819 for (i
= 0; i
< count
; i
++, od
++) {
1826 lr_create_t
*lr
= ztest_lr_alloc(sizeof (*lr
), od
->od_name
);
1828 lr
->lr_doid
= od
->od_dir
;
1829 lr
->lr_foid
= 0; /* 0 to allocate, > 0 to claim */
1830 lr
->lrz_type
= od
->od_crtype
;
1831 lr
->lrz_blocksize
= od
->od_crblocksize
;
1832 lr
->lrz_ibshift
= ztest_random_ibshift();
1833 lr
->lrz_bonustype
= DMU_OT_UINT64_OTHER
;
1834 lr
->lrz_bonuslen
= dmu_bonus_max();
1835 lr
->lr_gen
= od
->od_crgen
;
1836 lr
->lr_crtime
[0] = time(NULL
);
1838 if (ztest_replay_create(zd
, lr
, B_FALSE
) != 0) {
1839 ASSERT(missing
== 0);
1843 od
->od_object
= lr
->lr_foid
;
1844 od
->od_type
= od
->od_crtype
;
1845 od
->od_blocksize
= od
->od_crblocksize
;
1846 od
->od_gen
= od
->od_crgen
;
1847 ASSERT(od
->od_object
!= 0);
1850 ztest_lr_free(lr
, sizeof (*lr
), od
->od_name
);
1857 ztest_remove(ztest_ds_t
*zd
, ztest_od_t
*od
, int count
)
1863 ASSERT(mutex_held(&zd
->zd_dirobj_lock
));
1867 for (i
= count
- 1; i
>= 0; i
--, od
--) {
1873 if (od
->od_object
== 0)
1876 lr_remove_t
*lr
= ztest_lr_alloc(sizeof (*lr
), od
->od_name
);
1878 lr
->lr_doid
= od
->od_dir
;
1880 if ((error
= ztest_replay_remove(zd
, lr
, B_FALSE
)) != 0) {
1881 ASSERT3U(error
, ==, ENOSPC
);
1886 ztest_lr_free(lr
, sizeof (*lr
), od
->od_name
);
1893 ztest_write(ztest_ds_t
*zd
, uint64_t object
, uint64_t offset
, uint64_t size
,
1899 lr
= ztest_lr_alloc(sizeof (*lr
) + size
, NULL
);
1901 lr
->lr_foid
= object
;
1902 lr
->lr_offset
= offset
;
1903 lr
->lr_length
= size
;
1905 BP_ZERO(&lr
->lr_blkptr
);
1907 bcopy(data
, lr
+ 1, size
);
1909 error
= ztest_replay_write(zd
, lr
, B_FALSE
);
1911 ztest_lr_free(lr
, sizeof (*lr
) + size
, NULL
);
1917 ztest_truncate(ztest_ds_t
*zd
, uint64_t object
, uint64_t offset
, uint64_t size
)
1922 lr
= ztest_lr_alloc(sizeof (*lr
), NULL
);
1924 lr
->lr_foid
= object
;
1925 lr
->lr_offset
= offset
;
1926 lr
->lr_length
= size
;
1928 error
= ztest_replay_truncate(zd
, lr
, B_FALSE
);
1930 ztest_lr_free(lr
, sizeof (*lr
), NULL
);
1936 ztest_setattr(ztest_ds_t
*zd
, uint64_t object
)
1941 lr
= ztest_lr_alloc(sizeof (*lr
), NULL
);
1943 lr
->lr_foid
= object
;
1947 error
= ztest_replay_setattr(zd
, lr
, B_FALSE
);
1949 ztest_lr_free(lr
, sizeof (*lr
), NULL
);
1955 ztest_prealloc(ztest_ds_t
*zd
, uint64_t object
, uint64_t offset
, uint64_t size
)
1957 objset_t
*os
= zd
->zd_os
;
1962 txg_wait_synced(dmu_objset_pool(os
), 0);
1964 ztest_object_lock(zd
, object
, RL_READER
);
1965 rl
= ztest_range_lock(zd
, object
, offset
, size
, RL_WRITER
);
1967 tx
= dmu_tx_create(os
);
1969 dmu_tx_hold_write(tx
, object
, offset
, size
);
1971 txg
= ztest_tx_assign(tx
, TXG_WAIT
, FTAG
);
1974 dmu_prealloc(os
, object
, offset
, size
, tx
);
1976 txg_wait_synced(dmu_objset_pool(os
), txg
);
1978 (void) dmu_free_long_range(os
, object
, offset
, size
);
1981 ztest_range_unlock(rl
);
1982 ztest_object_unlock(zd
, object
);
1986 ztest_io(ztest_ds_t
*zd
, uint64_t object
, uint64_t offset
)
1988 ztest_block_tag_t wbt
;
1989 dmu_object_info_t doi
;
1990 enum ztest_io_type io_type
;
1994 VERIFY(dmu_object_info(zd
->zd_os
, object
, &doi
) == 0);
1995 blocksize
= doi
.doi_data_block_size
;
1996 data
= umem_alloc(blocksize
, UMEM_NOFAIL
);
1999 * Pick an i/o type at random, biased toward writing block tags.
2001 io_type
= ztest_random(ZTEST_IO_TYPES
);
2002 if (ztest_random(2) == 0)
2003 io_type
= ZTEST_IO_WRITE_TAG
;
2005 (void) rw_enter(&zd
->zd_zilog_lock
, RW_READER
);
2009 case ZTEST_IO_WRITE_TAG
:
2010 ztest_bt_generate(&wbt
, zd
->zd_os
, object
, offset
, 0, 0, 0);
2011 (void) ztest_write(zd
, object
, offset
, sizeof (wbt
), &wbt
);
2014 case ZTEST_IO_WRITE_PATTERN
:
2015 (void) memset(data
, 'a' + (object
+ offset
) % 5, blocksize
);
2016 if (ztest_random(2) == 0) {
2018 * Induce fletcher2 collisions to ensure that
2019 * zio_ddt_collision() detects and resolves them
2020 * when using fletcher2-verify for deduplication.
2022 ((uint64_t *)data
)[0] ^= 1ULL << 63;
2023 ((uint64_t *)data
)[4] ^= 1ULL << 63;
2025 (void) ztest_write(zd
, object
, offset
, blocksize
, data
);
2028 case ZTEST_IO_WRITE_ZEROES
:
2029 bzero(data
, blocksize
);
2030 (void) ztest_write(zd
, object
, offset
, blocksize
, data
);
2033 case ZTEST_IO_TRUNCATE
:
2034 (void) ztest_truncate(zd
, object
, offset
, blocksize
);
2037 case ZTEST_IO_SETATTR
:
2038 (void) ztest_setattr(zd
, object
);
2044 (void) rw_exit(&zd
->zd_zilog_lock
);
2046 umem_free(data
, blocksize
);
2050 * Initialize an object description template.
2053 ztest_od_init(ztest_od_t
*od
, uint64_t id
, char *tag
, uint64_t index
,
2054 dmu_object_type_t type
, uint64_t blocksize
, uint64_t gen
)
2056 od
->od_dir
= ZTEST_DIROBJ
;
2059 od
->od_crtype
= type
;
2060 od
->od_crblocksize
= blocksize
? blocksize
: ztest_random_blocksize();
2063 od
->od_type
= DMU_OT_NONE
;
2064 od
->od_blocksize
= 0;
2067 (void) snprintf(od
->od_name
, sizeof (od
->od_name
), "%s(%lld)[%llu]",
2068 tag
, (longlong_t
)id
, (u_longlong_t
)index
);
2072 * Lookup or create the objects for a test using the od template.
2073 * If the objects do not all exist, or if 'remove' is specified,
2074 * remove any existing objects and create new ones. Otherwise,
2075 * use the existing objects.
2078 ztest_object_init(ztest_ds_t
*zd
, ztest_od_t
*od
, size_t size
, boolean_t remove
)
2080 int count
= size
/ sizeof (*od
);
2083 mutex_enter(&zd
->zd_dirobj_lock
);
2084 if ((ztest_lookup(zd
, od
, count
) != 0 || remove
) &&
2085 (ztest_remove(zd
, od
, count
) != 0 ||
2086 ztest_create(zd
, od
, count
) != 0))
2089 mutex_exit(&zd
->zd_dirobj_lock
);
2096 ztest_zil_commit(ztest_ds_t
*zd
, uint64_t id
)
2098 zilog_t
*zilog
= zd
->zd_zilog
;
2100 (void) rw_enter(&zd
->zd_zilog_lock
, RW_READER
);
2102 zil_commit(zilog
, ztest_random(ZTEST_OBJECTS
));
2105 * Remember the committed values in zd, which is in parent/child
2106 * shared memory. If we die, the next iteration of ztest_run()
2107 * will verify that the log really does contain this record.
2109 mutex_enter(&zilog
->zl_lock
);
2110 ASSERT(zd
->zd_seq
<= zilog
->zl_commit_lr_seq
);
2111 zd
->zd_seq
= zilog
->zl_commit_lr_seq
;
2112 mutex_exit(&zilog
->zl_lock
);
2114 (void) rw_exit(&zd
->zd_zilog_lock
);
2118 * This function is designed to simulate the operations that occur during a
2119 * mount/unmount operation. We hold the dataset across these operations in an
2120 * attempt to expose any implicit assumptions about ZIL management.
2124 ztest_zil_remount(ztest_ds_t
*zd
, uint64_t id
)
2126 objset_t
*os
= zd
->zd_os
;
2128 (void) rw_enter(&zd
->zd_zilog_lock
, RW_WRITER
);
2130 /* zfs_sb_teardown() */
2131 zil_close(zd
->zd_zilog
);
2133 /* zfsvfs_setup() */
2134 VERIFY(zil_open(os
, ztest_get_data
) == zd
->zd_zilog
);
2135 zil_replay(os
, zd
, ztest_replay_vector
);
2137 (void) rw_exit(&zd
->zd_zilog_lock
);
2141 * Verify that we can't destroy an active pool, create an existing pool,
2142 * or create a pool with a bad vdev spec.
2146 ztest_spa_create_destroy(ztest_ds_t
*zd
, uint64_t id
)
2148 ztest_shared_t
*zs
= ztest_shared
;
2153 * Attempt to create using a bad file.
2155 nvroot
= make_vdev_root("/dev/bogus", NULL
, 0, 0, 0, 0, 0, 1);
2156 VERIFY3U(ENOENT
, ==,
2157 spa_create("ztest_bad_file", nvroot
, NULL
, NULL
, NULL
));
2158 nvlist_free(nvroot
);
2161 * Attempt to create using a bad mirror.
2163 nvroot
= make_vdev_root("/dev/bogus", NULL
, 0, 0, 0, 0, 2, 1);
2164 VERIFY3U(ENOENT
, ==,
2165 spa_create("ztest_bad_mirror", nvroot
, NULL
, NULL
, NULL
));
2166 nvlist_free(nvroot
);
2169 * Attempt to create an existing pool. It shouldn't matter
2170 * what's in the nvroot; we should fail with EEXIST.
2172 (void) rw_enter(&zs
->zs_name_lock
, RW_READER
);
2173 nvroot
= make_vdev_root("/dev/bogus", NULL
, 0, 0, 0, 0, 0, 1);
2174 VERIFY3U(EEXIST
, ==, spa_create(zs
->zs_pool
, nvroot
, NULL
, NULL
, NULL
));
2175 nvlist_free(nvroot
);
2176 VERIFY3U(0, ==, spa_open(zs
->zs_pool
, &spa
, FTAG
));
2177 VERIFY3U(EBUSY
, ==, spa_destroy(zs
->zs_pool
));
2178 spa_close(spa
, FTAG
);
2180 (void) rw_exit(&zs
->zs_name_lock
);
2184 vdev_lookup_by_path(vdev_t
*vd
, const char *path
)
2189 if (vd
->vdev_path
!= NULL
&& strcmp(path
, vd
->vdev_path
) == 0)
2192 for (c
= 0; c
< vd
->vdev_children
; c
++)
2193 if ((mvd
= vdev_lookup_by_path(vd
->vdev_child
[c
], path
)) !=
2201 * Find the first available hole which can be used as a top-level.
2204 find_vdev_hole(spa_t
*spa
)
2206 vdev_t
*rvd
= spa
->spa_root_vdev
;
2209 ASSERT(spa_config_held(spa
, SCL_VDEV
, RW_READER
) == SCL_VDEV
);
2211 for (c
= 0; c
< rvd
->vdev_children
; c
++) {
2212 vdev_t
*cvd
= rvd
->vdev_child
[c
];
2214 if (cvd
->vdev_ishole
)
2221 * Verify that vdev_add() works as expected.
2225 ztest_vdev_add_remove(ztest_ds_t
*zd
, uint64_t id
)
2227 ztest_shared_t
*zs
= ztest_shared
;
2228 spa_t
*spa
= zs
->zs_spa
;
2234 mutex_enter(&zs
->zs_vdev_lock
);
2235 leaves
= MAX(zs
->zs_mirrors
+ zs
->zs_splits
, 1) * zopt_raidz
;
2237 spa_config_enter(spa
, SCL_VDEV
, FTAG
, RW_READER
);
2239 ztest_shared
->zs_vdev_next_leaf
= find_vdev_hole(spa
) * leaves
;
2242 * If we have slogs then remove them 1/4 of the time.
2244 if (spa_has_slogs(spa
) && ztest_random(4) == 0) {
2246 * Grab the guid from the head of the log class rotor.
2248 guid
= spa_log_class(spa
)->mc_rotor
->mg_vd
->vdev_guid
;
2250 spa_config_exit(spa
, SCL_VDEV
, FTAG
);
2253 * We have to grab the zs_name_lock as writer to
2254 * prevent a race between removing a slog (dmu_objset_find)
2255 * and destroying a dataset. Removing the slog will
2256 * grab a reference on the dataset which may cause
2257 * dmu_objset_destroy() to fail with EBUSY thus
2258 * leaving the dataset in an inconsistent state.
2260 rw_enter(&ztest_shared
->zs_name_lock
, RW_WRITER
);
2261 error
= spa_vdev_remove(spa
, guid
, B_FALSE
);
2262 rw_exit(&ztest_shared
->zs_name_lock
);
2264 if (error
&& error
!= EEXIST
)
2265 fatal(0, "spa_vdev_remove() = %d", error
);
2267 spa_config_exit(spa
, SCL_VDEV
, FTAG
);
2270 * Make 1/4 of the devices be log devices.
2272 nvroot
= make_vdev_root(NULL
, NULL
, zopt_vdev_size
, 0,
2273 ztest_random(4) == 0, zopt_raidz
, zs
->zs_mirrors
, 1);
2275 error
= spa_vdev_add(spa
, nvroot
);
2276 nvlist_free(nvroot
);
2278 if (error
== ENOSPC
)
2279 ztest_record_enospc("spa_vdev_add");
2280 else if (error
!= 0)
2281 fatal(0, "spa_vdev_add() = %d", error
);
2284 mutex_exit(&ztest_shared
->zs_vdev_lock
);
2288 * Verify that adding/removing aux devices (l2arc, hot spare) works as expected.
2292 ztest_vdev_aux_add_remove(ztest_ds_t
*zd
, uint64_t id
)
2294 ztest_shared_t
*zs
= ztest_shared
;
2295 spa_t
*spa
= zs
->zs_spa
;
2296 vdev_t
*rvd
= spa
->spa_root_vdev
;
2297 spa_aux_vdev_t
*sav
;
2303 path
= umem_alloc(MAXPATHLEN
, UMEM_NOFAIL
);
2305 if (ztest_random(2) == 0) {
2306 sav
= &spa
->spa_spares
;
2307 aux
= ZPOOL_CONFIG_SPARES
;
2309 sav
= &spa
->spa_l2cache
;
2310 aux
= ZPOOL_CONFIG_L2CACHE
;
2313 mutex_enter(&zs
->zs_vdev_lock
);
2315 spa_config_enter(spa
, SCL_VDEV
, FTAG
, RW_READER
);
2317 if (sav
->sav_count
!= 0 && ztest_random(4) == 0) {
2319 * Pick a random device to remove.
2321 guid
= sav
->sav_vdevs
[ztest_random(sav
->sav_count
)]->vdev_guid
;
2324 * Find an unused device we can add.
2326 zs
->zs_vdev_aux
= 0;
2329 (void) sprintf(path
, ztest_aux_template
, zopt_dir
,
2330 zopt_pool
, aux
, zs
->zs_vdev_aux
);
2331 for (c
= 0; c
< sav
->sav_count
; c
++)
2332 if (strcmp(sav
->sav_vdevs
[c
]->vdev_path
,
2335 if (c
== sav
->sav_count
&&
2336 vdev_lookup_by_path(rvd
, path
) == NULL
)
2342 spa_config_exit(spa
, SCL_VDEV
, FTAG
);
2348 nvlist_t
*nvroot
= make_vdev_root(NULL
, aux
,
2349 (zopt_vdev_size
* 5) / 4, 0, 0, 0, 0, 1);
2350 error
= spa_vdev_add(spa
, nvroot
);
2352 fatal(0, "spa_vdev_add(%p) = %d", nvroot
, error
);
2353 nvlist_free(nvroot
);
2356 * Remove an existing device. Sometimes, dirty its
2357 * vdev state first to make sure we handle removal
2358 * of devices that have pending state changes.
2360 if (ztest_random(2) == 0)
2361 (void) vdev_online(spa
, guid
, 0, NULL
);
2363 error
= spa_vdev_remove(spa
, guid
, B_FALSE
);
2364 if (error
!= 0 && error
!= EBUSY
)
2365 fatal(0, "spa_vdev_remove(%llu) = %d", guid
, error
);
2368 mutex_exit(&zs
->zs_vdev_lock
);
2370 umem_free(path
, MAXPATHLEN
);
2374 * split a pool if it has mirror tlvdevs
2378 ztest_split_pool(ztest_ds_t
*zd
, uint64_t id
)
2380 ztest_shared_t
*zs
= ztest_shared
;
2381 spa_t
*spa
= zs
->zs_spa
;
2382 vdev_t
*rvd
= spa
->spa_root_vdev
;
2383 nvlist_t
*tree
, **child
, *config
, *split
, **schild
;
2384 uint_t c
, children
, schildren
= 0, lastlogid
= 0;
2387 mutex_enter(&zs
->zs_vdev_lock
);
2389 /* ensure we have a useable config; mirrors of raidz aren't supported */
2390 if (zs
->zs_mirrors
< 3 || zopt_raidz
> 1) {
2391 mutex_exit(&zs
->zs_vdev_lock
);
2395 /* clean up the old pool, if any */
2396 (void) spa_destroy("splitp");
2398 spa_config_enter(spa
, SCL_VDEV
, FTAG
, RW_READER
);
2400 /* generate a config from the existing config */
2401 mutex_enter(&spa
->spa_props_lock
);
2402 VERIFY(nvlist_lookup_nvlist(spa
->spa_config
, ZPOOL_CONFIG_VDEV_TREE
,
2404 mutex_exit(&spa
->spa_props_lock
);
2406 VERIFY(nvlist_lookup_nvlist_array(tree
, ZPOOL_CONFIG_CHILDREN
, &child
,
2409 schild
= malloc(rvd
->vdev_children
* sizeof (nvlist_t
*));
2410 for (c
= 0; c
< children
; c
++) {
2411 vdev_t
*tvd
= rvd
->vdev_child
[c
];
2415 if (tvd
->vdev_islog
|| tvd
->vdev_ops
== &vdev_hole_ops
) {
2416 VERIFY(nvlist_alloc(&schild
[schildren
], NV_UNIQUE_NAME
,
2418 VERIFY(nvlist_add_string(schild
[schildren
],
2419 ZPOOL_CONFIG_TYPE
, VDEV_TYPE_HOLE
) == 0);
2420 VERIFY(nvlist_add_uint64(schild
[schildren
],
2421 ZPOOL_CONFIG_IS_HOLE
, 1) == 0);
2423 lastlogid
= schildren
;
2428 VERIFY(nvlist_lookup_nvlist_array(child
[c
],
2429 ZPOOL_CONFIG_CHILDREN
, &mchild
, &mchildren
) == 0);
2430 VERIFY(nvlist_dup(mchild
[0], &schild
[schildren
++], 0) == 0);
2433 /* OK, create a config that can be used to split */
2434 VERIFY(nvlist_alloc(&split
, NV_UNIQUE_NAME
, 0) == 0);
2435 VERIFY(nvlist_add_string(split
, ZPOOL_CONFIG_TYPE
,
2436 VDEV_TYPE_ROOT
) == 0);
2437 VERIFY(nvlist_add_nvlist_array(split
, ZPOOL_CONFIG_CHILDREN
, schild
,
2438 lastlogid
!= 0 ? lastlogid
: schildren
) == 0);
2440 VERIFY(nvlist_alloc(&config
, NV_UNIQUE_NAME
, 0) == 0);
2441 VERIFY(nvlist_add_nvlist(config
, ZPOOL_CONFIG_VDEV_TREE
, split
) == 0);
2443 for (c
= 0; c
< schildren
; c
++)
2444 nvlist_free(schild
[c
]);
2448 spa_config_exit(spa
, SCL_VDEV
, FTAG
);
2450 (void) rw_enter(&zs
->zs_name_lock
, RW_WRITER
);
2451 error
= spa_vdev_split_mirror(spa
, "splitp", config
, NULL
, B_FALSE
);
2452 (void) rw_exit(&zs
->zs_name_lock
);
2454 nvlist_free(config
);
2457 (void) printf("successful split - results:\n");
2458 mutex_enter(&spa_namespace_lock
);
2459 show_pool_stats(spa
);
2460 show_pool_stats(spa_lookup("splitp"));
2461 mutex_exit(&spa_namespace_lock
);
2465 mutex_exit(&zs
->zs_vdev_lock
);
2470 * Verify that we can attach and detach devices.
2474 ztest_vdev_attach_detach(ztest_ds_t
*zd
, uint64_t id
)
2476 ztest_shared_t
*zs
= ztest_shared
;
2477 spa_t
*spa
= zs
->zs_spa
;
2478 spa_aux_vdev_t
*sav
= &spa
->spa_spares
;
2479 vdev_t
*rvd
= spa
->spa_root_vdev
;
2480 vdev_t
*oldvd
, *newvd
, *pvd
;
2484 uint64_t ashift
= ztest_get_ashift();
2485 uint64_t oldguid
, pguid
;
2486 size_t oldsize
, newsize
;
2487 char *oldpath
, *newpath
;
2489 int oldvd_has_siblings
= B_FALSE
;
2490 int newvd_is_spare
= B_FALSE
;
2492 int error
, expected_error
;
2494 oldpath
= umem_alloc(MAXPATHLEN
, UMEM_NOFAIL
);
2495 newpath
= umem_alloc(MAXPATHLEN
, UMEM_NOFAIL
);
2497 mutex_enter(&zs
->zs_vdev_lock
);
2498 leaves
= MAX(zs
->zs_mirrors
, 1) * zopt_raidz
;
2500 spa_config_enter(spa
, SCL_VDEV
, FTAG
, RW_READER
);
2503 * Decide whether to do an attach or a replace.
2505 replacing
= ztest_random(2);
2508 * Pick a random top-level vdev.
2510 top
= ztest_random_vdev_top(spa
, B_TRUE
);
2513 * Pick a random leaf within it.
2515 leaf
= ztest_random(leaves
);
2520 oldvd
= rvd
->vdev_child
[top
];
2521 if (zs
->zs_mirrors
>= 1) {
2522 ASSERT(oldvd
->vdev_ops
== &vdev_mirror_ops
);
2523 ASSERT(oldvd
->vdev_children
>= zs
->zs_mirrors
);
2524 oldvd
= oldvd
->vdev_child
[leaf
/ zopt_raidz
];
2526 if (zopt_raidz
> 1) {
2527 ASSERT(oldvd
->vdev_ops
== &vdev_raidz_ops
);
2528 ASSERT(oldvd
->vdev_children
== zopt_raidz
);
2529 oldvd
= oldvd
->vdev_child
[leaf
% zopt_raidz
];
2533 * If we're already doing an attach or replace, oldvd may be a
2534 * mirror vdev -- in which case, pick a random child.
2536 while (oldvd
->vdev_children
!= 0) {
2537 oldvd_has_siblings
= B_TRUE
;
2538 ASSERT(oldvd
->vdev_children
>= 2);
2539 oldvd
= oldvd
->vdev_child
[ztest_random(oldvd
->vdev_children
)];
2542 oldguid
= oldvd
->vdev_guid
;
2543 oldsize
= vdev_get_min_asize(oldvd
);
2544 oldvd_is_log
= oldvd
->vdev_top
->vdev_islog
;
2545 (void) strcpy(oldpath
, oldvd
->vdev_path
);
2546 pvd
= oldvd
->vdev_parent
;
2547 pguid
= pvd
->vdev_guid
;
2550 * If oldvd has siblings, then half of the time, detach it.
2552 if (oldvd_has_siblings
&& ztest_random(2) == 0) {
2553 spa_config_exit(spa
, SCL_VDEV
, FTAG
);
2554 error
= spa_vdev_detach(spa
, oldguid
, pguid
, B_FALSE
);
2555 if (error
!= 0 && error
!= ENODEV
&& error
!= EBUSY
&&
2557 fatal(0, "detach (%s) returned %d", oldpath
, error
);
2562 * For the new vdev, choose with equal probability between the two
2563 * standard paths (ending in either 'a' or 'b') or a random hot spare.
2565 if (sav
->sav_count
!= 0 && ztest_random(3) == 0) {
2566 newvd
= sav
->sav_vdevs
[ztest_random(sav
->sav_count
)];
2567 newvd_is_spare
= B_TRUE
;
2568 (void) strcpy(newpath
, newvd
->vdev_path
);
2570 (void) snprintf(newpath
, MAXPATHLEN
, ztest_dev_template
,
2571 zopt_dir
, zopt_pool
, top
* leaves
+ leaf
);
2572 if (ztest_random(2) == 0)
2573 newpath
[strlen(newpath
) - 1] = 'b';
2574 newvd
= vdev_lookup_by_path(rvd
, newpath
);
2578 newsize
= vdev_get_min_asize(newvd
);
2581 * Make newsize a little bigger or smaller than oldsize.
2582 * If it's smaller, the attach should fail.
2583 * If it's larger, and we're doing a replace,
2584 * we should get dynamic LUN growth when we're done.
2586 newsize
= 10 * oldsize
/ (9 + ztest_random(3));
2590 * If pvd is not a mirror or root, the attach should fail with ENOTSUP,
2591 * unless it's a replace; in that case any non-replacing parent is OK.
2593 * If newvd is already part of the pool, it should fail with EBUSY.
2595 * If newvd is too small, it should fail with EOVERFLOW.
2597 if (pvd
->vdev_ops
!= &vdev_mirror_ops
&&
2598 pvd
->vdev_ops
!= &vdev_root_ops
&& (!replacing
||
2599 pvd
->vdev_ops
== &vdev_replacing_ops
||
2600 pvd
->vdev_ops
== &vdev_spare_ops
))
2601 expected_error
= ENOTSUP
;
2602 else if (newvd_is_spare
&& (!replacing
|| oldvd_is_log
))
2603 expected_error
= ENOTSUP
;
2604 else if (newvd
== oldvd
)
2605 expected_error
= replacing
? 0 : EBUSY
;
2606 else if (vdev_lookup_by_path(rvd
, newpath
) != NULL
)
2607 expected_error
= EBUSY
;
2608 else if (newsize
< oldsize
)
2609 expected_error
= EOVERFLOW
;
2610 else if (ashift
> oldvd
->vdev_top
->vdev_ashift
)
2611 expected_error
= EDOM
;
2615 spa_config_exit(spa
, SCL_VDEV
, FTAG
);
2618 * Build the nvlist describing newpath.
2620 root
= make_vdev_root(newpath
, NULL
, newvd
== NULL
? newsize
: 0,
2621 ashift
, 0, 0, 0, 1);
2623 error
= spa_vdev_attach(spa
, oldguid
, root
, replacing
);
2628 * If our parent was the replacing vdev, but the replace completed,
2629 * then instead of failing with ENOTSUP we may either succeed,
2630 * fail with ENODEV, or fail with EOVERFLOW.
2632 if (expected_error
== ENOTSUP
&&
2633 (error
== 0 || error
== ENODEV
|| error
== EOVERFLOW
))
2634 expected_error
= error
;
2637 * If someone grew the LUN, the replacement may be too small.
2639 if (error
== EOVERFLOW
|| error
== EBUSY
)
2640 expected_error
= error
;
2642 /* XXX workaround 6690467 */
2643 if (error
!= expected_error
&& expected_error
!= EBUSY
) {
2644 fatal(0, "attach (%s %llu, %s %llu, %d) "
2645 "returned %d, expected %d",
2646 oldpath
, (longlong_t
)oldsize
, newpath
,
2647 (longlong_t
)newsize
, replacing
, error
, expected_error
);
2650 mutex_exit(&zs
->zs_vdev_lock
);
2652 umem_free(oldpath
, MAXPATHLEN
);
2653 umem_free(newpath
, MAXPATHLEN
);
2657 * Callback function which expands the physical size of the vdev.
2660 grow_vdev(vdev_t
*vd
, void *arg
)
2662 ASSERTV(spa_t
*spa
= vd
->vdev_spa
);
2663 size_t *newsize
= arg
;
2667 ASSERT(spa_config_held(spa
, SCL_STATE
, RW_READER
) == SCL_STATE
);
2668 ASSERT(vd
->vdev_ops
->vdev_op_leaf
);
2670 if ((fd
= open(vd
->vdev_path
, O_RDWR
)) == -1)
2673 fsize
= lseek(fd
, 0, SEEK_END
);
2674 VERIFY(ftruncate(fd
, *newsize
) == 0);
2676 if (zopt_verbose
>= 6) {
2677 (void) printf("%s grew from %lu to %lu bytes\n",
2678 vd
->vdev_path
, (ulong_t
)fsize
, (ulong_t
)*newsize
);
2685 * Callback function which expands a given vdev by calling vdev_online().
2689 online_vdev(vdev_t
*vd
, void *arg
)
2691 spa_t
*spa
= vd
->vdev_spa
;
2692 vdev_t
*tvd
= vd
->vdev_top
;
2693 uint64_t guid
= vd
->vdev_guid
;
2694 uint64_t generation
= spa
->spa_config_generation
+ 1;
2695 vdev_state_t newstate
= VDEV_STATE_UNKNOWN
;
2698 ASSERT(spa_config_held(spa
, SCL_STATE
, RW_READER
) == SCL_STATE
);
2699 ASSERT(vd
->vdev_ops
->vdev_op_leaf
);
2701 /* Calling vdev_online will initialize the new metaslabs */
2702 spa_config_exit(spa
, SCL_STATE
, spa
);
2703 error
= vdev_online(spa
, guid
, ZFS_ONLINE_EXPAND
, &newstate
);
2704 spa_config_enter(spa
, SCL_STATE
, spa
, RW_READER
);
2707 * If vdev_online returned an error or the underlying vdev_open
2708 * failed then we abort the expand. The only way to know that
2709 * vdev_open fails is by checking the returned newstate.
2711 if (error
|| newstate
!= VDEV_STATE_HEALTHY
) {
2712 if (zopt_verbose
>= 5) {
2713 (void) printf("Unable to expand vdev, state %llu, "
2714 "error %d\n", (u_longlong_t
)newstate
, error
);
2718 ASSERT3U(newstate
, ==, VDEV_STATE_HEALTHY
);
2721 * Since we dropped the lock we need to ensure that we're
2722 * still talking to the original vdev. It's possible this
2723 * vdev may have been detached/replaced while we were
2724 * trying to online it.
2726 if (generation
!= spa
->spa_config_generation
) {
2727 if (zopt_verbose
>= 5) {
2728 (void) printf("vdev configuration has changed, "
2729 "guid %llu, state %llu, expected gen %llu, "
2732 (u_longlong_t
)tvd
->vdev_state
,
2733 (u_longlong_t
)generation
,
2734 (u_longlong_t
)spa
->spa_config_generation
);
2742 * Traverse the vdev tree calling the supplied function.
2743 * We continue to walk the tree until we either have walked all
2744 * children or we receive a non-NULL return from the callback.
2745 * If a NULL callback is passed, then we just return back the first
2746 * leaf vdev we encounter.
2749 vdev_walk_tree(vdev_t
*vd
, vdev_t
*(*func
)(vdev_t
*, void *), void *arg
)
2753 if (vd
->vdev_ops
->vdev_op_leaf
) {
2757 return (func(vd
, arg
));
2760 for (c
= 0; c
< vd
->vdev_children
; c
++) {
2761 vdev_t
*cvd
= vd
->vdev_child
[c
];
2762 if ((cvd
= vdev_walk_tree(cvd
, func
, arg
)) != NULL
)
2769 * Verify that dynamic LUN growth works as expected.
2773 ztest_vdev_LUN_growth(ztest_ds_t
*zd
, uint64_t id
)
2775 ztest_shared_t
*zs
= ztest_shared
;
2776 spa_t
*spa
= zs
->zs_spa
;
2778 metaslab_class_t
*mc
;
2779 metaslab_group_t
*mg
;
2780 size_t psize
, newsize
;
2782 uint64_t old_class_space
, new_class_space
, old_ms_count
, new_ms_count
;
2784 mutex_enter(&zs
->zs_vdev_lock
);
2785 spa_config_enter(spa
, SCL_STATE
, spa
, RW_READER
);
2787 top
= ztest_random_vdev_top(spa
, B_TRUE
);
2789 tvd
= spa
->spa_root_vdev
->vdev_child
[top
];
2792 old_ms_count
= tvd
->vdev_ms_count
;
2793 old_class_space
= metaslab_class_get_space(mc
);
2796 * Determine the size of the first leaf vdev associated with
2797 * our top-level device.
2799 vd
= vdev_walk_tree(tvd
, NULL
, NULL
);
2800 ASSERT3P(vd
, !=, NULL
);
2801 ASSERT(vd
->vdev_ops
->vdev_op_leaf
);
2803 psize
= vd
->vdev_psize
;
2806 * We only try to expand the vdev if it's healthy, less than 4x its
2807 * original size, and it has a valid psize.
2809 if (tvd
->vdev_state
!= VDEV_STATE_HEALTHY
||
2810 psize
== 0 || psize
>= 4 * zopt_vdev_size
) {
2811 spa_config_exit(spa
, SCL_STATE
, spa
);
2812 mutex_exit(&zs
->zs_vdev_lock
);
2816 newsize
= psize
+ psize
/ 8;
2817 ASSERT3U(newsize
, >, psize
);
2819 if (zopt_verbose
>= 6) {
2820 (void) printf("Expanding LUN %s from %lu to %lu\n",
2821 vd
->vdev_path
, (ulong_t
)psize
, (ulong_t
)newsize
);
2825 * Growing the vdev is a two step process:
2826 * 1). expand the physical size (i.e. relabel)
2827 * 2). online the vdev to create the new metaslabs
2829 if (vdev_walk_tree(tvd
, grow_vdev
, &newsize
) != NULL
||
2830 vdev_walk_tree(tvd
, online_vdev
, NULL
) != NULL
||
2831 tvd
->vdev_state
!= VDEV_STATE_HEALTHY
) {
2832 if (zopt_verbose
>= 5) {
2833 (void) printf("Could not expand LUN because "
2834 "the vdev configuration changed.\n");
2836 spa_config_exit(spa
, SCL_STATE
, spa
);
2837 mutex_exit(&zs
->zs_vdev_lock
);
2841 spa_config_exit(spa
, SCL_STATE
, spa
);
2844 * Expanding the LUN will update the config asynchronously,
2845 * thus we must wait for the async thread to complete any
2846 * pending tasks before proceeding.
2850 mutex_enter(&spa
->spa_async_lock
);
2851 done
= (spa
->spa_async_thread
== NULL
&& !spa
->spa_async_tasks
);
2852 mutex_exit(&spa
->spa_async_lock
);
2855 txg_wait_synced(spa_get_dsl(spa
), 0);
2856 (void) poll(NULL
, 0, 100);
2859 spa_config_enter(spa
, SCL_STATE
, spa
, RW_READER
);
2861 tvd
= spa
->spa_root_vdev
->vdev_child
[top
];
2862 new_ms_count
= tvd
->vdev_ms_count
;
2863 new_class_space
= metaslab_class_get_space(mc
);
2865 if (tvd
->vdev_mg
!= mg
|| mg
->mg_class
!= mc
) {
2866 if (zopt_verbose
>= 5) {
2867 (void) printf("Could not verify LUN expansion due to "
2868 "intervening vdev offline or remove.\n");
2870 spa_config_exit(spa
, SCL_STATE
, spa
);
2871 mutex_exit(&zs
->zs_vdev_lock
);
2876 * Make sure we were able to grow the vdev.
2878 if (new_ms_count
<= old_ms_count
)
2879 fatal(0, "LUN expansion failed: ms_count %llu <= %llu\n",
2880 old_ms_count
, new_ms_count
);
2883 * Make sure we were able to grow the pool.
2885 if (new_class_space
<= old_class_space
)
2886 fatal(0, "LUN expansion failed: class_space %llu <= %llu\n",
2887 old_class_space
, new_class_space
);
2889 if (zopt_verbose
>= 5) {
2890 char oldnumbuf
[6], newnumbuf
[6];
2892 nicenum(old_class_space
, oldnumbuf
);
2893 nicenum(new_class_space
, newnumbuf
);
2894 (void) printf("%s grew from %s to %s\n",
2895 spa
->spa_name
, oldnumbuf
, newnumbuf
);
2898 spa_config_exit(spa
, SCL_STATE
, spa
);
2899 mutex_exit(&zs
->zs_vdev_lock
);
2903 * Verify that dmu_objset_{create,destroy,open,close} work as expected.
2907 ztest_objset_create_cb(objset_t
*os
, void *arg
, cred_t
*cr
, dmu_tx_t
*tx
)
2910 * Create the objects common to all ztest datasets.
2912 VERIFY(zap_create_claim(os
, ZTEST_DIROBJ
,
2913 DMU_OT_ZAP_OTHER
, DMU_OT_NONE
, 0, tx
) == 0);
2917 ztest_dataset_create(char *dsname
)
2919 uint64_t zilset
= ztest_random(100);
2920 int err
= dmu_objset_create(dsname
, DMU_OST_OTHER
, 0,
2921 ztest_objset_create_cb
, NULL
);
2923 if (err
|| zilset
< 80)
2926 if (zopt_verbose
>= 5)
2927 (void) printf("Setting dataset %s to sync always\n", dsname
);
2928 return (ztest_dsl_prop_set_uint64(dsname
, ZFS_PROP_SYNC
,
2929 ZFS_SYNC_ALWAYS
, B_FALSE
));
2934 ztest_objset_destroy_cb(const char *name
, void *arg
)
2937 dmu_object_info_t doi
;
2941 * Verify that the dataset contains a directory object.
2943 VERIFY3U(0, ==, dmu_objset_hold(name
, FTAG
, &os
));
2944 error
= dmu_object_info(os
, ZTEST_DIROBJ
, &doi
);
2945 if (error
!= ENOENT
) {
2946 /* We could have crashed in the middle of destroying it */
2947 ASSERT3U(error
, ==, 0);
2948 ASSERT3U(doi
.doi_type
, ==, DMU_OT_ZAP_OTHER
);
2949 ASSERT3S(doi
.doi_physical_blocks_512
, >=, 0);
2951 dmu_objset_rele(os
, FTAG
);
2954 * Destroy the dataset.
2956 VERIFY3U(0, ==, dmu_objset_destroy(name
, B_FALSE
));
2961 ztest_snapshot_create(char *osname
, uint64_t id
)
2963 char snapname
[MAXNAMELEN
];
2966 (void) snprintf(snapname
, MAXNAMELEN
, "%s@%llu", osname
,
2969 error
= dmu_objset_snapshot(osname
, strchr(snapname
, '@') + 1,
2970 NULL
, NULL
, B_FALSE
, B_FALSE
, -1);
2971 if (error
== ENOSPC
) {
2972 ztest_record_enospc(FTAG
);
2975 if (error
!= 0 && error
!= EEXIST
)
2976 fatal(0, "ztest_snapshot_create(%s) = %d", snapname
, error
);
2981 ztest_snapshot_destroy(char *osname
, uint64_t id
)
2983 char snapname
[MAXNAMELEN
];
2986 (void) snprintf(snapname
, MAXNAMELEN
, "%s@%llu", osname
,
2989 error
= dmu_objset_destroy(snapname
, B_FALSE
);
2990 if (error
!= 0 && error
!= ENOENT
)
2991 fatal(0, "ztest_snapshot_destroy(%s) = %d", snapname
, error
);
2997 ztest_dmu_objset_create_destroy(ztest_ds_t
*zd
, uint64_t id
)
2999 ztest_shared_t
*zs
= ztest_shared
;
3008 zdtmp
= umem_alloc(sizeof (ztest_ds_t
), UMEM_NOFAIL
);
3009 name
= umem_alloc(MAXNAMELEN
, UMEM_NOFAIL
);
3011 (void) rw_enter(&zs
->zs_name_lock
, RW_READER
);
3013 (void) snprintf(name
, MAXNAMELEN
, "%s/temp_%llu",
3014 zs
->zs_pool
, (u_longlong_t
)id
);
3017 * If this dataset exists from a previous run, process its replay log
3018 * half of the time. If we don't replay it, then dmu_objset_destroy()
3019 * (invoked from ztest_objset_destroy_cb()) should just throw it away.
3021 if (ztest_random(2) == 0 &&
3022 dmu_objset_own(name
, DMU_OST_OTHER
, B_FALSE
, FTAG
, &os
) == 0) {
3023 ztest_zd_init(zdtmp
, os
);
3024 zil_replay(os
, zdtmp
, ztest_replay_vector
);
3025 ztest_zd_fini(zdtmp
);
3026 dmu_objset_disown(os
, FTAG
);
3030 * There may be an old instance of the dataset we're about to
3031 * create lying around from a previous run. If so, destroy it
3032 * and all of its snapshots.
3034 (void) dmu_objset_find(name
, ztest_objset_destroy_cb
, NULL
,
3035 DS_FIND_CHILDREN
| DS_FIND_SNAPSHOTS
);
3038 * Verify that the destroyed dataset is no longer in the namespace.
3040 VERIFY3U(ENOENT
, ==, dmu_objset_hold(name
, FTAG
, &os
));
3043 * Verify that we can create a new dataset.
3045 error
= ztest_dataset_create(name
);
3047 if (error
== ENOSPC
) {
3048 ztest_record_enospc(FTAG
);
3051 fatal(0, "dmu_objset_create(%s) = %d", name
, error
);
3055 dmu_objset_own(name
, DMU_OST_OTHER
, B_FALSE
, FTAG
, &os
));
3057 ztest_zd_init(zdtmp
, os
);
3060 * Open the intent log for it.
3062 zilog
= zil_open(os
, ztest_get_data
);
3065 * Put some objects in there, do a little I/O to them,
3066 * and randomly take a couple of snapshots along the way.
3068 iters
= ztest_random(5);
3069 for (i
= 0; i
< iters
; i
++) {
3070 ztest_dmu_object_alloc_free(zdtmp
, id
);
3071 if (ztest_random(iters
) == 0)
3072 (void) ztest_snapshot_create(name
, i
);
3076 * Verify that we cannot create an existing dataset.
3078 VERIFY3U(EEXIST
, ==,
3079 dmu_objset_create(name
, DMU_OST_OTHER
, 0, NULL
, NULL
));
3082 * Verify that we can hold an objset that is also owned.
3084 VERIFY3U(0, ==, dmu_objset_hold(name
, FTAG
, &os2
));
3085 dmu_objset_rele(os2
, FTAG
);
3088 * Verify that we cannot own an objset that is already owned.
3091 dmu_objset_own(name
, DMU_OST_OTHER
, B_FALSE
, FTAG
, &os2
));
3094 dmu_objset_disown(os
, FTAG
);
3095 ztest_zd_fini(zdtmp
);
3097 (void) rw_exit(&zs
->zs_name_lock
);
3099 umem_free(name
, MAXNAMELEN
);
3100 umem_free(zdtmp
, sizeof (ztest_ds_t
));
3104 * Verify that dmu_snapshot_{create,destroy,open,close} work as expected.
3107 ztest_dmu_snapshot_create_destroy(ztest_ds_t
*zd
, uint64_t id
)
3109 ztest_shared_t
*zs
= ztest_shared
;
3111 (void) rw_enter(&zs
->zs_name_lock
, RW_READER
);
3112 (void) ztest_snapshot_destroy(zd
->zd_name
, id
);
3113 (void) ztest_snapshot_create(zd
->zd_name
, id
);
3114 (void) rw_exit(&zs
->zs_name_lock
);
3118 * Cleanup non-standard snapshots and clones.
3121 ztest_dsl_dataset_cleanup(char *osname
, uint64_t id
)
3130 snap1name
= umem_alloc(MAXNAMELEN
, UMEM_NOFAIL
);
3131 clone1name
= umem_alloc(MAXNAMELEN
, UMEM_NOFAIL
);
3132 snap2name
= umem_alloc(MAXNAMELEN
, UMEM_NOFAIL
);
3133 clone2name
= umem_alloc(MAXNAMELEN
, UMEM_NOFAIL
);
3134 snap3name
= umem_alloc(MAXNAMELEN
, UMEM_NOFAIL
);
3136 (void) snprintf(snap1name
, MAXNAMELEN
, "%s@s1_%llu",
3137 osname
, (u_longlong_t
)id
);
3138 (void) snprintf(clone1name
, MAXNAMELEN
, "%s/c1_%llu",
3139 osname
, (u_longlong_t
)id
);
3140 (void) snprintf(snap2name
, MAXNAMELEN
, "%s@s2_%llu",
3141 clone1name
, (u_longlong_t
)id
);
3142 (void) snprintf(clone2name
, MAXNAMELEN
, "%s/c2_%llu",
3143 osname
, (u_longlong_t
)id
);
3144 (void) snprintf(snap3name
, MAXNAMELEN
, "%s@s3_%llu",
3145 clone1name
, (u_longlong_t
)id
);
3147 error
= dmu_objset_destroy(clone2name
, B_FALSE
);
3148 if (error
&& error
!= ENOENT
)
3149 fatal(0, "dmu_objset_destroy(%s) = %d", clone2name
, error
);
3150 error
= dmu_objset_destroy(snap3name
, B_FALSE
);
3151 if (error
&& error
!= ENOENT
)
3152 fatal(0, "dmu_objset_destroy(%s) = %d", snap3name
, error
);
3153 error
= dmu_objset_destroy(snap2name
, B_FALSE
);
3154 if (error
&& error
!= ENOENT
)
3155 fatal(0, "dmu_objset_destroy(%s) = %d", snap2name
, error
);
3156 error
= dmu_objset_destroy(clone1name
, B_FALSE
);
3157 if (error
&& error
!= ENOENT
)
3158 fatal(0, "dmu_objset_destroy(%s) = %d", clone1name
, error
);
3159 error
= dmu_objset_destroy(snap1name
, B_FALSE
);
3160 if (error
&& error
!= ENOENT
)
3161 fatal(0, "dmu_objset_destroy(%s) = %d", snap1name
, error
);
3163 umem_free(snap1name
, MAXNAMELEN
);
3164 umem_free(clone1name
, MAXNAMELEN
);
3165 umem_free(snap2name
, MAXNAMELEN
);
3166 umem_free(clone2name
, MAXNAMELEN
);
3167 umem_free(snap3name
, MAXNAMELEN
);
3171 * Verify dsl_dataset_promote handles EBUSY
3174 ztest_dsl_dataset_promote_busy(ztest_ds_t
*zd
, uint64_t id
)
3176 ztest_shared_t
*zs
= ztest_shared
;
3184 char *osname
= zd
->zd_name
;
3187 snap1name
= umem_alloc(MAXNAMELEN
, UMEM_NOFAIL
);
3188 clone1name
= umem_alloc(MAXNAMELEN
, UMEM_NOFAIL
);
3189 snap2name
= umem_alloc(MAXNAMELEN
, UMEM_NOFAIL
);
3190 clone2name
= umem_alloc(MAXNAMELEN
, UMEM_NOFAIL
);
3191 snap3name
= umem_alloc(MAXNAMELEN
, UMEM_NOFAIL
);
3193 (void) rw_enter(&zs
->zs_name_lock
, RW_READER
);
3195 ztest_dsl_dataset_cleanup(osname
, id
);
3197 (void) snprintf(snap1name
, MAXNAMELEN
, "%s@s1_%llu",
3198 osname
, (u_longlong_t
)id
);
3199 (void) snprintf(clone1name
, MAXNAMELEN
, "%s/c1_%llu",
3200 osname
, (u_longlong_t
)id
);
3201 (void) snprintf(snap2name
, MAXNAMELEN
, "%s@s2_%llu",
3202 clone1name
, (u_longlong_t
)id
);
3203 (void) snprintf(clone2name
, MAXNAMELEN
, "%s/c2_%llu",
3204 osname
, (u_longlong_t
)id
);
3205 (void) snprintf(snap3name
, MAXNAMELEN
, "%s@s3_%llu",
3206 clone1name
, (u_longlong_t
)id
);
3208 error
= dmu_objset_snapshot(osname
, strchr(snap1name
, '@')+1,
3209 NULL
, NULL
, B_FALSE
, B_FALSE
, -1);
3210 if (error
&& error
!= EEXIST
) {
3211 if (error
== ENOSPC
) {
3212 ztest_record_enospc(FTAG
);
3215 fatal(0, "dmu_take_snapshot(%s) = %d", snap1name
, error
);
3218 error
= dmu_objset_hold(snap1name
, FTAG
, &clone
);
3220 fatal(0, "dmu_open_snapshot(%s) = %d", snap1name
, error
);
3222 error
= dmu_objset_clone(clone1name
, dmu_objset_ds(clone
), 0);
3223 dmu_objset_rele(clone
, FTAG
);
3225 if (error
== ENOSPC
) {
3226 ztest_record_enospc(FTAG
);
3229 fatal(0, "dmu_objset_create(%s) = %d", clone1name
, error
);
3232 error
= dmu_objset_snapshot(clone1name
, strchr(snap2name
, '@')+1,
3233 NULL
, NULL
, B_FALSE
, B_FALSE
, -1);
3234 if (error
&& error
!= EEXIST
) {
3235 if (error
== ENOSPC
) {
3236 ztest_record_enospc(FTAG
);
3239 fatal(0, "dmu_open_snapshot(%s) = %d", snap2name
, error
);
3242 error
= dmu_objset_snapshot(clone1name
, strchr(snap3name
, '@')+1,
3243 NULL
, NULL
, B_FALSE
, B_FALSE
, -1);
3244 if (error
&& error
!= EEXIST
) {
3245 if (error
== ENOSPC
) {
3246 ztest_record_enospc(FTAG
);
3249 fatal(0, "dmu_open_snapshot(%s) = %d", snap3name
, error
);
3252 error
= dmu_objset_hold(snap3name
, FTAG
, &clone
);
3254 fatal(0, "dmu_open_snapshot(%s) = %d", snap3name
, error
);
3256 error
= dmu_objset_clone(clone2name
, dmu_objset_ds(clone
), 0);
3257 dmu_objset_rele(clone
, FTAG
);
3259 if (error
== ENOSPC
) {
3260 ztest_record_enospc(FTAG
);
3263 fatal(0, "dmu_objset_create(%s) = %d", clone2name
, error
);
3266 error
= dsl_dataset_own(snap2name
, B_FALSE
, FTAG
, &ds
);
3268 fatal(0, "dsl_dataset_own(%s) = %d", snap2name
, error
);
3269 error
= dsl_dataset_promote(clone2name
, NULL
);
3271 fatal(0, "dsl_dataset_promote(%s), %d, not EBUSY", clone2name
,
3273 dsl_dataset_disown(ds
, FTAG
);
3276 ztest_dsl_dataset_cleanup(osname
, id
);
3278 (void) rw_exit(&zs
->zs_name_lock
);
3280 umem_free(snap1name
, MAXNAMELEN
);
3281 umem_free(clone1name
, MAXNAMELEN
);
3282 umem_free(snap2name
, MAXNAMELEN
);
3283 umem_free(clone2name
, MAXNAMELEN
);
3284 umem_free(snap3name
, MAXNAMELEN
);
3287 #undef OD_ARRAY_SIZE
3288 #define OD_ARRAY_SIZE 4
3291 * Verify that dmu_object_{alloc,free} work as expected.
3294 ztest_dmu_object_alloc_free(ztest_ds_t
*zd
, uint64_t id
)
3301 size
= sizeof(ztest_od_t
) * OD_ARRAY_SIZE
;
3302 od
= umem_alloc(size
, UMEM_NOFAIL
);
3303 batchsize
= OD_ARRAY_SIZE
;
3305 for (b
= 0; b
< batchsize
; b
++)
3306 ztest_od_init(od
+ b
, id
, FTAG
, b
, DMU_OT_UINT64_OTHER
, 0, 0);
3309 * Destroy the previous batch of objects, create a new batch,
3310 * and do some I/O on the new objects.
3312 if (ztest_object_init(zd
, od
, size
, B_TRUE
) != 0)
3315 while (ztest_random(4 * batchsize
) != 0)
3316 ztest_io(zd
, od
[ztest_random(batchsize
)].od_object
,
3317 ztest_random(ZTEST_RANGE_LOCKS
) << SPA_MAXBLOCKSHIFT
);
3319 umem_free(od
, size
);
3322 #undef OD_ARRAY_SIZE
3323 #define OD_ARRAY_SIZE 2
3326 * Verify that dmu_{read,write} work as expected.
3329 ztest_dmu_read_write(ztest_ds_t
*zd
, uint64_t id
)
3334 objset_t
*os
= zd
->zd_os
;
3335 size
= sizeof(ztest_od_t
) * OD_ARRAY_SIZE
;
3336 od
= umem_alloc(size
, UMEM_NOFAIL
);
3338 int i
, freeit
, error
;
3340 bufwad_t
*packbuf
, *bigbuf
, *pack
, *bigH
, *bigT
;
3341 uint64_t packobj
, packoff
, packsize
, bigobj
, bigoff
, bigsize
;
3342 uint64_t chunksize
= (1000 + ztest_random(1000)) * sizeof (uint64_t);
3343 uint64_t regions
= 997;
3344 uint64_t stride
= 123456789ULL;
3345 uint64_t width
= 40;
3346 int free_percent
= 5;
3349 * This test uses two objects, packobj and bigobj, that are always
3350 * updated together (i.e. in the same tx) so that their contents are
3351 * in sync and can be compared. Their contents relate to each other
3352 * in a simple way: packobj is a dense array of 'bufwad' structures,
3353 * while bigobj is a sparse array of the same bufwads. Specifically,
3354 * for any index n, there are three bufwads that should be identical:
3356 * packobj, at offset n * sizeof (bufwad_t)
3357 * bigobj, at the head of the nth chunk
3358 * bigobj, at the tail of the nth chunk
3360 * The chunk size is arbitrary. It doesn't have to be a power of two,
3361 * and it doesn't have any relation to the object blocksize.
3362 * The only requirement is that it can hold at least two bufwads.
3364 * Normally, we write the bufwad to each of these locations.
3365 * However, free_percent of the time we instead write zeroes to
3366 * packobj and perform a dmu_free_range() on bigobj. By comparing
3367 * bigobj to packobj, we can verify that the DMU is correctly
3368 * tracking which parts of an object are allocated and free,
3369 * and that the contents of the allocated blocks are correct.
3373 * Read the directory info. If it's the first time, set things up.
3375 ztest_od_init(od
, id
, FTAG
, 0, DMU_OT_UINT64_OTHER
, 0, chunksize
);
3376 ztest_od_init(od
+ 1, id
, FTAG
, 1, DMU_OT_UINT64_OTHER
, 0, chunksize
);
3378 if (ztest_object_init(zd
, od
, size
, B_FALSE
) != 0) {
3379 umem_free(od
, size
);
3383 bigobj
= od
[0].od_object
;
3384 packobj
= od
[1].od_object
;
3385 chunksize
= od
[0].od_gen
;
3386 ASSERT(chunksize
== od
[1].od_gen
);
3389 * Prefetch a random chunk of the big object.
3390 * Our aim here is to get some async reads in flight
3391 * for blocks that we may free below; the DMU should
3392 * handle this race correctly.
3394 n
= ztest_random(regions
) * stride
+ ztest_random(width
);
3395 s
= 1 + ztest_random(2 * width
- 1);
3396 dmu_prefetch(os
, bigobj
, n
* chunksize
, s
* chunksize
);
3399 * Pick a random index and compute the offsets into packobj and bigobj.
3401 n
= ztest_random(regions
) * stride
+ ztest_random(width
);
3402 s
= 1 + ztest_random(width
- 1);
3404 packoff
= n
* sizeof (bufwad_t
);
3405 packsize
= s
* sizeof (bufwad_t
);
3407 bigoff
= n
* chunksize
;
3408 bigsize
= s
* chunksize
;
3410 packbuf
= umem_alloc(packsize
, UMEM_NOFAIL
);
3411 bigbuf
= umem_alloc(bigsize
, UMEM_NOFAIL
);
3414 * free_percent of the time, free a range of bigobj rather than
3417 freeit
= (ztest_random(100) < free_percent
);
3420 * Read the current contents of our objects.
3422 error
= dmu_read(os
, packobj
, packoff
, packsize
, packbuf
,
3424 ASSERT3U(error
, ==, 0);
3425 error
= dmu_read(os
, bigobj
, bigoff
, bigsize
, bigbuf
,
3427 ASSERT3U(error
, ==, 0);
3430 * Get a tx for the mods to both packobj and bigobj.
3432 tx
= dmu_tx_create(os
);
3434 dmu_tx_hold_write(tx
, packobj
, packoff
, packsize
);
3437 dmu_tx_hold_free(tx
, bigobj
, bigoff
, bigsize
);
3439 dmu_tx_hold_write(tx
, bigobj
, bigoff
, bigsize
);
3441 txg
= ztest_tx_assign(tx
, TXG_MIGHTWAIT
, FTAG
);
3443 umem_free(packbuf
, packsize
);
3444 umem_free(bigbuf
, bigsize
);
3445 umem_free(od
, size
);
3449 dmu_object_set_checksum(os
, bigobj
,
3450 (enum zio_checksum
)ztest_random_dsl_prop(ZFS_PROP_CHECKSUM
), tx
);
3452 dmu_object_set_compress(os
, bigobj
,
3453 (enum zio_compress
)ztest_random_dsl_prop(ZFS_PROP_COMPRESSION
), tx
);
3456 * For each index from n to n + s, verify that the existing bufwad
3457 * in packobj matches the bufwads at the head and tail of the
3458 * corresponding chunk in bigobj. Then update all three bufwads
3459 * with the new values we want to write out.
3461 for (i
= 0; i
< s
; i
++) {
3463 pack
= (bufwad_t
*)((char *)packbuf
+ i
* sizeof (bufwad_t
));
3465 bigH
= (bufwad_t
*)((char *)bigbuf
+ i
* chunksize
);
3467 bigT
= (bufwad_t
*)((char *)bigH
+ chunksize
) - 1;
3469 ASSERT((uintptr_t)bigH
- (uintptr_t)bigbuf
< bigsize
);
3470 ASSERT((uintptr_t)bigT
- (uintptr_t)bigbuf
< bigsize
);
3472 if (pack
->bw_txg
> txg
)
3473 fatal(0, "future leak: got %llx, open txg is %llx",
3476 if (pack
->bw_data
!= 0 && pack
->bw_index
!= n
+ i
)
3477 fatal(0, "wrong index: got %llx, wanted %llx+%llx",
3478 pack
->bw_index
, n
, i
);
3480 if (bcmp(pack
, bigH
, sizeof (bufwad_t
)) != 0)
3481 fatal(0, "pack/bigH mismatch in %p/%p", pack
, bigH
);
3483 if (bcmp(pack
, bigT
, sizeof (bufwad_t
)) != 0)
3484 fatal(0, "pack/bigT mismatch in %p/%p", pack
, bigT
);
3487 bzero(pack
, sizeof (bufwad_t
));
3489 pack
->bw_index
= n
+ i
;
3491 pack
->bw_data
= 1 + ztest_random(-2ULL);
3498 * We've verified all the old bufwads, and made new ones.
3499 * Now write them out.
3501 dmu_write(os
, packobj
, packoff
, packsize
, packbuf
, tx
);
3504 if (zopt_verbose
>= 7) {
3505 (void) printf("freeing offset %llx size %llx"
3507 (u_longlong_t
)bigoff
,
3508 (u_longlong_t
)bigsize
,
3511 VERIFY(0 == dmu_free_range(os
, bigobj
, bigoff
, bigsize
, tx
));
3513 if (zopt_verbose
>= 7) {
3514 (void) printf("writing offset %llx size %llx"
3516 (u_longlong_t
)bigoff
,
3517 (u_longlong_t
)bigsize
,
3520 dmu_write(os
, bigobj
, bigoff
, bigsize
, bigbuf
, tx
);
3526 * Sanity check the stuff we just wrote.
3529 void *packcheck
= umem_alloc(packsize
, UMEM_NOFAIL
);
3530 void *bigcheck
= umem_alloc(bigsize
, UMEM_NOFAIL
);
3532 VERIFY(0 == dmu_read(os
, packobj
, packoff
,
3533 packsize
, packcheck
, DMU_READ_PREFETCH
));
3534 VERIFY(0 == dmu_read(os
, bigobj
, bigoff
,
3535 bigsize
, bigcheck
, DMU_READ_PREFETCH
));
3537 ASSERT(bcmp(packbuf
, packcheck
, packsize
) == 0);
3538 ASSERT(bcmp(bigbuf
, bigcheck
, bigsize
) == 0);
3540 umem_free(packcheck
, packsize
);
3541 umem_free(bigcheck
, bigsize
);
3544 umem_free(packbuf
, packsize
);
3545 umem_free(bigbuf
, bigsize
);
3546 umem_free(od
, size
);
3550 compare_and_update_pbbufs(uint64_t s
, bufwad_t
*packbuf
, bufwad_t
*bigbuf
,
3551 uint64_t bigsize
, uint64_t n
, uint64_t chunksize
, uint64_t txg
)
3559 * For each index from n to n + s, verify that the existing bufwad
3560 * in packobj matches the bufwads at the head and tail of the
3561 * corresponding chunk in bigobj. Then update all three bufwads
3562 * with the new values we want to write out.
3564 for (i
= 0; i
< s
; i
++) {
3566 pack
= (bufwad_t
*)((char *)packbuf
+ i
* sizeof (bufwad_t
));
3568 bigH
= (bufwad_t
*)((char *)bigbuf
+ i
* chunksize
);
3570 bigT
= (bufwad_t
*)((char *)bigH
+ chunksize
) - 1;
3572 ASSERT((uintptr_t)bigH
- (uintptr_t)bigbuf
< bigsize
);
3573 ASSERT((uintptr_t)bigT
- (uintptr_t)bigbuf
< bigsize
);
3575 if (pack
->bw_txg
> txg
)
3576 fatal(0, "future leak: got %llx, open txg is %llx",
3579 if (pack
->bw_data
!= 0 && pack
->bw_index
!= n
+ i
)
3580 fatal(0, "wrong index: got %llx, wanted %llx+%llx",
3581 pack
->bw_index
, n
, i
);
3583 if (bcmp(pack
, bigH
, sizeof (bufwad_t
)) != 0)
3584 fatal(0, "pack/bigH mismatch in %p/%p", pack
, bigH
);
3586 if (bcmp(pack
, bigT
, sizeof (bufwad_t
)) != 0)
3587 fatal(0, "pack/bigT mismatch in %p/%p", pack
, bigT
);
3589 pack
->bw_index
= n
+ i
;
3591 pack
->bw_data
= 1 + ztest_random(-2ULL);
3598 #undef OD_ARRAY_SIZE
3599 #define OD_ARRAY_SIZE 2
3602 ztest_dmu_read_write_zcopy(ztest_ds_t
*zd
, uint64_t id
)
3604 objset_t
*os
= zd
->zd_os
;
3611 bufwad_t
*packbuf
, *bigbuf
;
3612 uint64_t packobj
, packoff
, packsize
, bigobj
, bigoff
, bigsize
;
3613 uint64_t blocksize
= ztest_random_blocksize();
3614 uint64_t chunksize
= blocksize
;
3615 uint64_t regions
= 997;
3616 uint64_t stride
= 123456789ULL;
3618 dmu_buf_t
*bonus_db
;
3619 arc_buf_t
**bigbuf_arcbufs
;
3620 dmu_object_info_t doi
;
3622 size
= sizeof(ztest_od_t
) * OD_ARRAY_SIZE
;
3623 od
= umem_alloc(size
, UMEM_NOFAIL
);
3626 * This test uses two objects, packobj and bigobj, that are always
3627 * updated together (i.e. in the same tx) so that their contents are
3628 * in sync and can be compared. Their contents relate to each other
3629 * in a simple way: packobj is a dense array of 'bufwad' structures,
3630 * while bigobj is a sparse array of the same bufwads. Specifically,
3631 * for any index n, there are three bufwads that should be identical:
3633 * packobj, at offset n * sizeof (bufwad_t)
3634 * bigobj, at the head of the nth chunk
3635 * bigobj, at the tail of the nth chunk
3637 * The chunk size is set equal to bigobj block size so that
3638 * dmu_assign_arcbuf() can be tested for object updates.
3642 * Read the directory info. If it's the first time, set things up.
3644 ztest_od_init(od
, id
, FTAG
, 0, DMU_OT_UINT64_OTHER
, blocksize
, 0);
3645 ztest_od_init(od
+ 1, id
, FTAG
, 1, DMU_OT_UINT64_OTHER
, 0, chunksize
);
3648 if (ztest_object_init(zd
, od
, size
, B_FALSE
) != 0) {
3649 umem_free(od
, size
);
3653 bigobj
= od
[0].od_object
;
3654 packobj
= od
[1].od_object
;
3655 blocksize
= od
[0].od_blocksize
;
3656 chunksize
= blocksize
;
3657 ASSERT(chunksize
== od
[1].od_gen
);
3659 VERIFY(dmu_object_info(os
, bigobj
, &doi
) == 0);
3660 VERIFY(ISP2(doi
.doi_data_block_size
));
3661 VERIFY(chunksize
== doi
.doi_data_block_size
);
3662 VERIFY(chunksize
>= 2 * sizeof (bufwad_t
));
3665 * Pick a random index and compute the offsets into packobj and bigobj.
3667 n
= ztest_random(regions
) * stride
+ ztest_random(width
);
3668 s
= 1 + ztest_random(width
- 1);
3670 packoff
= n
* sizeof (bufwad_t
);
3671 packsize
= s
* sizeof (bufwad_t
);
3673 bigoff
= n
* chunksize
;
3674 bigsize
= s
* chunksize
;
3676 packbuf
= umem_zalloc(packsize
, UMEM_NOFAIL
);
3677 bigbuf
= umem_zalloc(bigsize
, UMEM_NOFAIL
);
3679 VERIFY3U(0, ==, dmu_bonus_hold(os
, bigobj
, FTAG
, &bonus_db
));
3681 bigbuf_arcbufs
= umem_zalloc(2 * s
* sizeof (arc_buf_t
*), UMEM_NOFAIL
);
3684 * Iteration 0 test zcopy for DB_UNCACHED dbufs.
3685 * Iteration 1 test zcopy to already referenced dbufs.
3686 * Iteration 2 test zcopy to dirty dbuf in the same txg.
3687 * Iteration 3 test zcopy to dbuf dirty in previous txg.
3688 * Iteration 4 test zcopy when dbuf is no longer dirty.
3689 * Iteration 5 test zcopy when it can't be done.
3690 * Iteration 6 one more zcopy write.
3692 for (i
= 0; i
< 7; i
++) {
3697 * In iteration 5 (i == 5) use arcbufs
3698 * that don't match bigobj blksz to test
3699 * dmu_assign_arcbuf() when it can't directly
3700 * assign an arcbuf to a dbuf.
3702 for (j
= 0; j
< s
; j
++) {
3705 dmu_request_arcbuf(bonus_db
, chunksize
);
3707 bigbuf_arcbufs
[2 * j
] =
3708 dmu_request_arcbuf(bonus_db
, chunksize
/ 2);
3709 bigbuf_arcbufs
[2 * j
+ 1] =
3710 dmu_request_arcbuf(bonus_db
, chunksize
/ 2);
3715 * Get a tx for the mods to both packobj and bigobj.
3717 tx
= dmu_tx_create(os
);
3719 dmu_tx_hold_write(tx
, packobj
, packoff
, packsize
);
3720 dmu_tx_hold_write(tx
, bigobj
, bigoff
, bigsize
);
3722 txg
= ztest_tx_assign(tx
, TXG_MIGHTWAIT
, FTAG
);
3724 umem_free(packbuf
, packsize
);
3725 umem_free(bigbuf
, bigsize
);
3726 for (j
= 0; j
< s
; j
++) {
3728 dmu_return_arcbuf(bigbuf_arcbufs
[j
]);
3731 bigbuf_arcbufs
[2 * j
]);
3733 bigbuf_arcbufs
[2 * j
+ 1]);
3736 umem_free(bigbuf_arcbufs
, 2 * s
* sizeof (arc_buf_t
*));
3737 umem_free(od
, size
);
3738 dmu_buf_rele(bonus_db
, FTAG
);
3743 * 50% of the time don't read objects in the 1st iteration to
3744 * test dmu_assign_arcbuf() for the case when there're no
3745 * existing dbufs for the specified offsets.
3747 if (i
!= 0 || ztest_random(2) != 0) {
3748 error
= dmu_read(os
, packobj
, packoff
,
3749 packsize
, packbuf
, DMU_READ_PREFETCH
);
3750 ASSERT3U(error
, ==, 0);
3751 error
= dmu_read(os
, bigobj
, bigoff
, bigsize
,
3752 bigbuf
, DMU_READ_PREFETCH
);
3753 ASSERT3U(error
, ==, 0);
3755 compare_and_update_pbbufs(s
, packbuf
, bigbuf
, bigsize
,
3759 * We've verified all the old bufwads, and made new ones.
3760 * Now write them out.
3762 dmu_write(os
, packobj
, packoff
, packsize
, packbuf
, tx
);
3763 if (zopt_verbose
>= 7) {
3764 (void) printf("writing offset %llx size %llx"
3766 (u_longlong_t
)bigoff
,
3767 (u_longlong_t
)bigsize
,
3770 for (off
= bigoff
, j
= 0; j
< s
; j
++, off
+= chunksize
) {
3773 bcopy((caddr_t
)bigbuf
+ (off
- bigoff
),
3774 bigbuf_arcbufs
[j
]->b_data
, chunksize
);
3776 bcopy((caddr_t
)bigbuf
+ (off
- bigoff
),
3777 bigbuf_arcbufs
[2 * j
]->b_data
,
3779 bcopy((caddr_t
)bigbuf
+ (off
- bigoff
) +
3781 bigbuf_arcbufs
[2 * j
+ 1]->b_data
,
3786 VERIFY(dmu_buf_hold(os
, bigobj
, off
,
3787 FTAG
, &dbt
, DMU_READ_NO_PREFETCH
) == 0);
3790 dmu_assign_arcbuf(bonus_db
, off
,
3791 bigbuf_arcbufs
[j
], tx
);
3793 dmu_assign_arcbuf(bonus_db
, off
,
3794 bigbuf_arcbufs
[2 * j
], tx
);
3795 dmu_assign_arcbuf(bonus_db
,
3796 off
+ chunksize
/ 2,
3797 bigbuf_arcbufs
[2 * j
+ 1], tx
);
3800 dmu_buf_rele(dbt
, FTAG
);
3806 * Sanity check the stuff we just wrote.
3809 void *packcheck
= umem_alloc(packsize
, UMEM_NOFAIL
);
3810 void *bigcheck
= umem_alloc(bigsize
, UMEM_NOFAIL
);
3812 VERIFY(0 == dmu_read(os
, packobj
, packoff
,
3813 packsize
, packcheck
, DMU_READ_PREFETCH
));
3814 VERIFY(0 == dmu_read(os
, bigobj
, bigoff
,
3815 bigsize
, bigcheck
, DMU_READ_PREFETCH
));
3817 ASSERT(bcmp(packbuf
, packcheck
, packsize
) == 0);
3818 ASSERT(bcmp(bigbuf
, bigcheck
, bigsize
) == 0);
3820 umem_free(packcheck
, packsize
);
3821 umem_free(bigcheck
, bigsize
);
3824 txg_wait_open(dmu_objset_pool(os
), 0);
3825 } else if (i
== 3) {
3826 txg_wait_synced(dmu_objset_pool(os
), 0);
3830 dmu_buf_rele(bonus_db
, FTAG
);
3831 umem_free(packbuf
, packsize
);
3832 umem_free(bigbuf
, bigsize
);
3833 umem_free(bigbuf_arcbufs
, 2 * s
* sizeof (arc_buf_t
*));
3834 umem_free(od
, size
);
3839 ztest_dmu_write_parallel(ztest_ds_t
*zd
, uint64_t id
)
3843 od
= umem_alloc(sizeof(ztest_od_t
), UMEM_NOFAIL
);
3844 uint64_t offset
= (1ULL << (ztest_random(20) + 43)) +
3845 (ztest_random(ZTEST_RANGE_LOCKS
) << SPA_MAXBLOCKSHIFT
);
3848 * Have multiple threads write to large offsets in an object
3849 * to verify that parallel writes to an object -- even to the
3850 * same blocks within the object -- doesn't cause any trouble.
3852 ztest_od_init(od
, ID_PARALLEL
, FTAG
, 0, DMU_OT_UINT64_OTHER
, 0, 0);
3854 if (ztest_object_init(zd
, od
, sizeof (ztest_od_t
), B_FALSE
) != 0)
3857 while (ztest_random(10) != 0)
3858 ztest_io(zd
, od
->od_object
, offset
);
3860 umem_free(od
, sizeof(ztest_od_t
));
3864 ztest_dmu_prealloc(ztest_ds_t
*zd
, uint64_t id
)
3867 uint64_t offset
= (1ULL << (ztest_random(4) + SPA_MAXBLOCKSHIFT
)) +
3868 (ztest_random(ZTEST_RANGE_LOCKS
) << SPA_MAXBLOCKSHIFT
);
3869 uint64_t count
= ztest_random(20) + 1;
3870 uint64_t blocksize
= ztest_random_blocksize();
3873 od
= umem_alloc(sizeof(ztest_od_t
), UMEM_NOFAIL
);
3875 ztest_od_init(od
, id
, FTAG
, 0, DMU_OT_UINT64_OTHER
, blocksize
, 0);
3877 if (ztest_object_init(zd
, od
, sizeof (ztest_od_t
), !ztest_random(2)) != 0) {
3878 umem_free(od
, sizeof(ztest_od_t
));
3882 if (ztest_truncate(zd
, od
->od_object
, offset
, count
* blocksize
) != 0) {
3883 umem_free(od
, sizeof(ztest_od_t
));
3887 ztest_prealloc(zd
, od
->od_object
, offset
, count
* blocksize
);
3889 data
= umem_zalloc(blocksize
, UMEM_NOFAIL
);
3891 while (ztest_random(count
) != 0) {
3892 uint64_t randoff
= offset
+ (ztest_random(count
) * blocksize
);
3893 if (ztest_write(zd
, od
->od_object
, randoff
, blocksize
,
3896 while (ztest_random(4) != 0)
3897 ztest_io(zd
, od
->od_object
, randoff
);
3900 umem_free(data
, blocksize
);
3901 umem_free(od
, sizeof(ztest_od_t
));
3905 * Verify that zap_{create,destroy,add,remove,update} work as expected.
3907 #define ZTEST_ZAP_MIN_INTS 1
3908 #define ZTEST_ZAP_MAX_INTS 4
3909 #define ZTEST_ZAP_MAX_PROPS 1000
3912 ztest_zap(ztest_ds_t
*zd
, uint64_t id
)
3914 objset_t
*os
= zd
->zd_os
;
3917 uint64_t txg
, last_txg
;
3918 uint64_t value
[ZTEST_ZAP_MAX_INTS
];
3919 uint64_t zl_ints
, zl_intsize
, prop
;
3922 char propname
[100], txgname
[100];
3924 char *hc
[2] = { "s.acl.h", ".s.open.h.hyLZlg" };
3926 od
= umem_alloc(sizeof(ztest_od_t
), UMEM_NOFAIL
);
3927 ztest_od_init(od
, id
, FTAG
, 0, DMU_OT_ZAP_OTHER
, 0, 0);
3929 if (ztest_object_init(zd
, od
, sizeof (ztest_od_t
),
3930 !ztest_random(2)) != 0)
3933 object
= od
->od_object
;
3936 * Generate a known hash collision, and verify that
3937 * we can lookup and remove both entries.
3939 tx
= dmu_tx_create(os
);
3940 dmu_tx_hold_zap(tx
, object
, B_TRUE
, NULL
);
3941 txg
= ztest_tx_assign(tx
, TXG_MIGHTWAIT
, FTAG
);
3944 for (i
= 0; i
< 2; i
++) {
3946 VERIFY3U(0, ==, zap_add(os
, object
, hc
[i
], sizeof (uint64_t),
3949 for (i
= 0; i
< 2; i
++) {
3950 VERIFY3U(EEXIST
, ==, zap_add(os
, object
, hc
[i
],
3951 sizeof (uint64_t), 1, &value
[i
], tx
));
3953 zap_length(os
, object
, hc
[i
], &zl_intsize
, &zl_ints
));
3954 ASSERT3U(zl_intsize
, ==, sizeof (uint64_t));
3955 ASSERT3U(zl_ints
, ==, 1);
3957 for (i
= 0; i
< 2; i
++) {
3958 VERIFY3U(0, ==, zap_remove(os
, object
, hc
[i
], tx
));
3963 * Generate a buch of random entries.
3965 ints
= MAX(ZTEST_ZAP_MIN_INTS
, object
% ZTEST_ZAP_MAX_INTS
);
3967 prop
= ztest_random(ZTEST_ZAP_MAX_PROPS
);
3968 (void) sprintf(propname
, "prop_%llu", (u_longlong_t
)prop
);
3969 (void) sprintf(txgname
, "txg_%llu", (u_longlong_t
)prop
);
3970 bzero(value
, sizeof (value
));
3974 * If these zap entries already exist, validate their contents.
3976 error
= zap_length(os
, object
, txgname
, &zl_intsize
, &zl_ints
);
3978 ASSERT3U(zl_intsize
, ==, sizeof (uint64_t));
3979 ASSERT3U(zl_ints
, ==, 1);
3981 VERIFY(zap_lookup(os
, object
, txgname
, zl_intsize
,
3982 zl_ints
, &last_txg
) == 0);
3984 VERIFY(zap_length(os
, object
, propname
, &zl_intsize
,
3987 ASSERT3U(zl_intsize
, ==, sizeof (uint64_t));
3988 ASSERT3U(zl_ints
, ==, ints
);
3990 VERIFY(zap_lookup(os
, object
, propname
, zl_intsize
,
3991 zl_ints
, value
) == 0);
3993 for (i
= 0; i
< ints
; i
++) {
3994 ASSERT3U(value
[i
], ==, last_txg
+ object
+ i
);
3997 ASSERT3U(error
, ==, ENOENT
);
4001 * Atomically update two entries in our zap object.
4002 * The first is named txg_%llu, and contains the txg
4003 * in which the property was last updated. The second
4004 * is named prop_%llu, and the nth element of its value
4005 * should be txg + object + n.
4007 tx
= dmu_tx_create(os
);
4008 dmu_tx_hold_zap(tx
, object
, B_TRUE
, NULL
);
4009 txg
= ztest_tx_assign(tx
, TXG_MIGHTWAIT
, FTAG
);
4014 fatal(0, "zap future leak: old %llu new %llu", last_txg
, txg
);
4016 for (i
= 0; i
< ints
; i
++)
4017 value
[i
] = txg
+ object
+ i
;
4019 VERIFY3U(0, ==, zap_update(os
, object
, txgname
, sizeof (uint64_t),
4021 VERIFY3U(0, ==, zap_update(os
, object
, propname
, sizeof (uint64_t),
4027 * Remove a random pair of entries.
4029 prop
= ztest_random(ZTEST_ZAP_MAX_PROPS
);
4030 (void) sprintf(propname
, "prop_%llu", (u_longlong_t
)prop
);
4031 (void) sprintf(txgname
, "txg_%llu", (u_longlong_t
)prop
);
4033 error
= zap_length(os
, object
, txgname
, &zl_intsize
, &zl_ints
);
4035 if (error
== ENOENT
)
4038 ASSERT3U(error
, ==, 0);
4040 tx
= dmu_tx_create(os
);
4041 dmu_tx_hold_zap(tx
, object
, B_TRUE
, NULL
);
4042 txg
= ztest_tx_assign(tx
, TXG_MIGHTWAIT
, FTAG
);
4045 VERIFY3U(0, ==, zap_remove(os
, object
, txgname
, tx
));
4046 VERIFY3U(0, ==, zap_remove(os
, object
, propname
, tx
));
4049 umem_free(od
, sizeof(ztest_od_t
));
4053 * Testcase to test the upgrading of a microzap to fatzap.
4056 ztest_fzap(ztest_ds_t
*zd
, uint64_t id
)
4058 objset_t
*os
= zd
->zd_os
;
4060 uint64_t object
, txg
;
4063 od
= umem_alloc(sizeof(ztest_od_t
), UMEM_NOFAIL
);
4064 ztest_od_init(od
, id
, FTAG
, 0, DMU_OT_ZAP_OTHER
, 0, 0);
4066 if (ztest_object_init(zd
, od
, sizeof (ztest_od_t
),
4067 !ztest_random(2)) != 0)
4069 object
= od
->od_object
;
4072 * Add entries to this ZAP and make sure it spills over
4073 * and gets upgraded to a fatzap. Also, since we are adding
4074 * 2050 entries we should see ptrtbl growth and leaf-block split.
4076 for (i
= 0; i
< 2050; i
++) {
4077 char name
[MAXNAMELEN
];
4082 (void) snprintf(name
, sizeof (name
), "fzap-%llu-%llu",
4083 (u_longlong_t
)id
, (u_longlong_t
)value
);
4085 tx
= dmu_tx_create(os
);
4086 dmu_tx_hold_zap(tx
, object
, B_TRUE
, name
);
4087 txg
= ztest_tx_assign(tx
, TXG_MIGHTWAIT
, FTAG
);
4090 error
= zap_add(os
, object
, name
, sizeof (uint64_t), 1,
4092 ASSERT(error
== 0 || error
== EEXIST
);
4096 umem_free(od
, sizeof(ztest_od_t
));
4101 ztest_zap_parallel(ztest_ds_t
*zd
, uint64_t id
)
4103 objset_t
*os
= zd
->zd_os
;
4105 uint64_t txg
, object
, count
, wsize
, wc
, zl_wsize
, zl_wc
;
4107 int i
, namelen
, error
;
4108 int micro
= ztest_random(2);
4109 char name
[20], string_value
[20];
4112 od
= umem_alloc(sizeof(ztest_od_t
), UMEM_NOFAIL
);
4113 ztest_od_init(od
, ID_PARALLEL
, FTAG
, micro
, DMU_OT_ZAP_OTHER
, 0, 0);
4115 if (ztest_object_init(zd
, od
, sizeof (ztest_od_t
), B_FALSE
) != 0) {
4116 umem_free(od
, sizeof(ztest_od_t
));
4120 object
= od
->od_object
;
4123 * Generate a random name of the form 'xxx.....' where each
4124 * x is a random printable character and the dots are dots.
4125 * There are 94 such characters, and the name length goes from
4126 * 6 to 20, so there are 94^3 * 15 = 12,458,760 possible names.
4128 namelen
= ztest_random(sizeof (name
) - 5) + 5 + 1;
4130 for (i
= 0; i
< 3; i
++)
4131 name
[i
] = '!' + ztest_random('~' - '!' + 1);
4132 for (; i
< namelen
- 1; i
++)
4136 if ((namelen
& 1) || micro
) {
4137 wsize
= sizeof (txg
);
4143 data
= string_value
;
4147 VERIFY(zap_count(os
, object
, &count
) == 0);
4148 ASSERT(count
!= -1ULL);
4151 * Select an operation: length, lookup, add, update, remove.
4153 i
= ztest_random(5);
4156 tx
= dmu_tx_create(os
);
4157 dmu_tx_hold_zap(tx
, object
, B_TRUE
, NULL
);
4158 txg
= ztest_tx_assign(tx
, TXG_MIGHTWAIT
, FTAG
);
4161 bcopy(name
, string_value
, namelen
);
4165 bzero(string_value
, namelen
);
4171 error
= zap_length(os
, object
, name
, &zl_wsize
, &zl_wc
);
4173 ASSERT3U(wsize
, ==, zl_wsize
);
4174 ASSERT3U(wc
, ==, zl_wc
);
4176 ASSERT3U(error
, ==, ENOENT
);
4181 error
= zap_lookup(os
, object
, name
, wsize
, wc
, data
);
4183 if (data
== string_value
&&
4184 bcmp(name
, data
, namelen
) != 0)
4185 fatal(0, "name '%s' != val '%s' len %d",
4186 name
, data
, namelen
);
4188 ASSERT3U(error
, ==, ENOENT
);
4193 error
= zap_add(os
, object
, name
, wsize
, wc
, data
, tx
);
4194 ASSERT(error
== 0 || error
== EEXIST
);
4198 VERIFY(zap_update(os
, object
, name
, wsize
, wc
, data
, tx
) == 0);
4202 error
= zap_remove(os
, object
, name
, tx
);
4203 ASSERT(error
== 0 || error
== ENOENT
);
4210 umem_free(od
, sizeof(ztest_od_t
));
4214 * Commit callback data.
4216 typedef struct ztest_cb_data
{
4217 list_node_t zcd_node
;
4219 int zcd_expected_err
;
4220 boolean_t zcd_added
;
4221 boolean_t zcd_called
;
4225 /* This is the actual commit callback function */
4227 ztest_commit_callback(void *arg
, int error
)
4229 ztest_cb_data_t
*data
= arg
;
4230 uint64_t synced_txg
;
4232 VERIFY(data
!= NULL
);
4233 VERIFY3S(data
->zcd_expected_err
, ==, error
);
4234 VERIFY(!data
->zcd_called
);
4236 synced_txg
= spa_last_synced_txg(data
->zcd_spa
);
4237 if (data
->zcd_txg
> synced_txg
)
4238 fatal(0, "commit callback of txg %" PRIu64
" called prematurely"
4239 ", last synced txg = %" PRIu64
"\n", data
->zcd_txg
,
4242 data
->zcd_called
= B_TRUE
;
4244 if (error
== ECANCELED
) {
4245 ASSERT3U(data
->zcd_txg
, ==, 0);
4246 ASSERT(!data
->zcd_added
);
4249 * The private callback data should be destroyed here, but
4250 * since we are going to check the zcd_called field after
4251 * dmu_tx_abort(), we will destroy it there.
4256 ASSERT(data
->zcd_added
);
4257 ASSERT3U(data
->zcd_txg
, !=, 0);
4259 (void) mutex_enter(&zcl
.zcl_callbacks_lock
);
4261 /* See if this cb was called more quickly */
4262 if ((synced_txg
- data
->zcd_txg
) < zc_min_txg_delay
)
4263 zc_min_txg_delay
= synced_txg
- data
->zcd_txg
;
4265 /* Remove our callback from the list */
4266 list_remove(&zcl
.zcl_callbacks
, data
);
4268 (void) mutex_exit(&zcl
.zcl_callbacks_lock
);
4270 umem_free(data
, sizeof (ztest_cb_data_t
));
4273 /* Allocate and initialize callback data structure */
4274 static ztest_cb_data_t
*
4275 ztest_create_cb_data(objset_t
*os
, uint64_t txg
)
4277 ztest_cb_data_t
*cb_data
;
4279 cb_data
= umem_zalloc(sizeof (ztest_cb_data_t
), UMEM_NOFAIL
);
4281 cb_data
->zcd_txg
= txg
;
4282 cb_data
->zcd_spa
= dmu_objset_spa(os
);
4283 list_link_init(&cb_data
->zcd_node
);
4289 * Commit callback test.
4292 ztest_dmu_commit_callbacks(ztest_ds_t
*zd
, uint64_t id
)
4294 objset_t
*os
= zd
->zd_os
;
4297 ztest_cb_data_t
*cb_data
[3], *tmp_cb
;
4298 uint64_t old_txg
, txg
;
4301 od
= umem_alloc(sizeof(ztest_od_t
), UMEM_NOFAIL
);
4302 ztest_od_init(od
, id
, FTAG
, 0, DMU_OT_UINT64_OTHER
, 0, 0);
4304 if (ztest_object_init(zd
, od
, sizeof (ztest_od_t
), B_FALSE
) != 0) {
4305 umem_free(od
, sizeof(ztest_od_t
));
4309 tx
= dmu_tx_create(os
);
4311 cb_data
[0] = ztest_create_cb_data(os
, 0);
4312 dmu_tx_callback_register(tx
, ztest_commit_callback
, cb_data
[0]);
4314 dmu_tx_hold_write(tx
, od
->od_object
, 0, sizeof (uint64_t));
4316 /* Every once in a while, abort the transaction on purpose */
4317 if (ztest_random(100) == 0)
4321 error
= dmu_tx_assign(tx
, TXG_NOWAIT
);
4323 txg
= error
? 0 : dmu_tx_get_txg(tx
);
4325 cb_data
[0]->zcd_txg
= txg
;
4326 cb_data
[1] = ztest_create_cb_data(os
, txg
);
4327 dmu_tx_callback_register(tx
, ztest_commit_callback
, cb_data
[1]);
4331 * It's not a strict requirement to call the registered
4332 * callbacks from inside dmu_tx_abort(), but that's what
4333 * it's supposed to happen in the current implementation
4334 * so we will check for that.
4336 for (i
= 0; i
< 2; i
++) {
4337 cb_data
[i
]->zcd_expected_err
= ECANCELED
;
4338 VERIFY(!cb_data
[i
]->zcd_called
);
4343 for (i
= 0; i
< 2; i
++) {
4344 VERIFY(cb_data
[i
]->zcd_called
);
4345 umem_free(cb_data
[i
], sizeof (ztest_cb_data_t
));
4348 umem_free(od
, sizeof(ztest_od_t
));
4352 cb_data
[2] = ztest_create_cb_data(os
, txg
);
4353 dmu_tx_callback_register(tx
, ztest_commit_callback
, cb_data
[2]);
4356 * Read existing data to make sure there isn't a future leak.
4358 VERIFY(0 == dmu_read(os
, od
->od_object
, 0, sizeof (uint64_t),
4359 &old_txg
, DMU_READ_PREFETCH
));
4362 fatal(0, "future leak: got %" PRIu64
", open txg is %" PRIu64
,
4365 dmu_write(os
, od
->od_object
, 0, sizeof (uint64_t), &txg
, tx
);
4367 (void) mutex_enter(&zcl
.zcl_callbacks_lock
);
4370 * Since commit callbacks don't have any ordering requirement and since
4371 * it is theoretically possible for a commit callback to be called
4372 * after an arbitrary amount of time has elapsed since its txg has been
4373 * synced, it is difficult to reliably determine whether a commit
4374 * callback hasn't been called due to high load or due to a flawed
4377 * In practice, we will assume that if after a certain number of txgs a
4378 * commit callback hasn't been called, then most likely there's an
4379 * implementation bug..
4381 tmp_cb
= list_head(&zcl
.zcl_callbacks
);
4382 if (tmp_cb
!= NULL
&&
4383 tmp_cb
->zcd_txg
+ ZTEST_COMMIT_CB_THRESH
< txg
) {
4384 fatal(0, "Commit callback threshold exceeded, oldest txg: %"
4385 PRIu64
", open txg: %" PRIu64
"\n", tmp_cb
->zcd_txg
, txg
);
4389 * Let's find the place to insert our callbacks.
4391 * Even though the list is ordered by txg, it is possible for the
4392 * insertion point to not be the end because our txg may already be
4393 * quiescing at this point and other callbacks in the open txg
4394 * (from other objsets) may have sneaked in.
4396 tmp_cb
= list_tail(&zcl
.zcl_callbacks
);
4397 while (tmp_cb
!= NULL
&& tmp_cb
->zcd_txg
> txg
)
4398 tmp_cb
= list_prev(&zcl
.zcl_callbacks
, tmp_cb
);
4400 /* Add the 3 callbacks to the list */
4401 for (i
= 0; i
< 3; i
++) {
4403 list_insert_head(&zcl
.zcl_callbacks
, cb_data
[i
]);
4405 list_insert_after(&zcl
.zcl_callbacks
, tmp_cb
,
4408 cb_data
[i
]->zcd_added
= B_TRUE
;
4409 VERIFY(!cb_data
[i
]->zcd_called
);
4411 tmp_cb
= cb_data
[i
];
4416 (void) mutex_exit(&zcl
.zcl_callbacks_lock
);
4420 umem_free(od
, sizeof(ztest_od_t
));
4425 ztest_dsl_prop_get_set(ztest_ds_t
*zd
, uint64_t id
)
4427 zfs_prop_t proplist
[] = {
4429 ZFS_PROP_COMPRESSION
,
4433 ztest_shared_t
*zs
= ztest_shared
;
4436 (void) rw_enter(&zs
->zs_name_lock
, RW_READER
);
4438 for (p
= 0; p
< sizeof (proplist
) / sizeof (proplist
[0]); p
++)
4439 (void) ztest_dsl_prop_set_uint64(zd
->zd_name
, proplist
[p
],
4440 ztest_random_dsl_prop(proplist
[p
]), (int)ztest_random(2));
4442 (void) rw_exit(&zs
->zs_name_lock
);
4447 ztest_spa_prop_get_set(ztest_ds_t
*zd
, uint64_t id
)
4449 ztest_shared_t
*zs
= ztest_shared
;
4450 nvlist_t
*props
= NULL
;
4452 (void) rw_enter(&zs
->zs_name_lock
, RW_READER
);
4454 (void) ztest_spa_prop_set_uint64(zs
, ZPOOL_PROP_DEDUPDITTO
,
4455 ZIO_DEDUPDITTO_MIN
+ ztest_random(ZIO_DEDUPDITTO_MIN
));
4457 VERIFY3U(spa_prop_get(zs
->zs_spa
, &props
), ==, 0);
4459 if (zopt_verbose
>= 6)
4460 dump_nvlist(props
, 4);
4464 (void) rw_exit(&zs
->zs_name_lock
);
4468 * Test snapshot hold/release and deferred destroy.
4471 ztest_dmu_snapshot_hold(ztest_ds_t
*zd
, uint64_t id
)
4474 objset_t
*os
= zd
->zd_os
;
4478 char clonename
[100];
4480 char osname
[MAXNAMELEN
];
4482 (void) rw_enter(&ztest_shared
->zs_name_lock
, RW_READER
);
4484 dmu_objset_name(os
, osname
);
4486 (void) snprintf(snapname
, 100, "sh1_%llu", (u_longlong_t
)id
);
4487 (void) snprintf(fullname
, 100, "%s@%s", osname
, snapname
);
4488 (void) snprintf(clonename
, 100, "%s/ch1_%llu",osname
,(u_longlong_t
)id
);
4489 (void) snprintf(tag
, 100, "tag_%llu", (u_longlong_t
)id
);
4492 * Clean up from any previous run.
4494 (void) dmu_objset_destroy(clonename
, B_FALSE
);
4495 (void) dsl_dataset_user_release(osname
, snapname
, tag
, B_FALSE
);
4496 (void) dmu_objset_destroy(fullname
, B_FALSE
);
4499 * Create snapshot, clone it, mark snap for deferred destroy,
4500 * destroy clone, verify snap was also destroyed.
4502 error
= dmu_objset_snapshot(osname
, snapname
, NULL
, NULL
, FALSE
,
4505 if (error
== ENOSPC
) {
4506 ztest_record_enospc("dmu_objset_snapshot");
4509 fatal(0, "dmu_objset_snapshot(%s) = %d", fullname
, error
);
4512 error
= dmu_objset_hold(fullname
, FTAG
, &origin
);
4514 fatal(0, "dmu_objset_hold(%s) = %d", fullname
, error
);
4516 error
= dmu_objset_clone(clonename
, dmu_objset_ds(origin
), 0);
4517 dmu_objset_rele(origin
, FTAG
);
4519 if (error
== ENOSPC
) {
4520 ztest_record_enospc("dmu_objset_clone");
4523 fatal(0, "dmu_objset_clone(%s) = %d", clonename
, error
);
4526 error
= dmu_objset_destroy(fullname
, B_TRUE
);
4528 fatal(0, "dmu_objset_destroy(%s, B_TRUE) = %d",
4532 error
= dmu_objset_destroy(clonename
, B_FALSE
);
4534 fatal(0, "dmu_objset_destroy(%s) = %d", clonename
, error
);
4536 error
= dmu_objset_hold(fullname
, FTAG
, &origin
);
4537 if (error
!= ENOENT
)
4538 fatal(0, "dmu_objset_hold(%s) = %d", fullname
, error
);
4541 * Create snapshot, add temporary hold, verify that we can't
4542 * destroy a held snapshot, mark for deferred destroy,
4543 * release hold, verify snapshot was destroyed.
4545 error
= dmu_objset_snapshot(osname
, snapname
, NULL
, NULL
, FALSE
,
4548 if (error
== ENOSPC
) {
4549 ztest_record_enospc("dmu_objset_snapshot");
4552 fatal(0, "dmu_objset_snapshot(%s) = %d", fullname
, error
);
4555 error
= dsl_dataset_user_hold(osname
, snapname
, tag
, B_FALSE
,
4558 fatal(0, "dsl_dataset_user_hold(%s)", fullname
, tag
);
4560 error
= dmu_objset_destroy(fullname
, B_FALSE
);
4561 if (error
!= EBUSY
) {
4562 fatal(0, "dmu_objset_destroy(%s, B_FALSE) = %d",
4566 error
= dmu_objset_destroy(fullname
, B_TRUE
);
4568 fatal(0, "dmu_objset_destroy(%s, B_TRUE) = %d",
4572 error
= dsl_dataset_user_release(osname
, snapname
, tag
, B_FALSE
);
4574 fatal(0, "dsl_dataset_user_release(%s)", fullname
, tag
);
4576 VERIFY(dmu_objset_hold(fullname
, FTAG
, &origin
) == ENOENT
);
4579 (void) rw_exit(&ztest_shared
->zs_name_lock
);
4583 * Inject random faults into the on-disk data.
4587 ztest_fault_inject(ztest_ds_t
*zd
, uint64_t id
)
4589 ztest_shared_t
*zs
= ztest_shared
;
4590 spa_t
*spa
= zs
->zs_spa
;
4594 uint64_t bad
= 0x1990c0ffeedecadeull
;
4599 int bshift
= SPA_MAXBLOCKSHIFT
+ 2; /* don't scrog all labels */
4605 boolean_t islog
= B_FALSE
;
4607 path0
= umem_alloc(MAXPATHLEN
, UMEM_NOFAIL
);
4608 pathrand
= umem_alloc(MAXPATHLEN
, UMEM_NOFAIL
);
4610 mutex_enter(&zs
->zs_vdev_lock
);
4611 maxfaults
= MAXFAULTS();
4612 leaves
= MAX(zs
->zs_mirrors
, 1) * zopt_raidz
;
4613 mirror_save
= zs
->zs_mirrors
;
4614 mutex_exit(&zs
->zs_vdev_lock
);
4616 ASSERT(leaves
>= 1);
4619 * We need SCL_STATE here because we're going to look at vd0->vdev_tsd.
4621 spa_config_enter(spa
, SCL_STATE
, FTAG
, RW_READER
);
4623 if (ztest_random(2) == 0) {
4625 * Inject errors on a normal data device or slog device.
4627 top
= ztest_random_vdev_top(spa
, B_TRUE
);
4628 leaf
= ztest_random(leaves
) + zs
->zs_splits
;
4631 * Generate paths to the first leaf in this top-level vdev,
4632 * and to the random leaf we selected. We'll induce transient
4633 * write failures and random online/offline activity on leaf 0,
4634 * and we'll write random garbage to the randomly chosen leaf.
4636 (void) snprintf(path0
, MAXPATHLEN
, ztest_dev_template
,
4637 zopt_dir
, zopt_pool
, top
* leaves
+ zs
->zs_splits
);
4638 (void) snprintf(pathrand
, MAXPATHLEN
, ztest_dev_template
,
4639 zopt_dir
, zopt_pool
, top
* leaves
+ leaf
);
4641 vd0
= vdev_lookup_by_path(spa
->spa_root_vdev
, path0
);
4642 if (vd0
!= NULL
&& vd0
->vdev_top
->vdev_islog
)
4645 if (vd0
!= NULL
&& maxfaults
!= 1) {
4647 * Make vd0 explicitly claim to be unreadable,
4648 * or unwriteable, or reach behind its back
4649 * and close the underlying fd. We can do this if
4650 * maxfaults == 0 because we'll fail and reexecute,
4651 * and we can do it if maxfaults >= 2 because we'll
4652 * have enough redundancy. If maxfaults == 1, the
4653 * combination of this with injection of random data
4654 * corruption below exceeds the pool's fault tolerance.
4656 vdev_file_t
*vf
= vd0
->vdev_tsd
;
4658 if (vf
!= NULL
&& ztest_random(3) == 0) {
4659 (void) close(vf
->vf_vnode
->v_fd
);
4660 vf
->vf_vnode
->v_fd
= -1;
4661 } else if (ztest_random(2) == 0) {
4662 vd0
->vdev_cant_read
= B_TRUE
;
4664 vd0
->vdev_cant_write
= B_TRUE
;
4666 guid0
= vd0
->vdev_guid
;
4670 * Inject errors on an l2cache device.
4672 spa_aux_vdev_t
*sav
= &spa
->spa_l2cache
;
4674 if (sav
->sav_count
== 0) {
4675 spa_config_exit(spa
, SCL_STATE
, FTAG
);
4678 vd0
= sav
->sav_vdevs
[ztest_random(sav
->sav_count
)];
4679 guid0
= vd0
->vdev_guid
;
4680 (void) strcpy(path0
, vd0
->vdev_path
);
4681 (void) strcpy(pathrand
, vd0
->vdev_path
);
4685 maxfaults
= INT_MAX
; /* no limit on cache devices */
4688 spa_config_exit(spa
, SCL_STATE
, FTAG
);
4691 * If we can tolerate two or more faults, or we're dealing
4692 * with a slog, randomly online/offline vd0.
4694 if ((maxfaults
>= 2 || islog
) && guid0
!= 0) {
4695 if (ztest_random(10) < 6) {
4696 int flags
= (ztest_random(2) == 0 ?
4697 ZFS_OFFLINE_TEMPORARY
: 0);
4700 * We have to grab the zs_name_lock as writer to
4701 * prevent a race between offlining a slog and
4702 * destroying a dataset. Offlining the slog will
4703 * grab a reference on the dataset which may cause
4704 * dmu_objset_destroy() to fail with EBUSY thus
4705 * leaving the dataset in an inconsistent state.
4708 (void) rw_enter(&ztest_shared
->zs_name_lock
,
4711 VERIFY(vdev_offline(spa
, guid0
, flags
) != EBUSY
);
4714 (void) rw_exit(&ztest_shared
->zs_name_lock
);
4716 (void) vdev_online(spa
, guid0
, 0, NULL
);
4724 * We have at least single-fault tolerance, so inject data corruption.
4726 fd
= open(pathrand
, O_RDWR
);
4728 if (fd
== -1) /* we hit a gap in the device namespace */
4731 fsize
= lseek(fd
, 0, SEEK_END
);
4733 while (--iters
!= 0) {
4734 offset
= ztest_random(fsize
/ (leaves
<< bshift
)) *
4735 (leaves
<< bshift
) + (leaf
<< bshift
) +
4736 (ztest_random(1ULL << (bshift
- 1)) & -8ULL);
4738 if (offset
>= fsize
)
4741 mutex_enter(&zs
->zs_vdev_lock
);
4742 if (mirror_save
!= zs
->zs_mirrors
) {
4743 mutex_exit(&zs
->zs_vdev_lock
);
4748 if (pwrite(fd
, &bad
, sizeof (bad
), offset
) != sizeof (bad
))
4749 fatal(1, "can't inject bad word at 0x%llx in %s",
4752 mutex_exit(&zs
->zs_vdev_lock
);
4754 if (zopt_verbose
>= 7)
4755 (void) printf("injected bad word into %s,"
4756 " offset 0x%llx\n", pathrand
, (u_longlong_t
)offset
);
4761 umem_free(path0
, MAXPATHLEN
);
4762 umem_free(pathrand
, MAXPATHLEN
);
4766 * Verify that DDT repair works as expected.
4769 ztest_ddt_repair(ztest_ds_t
*zd
, uint64_t id
)
4771 ztest_shared_t
*zs
= ztest_shared
;
4772 spa_t
*spa
= zs
->zs_spa
;
4773 objset_t
*os
= zd
->zd_os
;
4775 uint64_t object
, blocksize
, txg
, pattern
, psize
;
4776 enum zio_checksum checksum
= spa_dedup_checksum(spa
);
4781 int copies
= 2 * ZIO_DEDUPDITTO_MIN
;
4784 blocksize
= ztest_random_blocksize();
4785 blocksize
= MIN(blocksize
, 2048); /* because we write so many */
4787 od
= umem_alloc(sizeof(ztest_od_t
), UMEM_NOFAIL
);
4788 ztest_od_init(od
, id
, FTAG
, 0, DMU_OT_UINT64_OTHER
, blocksize
, 0);
4790 if (ztest_object_init(zd
, od
, sizeof (ztest_od_t
), B_FALSE
) != 0) {
4791 umem_free(od
, sizeof(ztest_od_t
));
4796 * Take the name lock as writer to prevent anyone else from changing
4797 * the pool and dataset properies we need to maintain during this test.
4799 (void) rw_enter(&zs
->zs_name_lock
, RW_WRITER
);
4801 if (ztest_dsl_prop_set_uint64(zd
->zd_name
, ZFS_PROP_DEDUP
, checksum
,
4803 ztest_dsl_prop_set_uint64(zd
->zd_name
, ZFS_PROP_COPIES
, 1,
4805 (void) rw_exit(&zs
->zs_name_lock
);
4806 umem_free(od
, sizeof(ztest_od_t
));
4810 object
= od
[0].od_object
;
4811 blocksize
= od
[0].od_blocksize
;
4812 pattern
= zs
->zs_guid
^ dmu_objset_fsid_guid(os
);
4814 ASSERT(object
!= 0);
4816 tx
= dmu_tx_create(os
);
4817 dmu_tx_hold_write(tx
, object
, 0, copies
* blocksize
);
4818 txg
= ztest_tx_assign(tx
, TXG_WAIT
, FTAG
);
4820 (void) rw_exit(&zs
->zs_name_lock
);
4821 umem_free(od
, sizeof(ztest_od_t
));
4826 * Write all the copies of our block.
4828 for (i
= 0; i
< copies
; i
++) {
4829 uint64_t offset
= i
* blocksize
;
4830 VERIFY(dmu_buf_hold(os
, object
, offset
, FTAG
, &db
,
4831 DMU_READ_NO_PREFETCH
) == 0);
4832 ASSERT(db
->db_offset
== offset
);
4833 ASSERT(db
->db_size
== blocksize
);
4834 ASSERT(ztest_pattern_match(db
->db_data
, db
->db_size
, pattern
) ||
4835 ztest_pattern_match(db
->db_data
, db
->db_size
, 0ULL));
4836 dmu_buf_will_fill(db
, tx
);
4837 ztest_pattern_set(db
->db_data
, db
->db_size
, pattern
);
4838 dmu_buf_rele(db
, FTAG
);
4842 txg_wait_synced(spa_get_dsl(spa
), txg
);
4845 * Find out what block we got.
4847 VERIFY(dmu_buf_hold(os
, object
, 0, FTAG
, &db
,
4848 DMU_READ_NO_PREFETCH
) == 0);
4849 blk
= *((dmu_buf_impl_t
*)db
)->db_blkptr
;
4850 dmu_buf_rele(db
, FTAG
);
4853 * Damage the block. Dedup-ditto will save us when we read it later.
4855 psize
= BP_GET_PSIZE(&blk
);
4856 buf
= zio_buf_alloc(psize
);
4857 ztest_pattern_set(buf
, psize
, ~pattern
);
4859 (void) zio_wait(zio_rewrite(NULL
, spa
, 0, &blk
,
4860 buf
, psize
, NULL
, NULL
, ZIO_PRIORITY_SYNC_WRITE
,
4861 ZIO_FLAG_CANFAIL
| ZIO_FLAG_INDUCE_DAMAGE
, NULL
));
4863 zio_buf_free(buf
, psize
);
4865 (void) rw_exit(&zs
->zs_name_lock
);
4866 umem_free(od
, sizeof(ztest_od_t
));
4874 ztest_scrub(ztest_ds_t
*zd
, uint64_t id
)
4876 ztest_shared_t
*zs
= ztest_shared
;
4877 spa_t
*spa
= zs
->zs_spa
;
4879 (void) spa_scan(spa
, POOL_SCAN_SCRUB
);
4880 (void) poll(NULL
, 0, 100); /* wait a moment, then force a restart */
4881 (void) spa_scan(spa
, POOL_SCAN_SCRUB
);
4885 * Change the guid for the pool.
4889 ztest_reguid(ztest_ds_t
*zd
, uint64_t id
)
4891 ztest_shared_t
*zs
= ztest_shared
;
4892 spa_t
*spa
= zs
->zs_spa
;
4893 uint64_t orig
, load
;
4895 orig
= spa_guid(spa
);
4896 load
= spa_load_guid(spa
);
4897 if (spa_change_guid(spa
) != 0)
4900 if (zopt_verbose
>= 3) {
4901 (void) printf("Changed guid old %llu -> %llu\n",
4902 (u_longlong_t
)orig
, (u_longlong_t
)spa_guid(spa
));
4905 VERIFY3U(orig
, !=, spa_guid(spa
));
4906 VERIFY3U(load
, ==, spa_load_guid(spa
));
4910 * Rename the pool to a different name and then rename it back.
4914 ztest_spa_rename(ztest_ds_t
*zd
, uint64_t id
)
4916 ztest_shared_t
*zs
= ztest_shared
;
4917 char *oldname
, *newname
;
4920 (void) rw_enter(&zs
->zs_name_lock
, RW_WRITER
);
4922 oldname
= zs
->zs_pool
;
4923 newname
= umem_alloc(strlen(oldname
) + 5, UMEM_NOFAIL
);
4924 (void) strcpy(newname
, oldname
);
4925 (void) strcat(newname
, "_tmp");
4930 VERIFY3U(0, ==, spa_rename(oldname
, newname
));
4933 * Try to open it under the old name, which shouldn't exist
4935 VERIFY3U(ENOENT
, ==, spa_open(oldname
, &spa
, FTAG
));
4938 * Open it under the new name and make sure it's still the same spa_t.
4940 VERIFY3U(0, ==, spa_open(newname
, &spa
, FTAG
));
4942 ASSERT(spa
== zs
->zs_spa
);
4943 spa_close(spa
, FTAG
);
4946 * Rename it back to the original
4948 VERIFY3U(0, ==, spa_rename(newname
, oldname
));
4951 * Make sure it can still be opened
4953 VERIFY3U(0, ==, spa_open(oldname
, &spa
, FTAG
));
4955 ASSERT(spa
== zs
->zs_spa
);
4956 spa_close(spa
, FTAG
);
4958 umem_free(newname
, strlen(newname
) + 1);
4960 (void) rw_exit(&zs
->zs_name_lock
);
4964 * Verify pool integrity by running zdb.
4967 ztest_run_zdb(char *pool
)
4975 bin
= umem_alloc(MAXPATHLEN
+ MAXNAMELEN
+ 20, UMEM_NOFAIL
);
4976 zdb
= umem_alloc(MAXPATHLEN
+ MAXNAMELEN
+ 20, UMEM_NOFAIL
);
4977 zbuf
= umem_alloc(1024, UMEM_NOFAIL
);
4979 VERIFY(realpath(getexecname(), bin
) != NULL
);
4980 if (strncmp(bin
, "/usr/sbin/ztest", 15) == 0) {
4981 strcpy(bin
, "/usr/sbin/zdb"); /* Installed */
4982 } else if (strncmp(bin
, "/sbin/ztest", 11) == 0) {
4983 strcpy(bin
, "/sbin/zdb"); /* Installed */
4985 strstr(bin
, "/ztest/")[0] = '\0'; /* In-tree */
4986 strcat(bin
, "/zdb/zdb");
4990 "%s -bcc%s%s -U %s %s",
4992 zopt_verbose
>= 3 ? "s" : "",
4993 zopt_verbose
>= 4 ? "v" : "",
4997 if (zopt_verbose
>= 5)
4998 (void) printf("Executing %s\n", strstr(zdb
, "zdb "));
5000 fp
= popen(zdb
, "r");
5002 while (fgets(zbuf
, 1024, fp
) != NULL
)
5003 if (zopt_verbose
>= 3)
5004 (void) printf("%s", zbuf
);
5006 status
= pclose(fp
);
5011 ztest_dump_core
= 0;
5012 if (WIFEXITED(status
))
5013 fatal(0, "'%s' exit code %d", zdb
, WEXITSTATUS(status
));
5015 fatal(0, "'%s' died with signal %d", zdb
, WTERMSIG(status
));
5017 umem_free(bin
, MAXPATHLEN
+ MAXNAMELEN
+ 20);
5018 umem_free(zdb
, MAXPATHLEN
+ MAXNAMELEN
+ 20);
5019 umem_free(zbuf
, 1024);
5023 ztest_walk_pool_directory(char *header
)
5027 if (zopt_verbose
>= 6)
5028 (void) printf("%s\n", header
);
5030 mutex_enter(&spa_namespace_lock
);
5031 while ((spa
= spa_next(spa
)) != NULL
)
5032 if (zopt_verbose
>= 6)
5033 (void) printf("\t%s\n", spa_name(spa
));
5034 mutex_exit(&spa_namespace_lock
);
5038 ztest_spa_import_export(char *oldname
, char *newname
)
5040 nvlist_t
*config
, *newconfig
;
5044 if (zopt_verbose
>= 4) {
5045 (void) printf("import/export: old = %s, new = %s\n",
5050 * Clean up from previous runs.
5052 (void) spa_destroy(newname
);
5055 * Get the pool's configuration and guid.
5057 VERIFY3U(0, ==, spa_open(oldname
, &spa
, FTAG
));
5060 * Kick off a scrub to tickle scrub/export races.
5062 if (ztest_random(2) == 0)
5063 (void) spa_scan(spa
, POOL_SCAN_SCRUB
);
5065 pool_guid
= spa_guid(spa
);
5066 spa_close(spa
, FTAG
);
5068 ztest_walk_pool_directory("pools before export");
5073 VERIFY3U(0, ==, spa_export(oldname
, &config
, B_FALSE
, B_FALSE
));
5075 ztest_walk_pool_directory("pools after export");
5080 newconfig
= spa_tryimport(config
);
5081 ASSERT(newconfig
!= NULL
);
5082 nvlist_free(newconfig
);
5085 * Import it under the new name.
5087 VERIFY3U(0, ==, spa_import(newname
, config
, NULL
, 0));
5089 ztest_walk_pool_directory("pools after import");
5092 * Try to import it again -- should fail with EEXIST.
5094 VERIFY3U(EEXIST
, ==, spa_import(newname
, config
, NULL
, 0));
5097 * Try to import it under a different name -- should fail with EEXIST.
5099 VERIFY3U(EEXIST
, ==, spa_import(oldname
, config
, NULL
, 0));
5102 * Verify that the pool is no longer visible under the old name.
5104 VERIFY3U(ENOENT
, ==, spa_open(oldname
, &spa
, FTAG
));
5107 * Verify that we can open and close the pool using the new name.
5109 VERIFY3U(0, ==, spa_open(newname
, &spa
, FTAG
));
5110 ASSERT(pool_guid
== spa_guid(spa
));
5111 spa_close(spa
, FTAG
);
5113 nvlist_free(config
);
5117 ztest_resume(spa_t
*spa
)
5119 if (spa_suspended(spa
) && zopt_verbose
>= 6)
5120 (void) printf("resuming from suspended state\n");
5121 spa_vdev_state_enter(spa
, SCL_NONE
);
5122 vdev_clear(spa
, NULL
);
5123 (void) spa_vdev_state_exit(spa
, NULL
, 0);
5124 (void) zio_resume(spa
);
5128 ztest_resume_thread(void *arg
)
5132 while (!ztest_exiting
) {
5133 if (spa_suspended(spa
))
5135 (void) poll(NULL
, 0, 100);
5146 ztest_deadman_alarm(int sig
)
5148 fatal(0, "failed to complete within %d seconds of deadline", GRACE
);
5152 ztest_execute(ztest_info_t
*zi
, uint64_t id
)
5154 ztest_shared_t
*zs
= ztest_shared
;
5155 ztest_ds_t
*zd
= &zs
->zs_zd
[id
% zopt_datasets
];
5156 hrtime_t functime
= gethrtime();
5159 for (i
= 0; i
< zi
->zi_iters
; i
++)
5160 zi
->zi_func(zd
, id
);
5162 functime
= gethrtime() - functime
;
5164 atomic_add_64(&zi
->zi_call_count
, 1);
5165 atomic_add_64(&zi
->zi_call_time
, functime
);
5167 if (zopt_verbose
>= 4) {
5169 (void) dladdr((void *)zi
->zi_func
, &dli
);
5170 (void) printf("%6.2f sec in %s\n",
5171 (double)functime
/ NANOSEC
, dli
.dli_sname
);
5176 ztest_thread(void *arg
)
5178 uint64_t id
= (uintptr_t)arg
;
5179 ztest_shared_t
*zs
= ztest_shared
;
5184 while ((now
= gethrtime()) < zs
->zs_thread_stop
) {
5186 * See if it's time to force a crash.
5188 if (now
> zs
->zs_thread_kill
)
5192 * If we're getting ENOSPC with some regularity, stop.
5194 if (zs
->zs_enospc_count
> 10)
5198 * Pick a random function to execute.
5200 zi
= &zs
->zs_info
[ztest_random(ZTEST_FUNCS
)];
5201 call_next
= zi
->zi_call_next
;
5203 if (now
>= call_next
&&
5204 atomic_cas_64(&zi
->zi_call_next
, call_next
, call_next
+
5205 ztest_random(2 * zi
->zi_interval
[0] + 1)) == call_next
)
5206 ztest_execute(zi
, id
);
5215 ztest_dataset_name(char *dsname
, char *pool
, int d
)
5217 (void) snprintf(dsname
, MAXNAMELEN
, "%s/ds_%d", pool
, d
);
5221 ztest_dataset_destroy(ztest_shared_t
*zs
, int d
)
5223 char name
[MAXNAMELEN
];
5226 ztest_dataset_name(name
, zs
->zs_pool
, d
);
5228 if (zopt_verbose
>= 3)
5229 (void) printf("Destroying %s to free up space\n", name
);
5232 * Cleanup any non-standard clones and snapshots. In general,
5233 * ztest thread t operates on dataset (t % zopt_datasets),
5234 * so there may be more than one thing to clean up.
5236 for (t
= d
; t
< zopt_threads
; t
+= zopt_datasets
)
5237 ztest_dsl_dataset_cleanup(name
, t
);
5239 (void) dmu_objset_find(name
, ztest_objset_destroy_cb
, NULL
,
5240 DS_FIND_SNAPSHOTS
| DS_FIND_CHILDREN
);
5244 ztest_dataset_dirobj_verify(ztest_ds_t
*zd
)
5246 uint64_t usedobjs
, dirobjs
, scratch
;
5249 * ZTEST_DIROBJ is the object directory for the entire dataset.
5250 * Therefore, the number of objects in use should equal the
5251 * number of ZTEST_DIROBJ entries, +1 for ZTEST_DIROBJ itself.
5252 * If not, we have an object leak.
5254 * Note that we can only check this in ztest_dataset_open(),
5255 * when the open-context and syncing-context values agree.
5256 * That's because zap_count() returns the open-context value,
5257 * while dmu_objset_space() returns the rootbp fill count.
5259 VERIFY3U(0, ==, zap_count(zd
->zd_os
, ZTEST_DIROBJ
, &dirobjs
));
5260 dmu_objset_space(zd
->zd_os
, &scratch
, &scratch
, &usedobjs
, &scratch
);
5261 ASSERT3U(dirobjs
+ 1, ==, usedobjs
);
5265 ztest_dataset_open(ztest_shared_t
*zs
, int d
)
5267 ztest_ds_t
*zd
= &zs
->zs_zd
[d
];
5268 uint64_t committed_seq
= zd
->zd_seq
;
5271 char name
[MAXNAMELEN
];
5274 ztest_dataset_name(name
, zs
->zs_pool
, d
);
5276 (void) rw_enter(&zs
->zs_name_lock
, RW_READER
);
5278 error
= ztest_dataset_create(name
);
5279 if (error
== ENOSPC
) {
5280 (void) rw_exit(&zs
->zs_name_lock
);
5281 ztest_record_enospc(FTAG
);
5284 ASSERT(error
== 0 || error
== EEXIST
);
5286 VERIFY3U(dmu_objset_hold(name
, zd
, &os
), ==, 0);
5287 (void) rw_exit(&zs
->zs_name_lock
);
5289 ztest_zd_init(zd
, os
);
5291 zilog
= zd
->zd_zilog
;
5293 if (zilog
->zl_header
->zh_claim_lr_seq
!= 0 &&
5294 zilog
->zl_header
->zh_claim_lr_seq
< committed_seq
)
5295 fatal(0, "missing log records: claimed %llu < committed %llu",
5296 zilog
->zl_header
->zh_claim_lr_seq
, committed_seq
);
5298 ztest_dataset_dirobj_verify(zd
);
5300 zil_replay(os
, zd
, ztest_replay_vector
);
5302 ztest_dataset_dirobj_verify(zd
);
5304 if (zopt_verbose
>= 6)
5305 (void) printf("%s replay %llu blocks, %llu records, seq %llu\n",
5307 (u_longlong_t
)zilog
->zl_parse_blk_count
,
5308 (u_longlong_t
)zilog
->zl_parse_lr_count
,
5309 (u_longlong_t
)zilog
->zl_replaying_seq
);
5311 zilog
= zil_open(os
, ztest_get_data
);
5313 if (zilog
->zl_replaying_seq
!= 0 &&
5314 zilog
->zl_replaying_seq
< committed_seq
)
5315 fatal(0, "missing log records: replayed %llu < committed %llu",
5316 zilog
->zl_replaying_seq
, committed_seq
);
5322 ztest_dataset_close(ztest_shared_t
*zs
, int d
)
5324 ztest_ds_t
*zd
= &zs
->zs_zd
[d
];
5326 zil_close(zd
->zd_zilog
);
5327 dmu_objset_rele(zd
->zd_os
, zd
);
5333 * Kick off threads to run tests on all datasets in parallel.
5336 ztest_run(ztest_shared_t
*zs
)
5341 kthread_t
*resume_thread
;
5346 ztest_exiting
= B_FALSE
;
5349 * Initialize parent/child shared state.
5351 mutex_init(&zs
->zs_vdev_lock
, NULL
, MUTEX_DEFAULT
, NULL
);
5352 rw_init(&zs
->zs_name_lock
, NULL
, RW_DEFAULT
, NULL
);
5354 zs
->zs_thread_start
= gethrtime();
5355 zs
->zs_thread_stop
= zs
->zs_thread_start
+ zopt_passtime
* NANOSEC
;
5356 zs
->zs_thread_stop
= MIN(zs
->zs_thread_stop
, zs
->zs_proc_stop
);
5357 zs
->zs_thread_kill
= zs
->zs_thread_stop
;
5358 if (ztest_random(100) < zopt_killrate
)
5359 zs
->zs_thread_kill
-= ztest_random(zopt_passtime
* NANOSEC
);
5361 mutex_init(&zcl
.zcl_callbacks_lock
, NULL
, MUTEX_DEFAULT
, NULL
);
5363 list_create(&zcl
.zcl_callbacks
, sizeof (ztest_cb_data_t
),
5364 offsetof(ztest_cb_data_t
, zcd_node
));
5369 kernel_init(FREAD
| FWRITE
);
5370 VERIFY(spa_open(zs
->zs_pool
, &spa
, FTAG
) == 0);
5371 spa
->spa_debug
= B_TRUE
;
5374 VERIFY3U(0, ==, dmu_objset_hold(zs
->zs_pool
, FTAG
, &os
));
5375 zs
->zs_guid
= dmu_objset_fsid_guid(os
);
5376 dmu_objset_rele(os
, FTAG
);
5378 spa
->spa_dedup_ditto
= 2 * ZIO_DEDUPDITTO_MIN
;
5381 * We don't expect the pool to suspend unless maxfaults == 0,
5382 * in which case ztest_fault_inject() temporarily takes away
5383 * the only valid replica.
5385 if (MAXFAULTS() == 0)
5386 spa
->spa_failmode
= ZIO_FAILURE_MODE_WAIT
;
5388 spa
->spa_failmode
= ZIO_FAILURE_MODE_PANIC
;
5391 * Create a thread to periodically resume suspended I/O.
5393 VERIFY3P((resume_thread
= zk_thread_create(NULL
, 0,
5394 (thread_func_t
)ztest_resume_thread
, spa
, TS_RUN
, NULL
, 0, 0,
5395 PTHREAD_CREATE_JOINABLE
)), !=, NULL
);
5398 * Set a deadman alarm to abort() if we hang.
5400 signal(SIGALRM
, ztest_deadman_alarm
);
5401 alarm((zs
->zs_thread_stop
- zs
->zs_thread_start
) / NANOSEC
+ GRACE
);
5404 * Verify that we can safely inquire about about any object,
5405 * whether it's allocated or not. To make it interesting,
5406 * we probe a 5-wide window around each power of two.
5407 * This hits all edge cases, including zero and the max.
5409 for (t
= 0; t
< 64; t
++) {
5410 for (d
= -5; d
<= 5; d
++) {
5411 error
= dmu_object_info(spa
->spa_meta_objset
,
5412 (1ULL << t
) + d
, NULL
);
5413 ASSERT(error
== 0 || error
== ENOENT
||
5419 * If we got any ENOSPC errors on the previous run, destroy something.
5421 if (zs
->zs_enospc_count
!= 0) {
5422 int d
= ztest_random(zopt_datasets
);
5423 ztest_dataset_destroy(zs
, d
);
5425 zs
->zs_enospc_count
= 0;
5427 tid
= umem_zalloc(zopt_threads
* sizeof (kt_did_t
), UMEM_NOFAIL
);
5429 if (zopt_verbose
>= 4)
5430 (void) printf("starting main threads...\n");
5433 * Kick off all the tests that run in parallel.
5435 for (t
= 0; t
< zopt_threads
; t
++) {
5438 if (t
< zopt_datasets
&& ztest_dataset_open(zs
, t
) != 0)
5441 VERIFY3P(thread
= zk_thread_create(NULL
, 0,
5442 (thread_func_t
)ztest_thread
,
5443 (void *)(uintptr_t)t
, TS_RUN
, NULL
, 0, 0,
5444 PTHREAD_CREATE_JOINABLE
), !=, NULL
);
5445 tid
[t
] = thread
->t_tid
;
5449 * Wait for all of the tests to complete. We go in reverse order
5450 * so we don't close datasets while threads are still using them.
5452 for (t
= zopt_threads
- 1; t
>= 0; t
--) {
5453 thread_join(tid
[t
]);
5454 if (t
< zopt_datasets
)
5455 ztest_dataset_close(zs
, t
);
5458 txg_wait_synced(spa_get_dsl(spa
), 0);
5460 zs
->zs_alloc
= metaslab_class_get_alloc(spa_normal_class(spa
));
5461 zs
->zs_space
= metaslab_class_get_space(spa_normal_class(spa
));
5463 umem_free(tid
, zopt_threads
* sizeof (kt_did_t
));
5465 /* Kill the resume thread */
5466 ztest_exiting
= B_TRUE
;
5467 thread_join(resume_thread
->t_tid
);
5471 * Right before closing the pool, kick off a bunch of async I/O;
5472 * spa_close() should wait for it to complete.
5474 for (object
= 1; object
< 50; object
++)
5475 dmu_prefetch(spa
->spa_meta_objset
, object
, 0, 1ULL << 20);
5477 /* Verify that at least one commit cb was called in a timely fashion */
5478 if (zc_cb_counter
>= ZTEST_COMMIT_CB_MIN_REG
)
5479 VERIFY3U(zc_min_txg_delay
, ==, 0);
5481 spa_close(spa
, FTAG
);
5484 * Verify that we can loop over all pools.
5486 mutex_enter(&spa_namespace_lock
);
5487 for (spa
= spa_next(NULL
); spa
!= NULL
; spa
= spa_next(spa
))
5488 if (zopt_verbose
> 3)
5489 (void) printf("spa_next: found %s\n", spa_name(spa
));
5490 mutex_exit(&spa_namespace_lock
);
5493 * Verify that we can export the pool and reimport it under a
5496 if (ztest_random(2) == 0) {
5497 char name
[MAXNAMELEN
];
5498 (void) snprintf(name
, MAXNAMELEN
, "%s_import", zs
->zs_pool
);
5499 ztest_spa_import_export(zs
->zs_pool
, name
);
5500 ztest_spa_import_export(name
, zs
->zs_pool
);
5505 list_destroy(&zcl
.zcl_callbacks
);
5506 mutex_destroy(&zcl
.zcl_callbacks_lock
);
5507 rw_destroy(&zs
->zs_name_lock
);
5508 mutex_destroy(&zs
->zs_vdev_lock
);
5512 ztest_freeze(ztest_shared_t
*zs
)
5514 ztest_ds_t
*zd
= &zs
->zs_zd
[0];
5518 if (zopt_verbose
>= 3)
5519 (void) printf("testing spa_freeze()...\n");
5521 kernel_init(FREAD
| FWRITE
);
5522 VERIFY3U(0, ==, spa_open(zs
->zs_pool
, &spa
, FTAG
));
5523 VERIFY3U(0, ==, ztest_dataset_open(zs
, 0));
5526 * Force the first log block to be transactionally allocated.
5527 * We have to do this before we freeze the pool -- otherwise
5528 * the log chain won't be anchored.
5530 while (BP_IS_HOLE(&zd
->zd_zilog
->zl_header
->zh_log
)) {
5531 ztest_dmu_object_alloc_free(zd
, 0);
5532 zil_commit(zd
->zd_zilog
, 0);
5535 txg_wait_synced(spa_get_dsl(spa
), 0);
5538 * Freeze the pool. This stops spa_sync() from doing anything,
5539 * so that the only way to record changes from now on is the ZIL.
5544 * Run tests that generate log records but don't alter the pool config
5545 * or depend on DSL sync tasks (snapshots, objset create/destroy, etc).
5546 * We do a txg_wait_synced() after each iteration to force the txg
5547 * to increase well beyond the last synced value in the uberblock.
5548 * The ZIL should be OK with that.
5550 while (ztest_random(10) != 0 && numloops
++ < zopt_maxloops
) {
5551 ztest_dmu_write_parallel(zd
, 0);
5552 ztest_dmu_object_alloc_free(zd
, 0);
5553 txg_wait_synced(spa_get_dsl(spa
), 0);
5557 * Commit all of the changes we just generated.
5559 zil_commit(zd
->zd_zilog
, 0);
5560 txg_wait_synced(spa_get_dsl(spa
), 0);
5563 * Close our dataset and close the pool.
5565 ztest_dataset_close(zs
, 0);
5566 spa_close(spa
, FTAG
);
5570 * Open and close the pool and dataset to induce log replay.
5572 kernel_init(FREAD
| FWRITE
);
5573 VERIFY3U(0, ==, spa_open(zs
->zs_pool
, &spa
, FTAG
));
5574 VERIFY3U(0, ==, ztest_dataset_open(zs
, 0));
5575 ztest_dataset_close(zs
, 0);
5576 spa_close(spa
, FTAG
);
5581 print_time(hrtime_t t
, char *timebuf
)
5583 hrtime_t s
= t
/ NANOSEC
;
5584 hrtime_t m
= s
/ 60;
5585 hrtime_t h
= m
/ 60;
5586 hrtime_t d
= h
/ 24;
5595 (void) sprintf(timebuf
,
5596 "%llud%02lluh%02llum%02llus", d
, h
, m
, s
);
5598 (void) sprintf(timebuf
, "%lluh%02llum%02llus", h
, m
, s
);
5600 (void) sprintf(timebuf
, "%llum%02llus", m
, s
);
5602 (void) sprintf(timebuf
, "%llus", s
);
5606 make_random_props(void)
5610 if (ztest_random(2) == 0)
5613 VERIFY(nvlist_alloc(&props
, NV_UNIQUE_NAME
, 0) == 0);
5614 VERIFY(nvlist_add_uint64(props
, "autoreplace", 1) == 0);
5616 (void) printf("props:\n");
5617 dump_nvlist(props
, 4);
5623 * Create a storage pool with the given name and initial vdev size.
5624 * Then test spa_freeze() functionality.
5627 ztest_init(ztest_shared_t
*zs
)
5630 nvlist_t
*nvroot
, *props
;
5632 mutex_init(&zs
->zs_vdev_lock
, NULL
, MUTEX_DEFAULT
, NULL
);
5633 rw_init(&zs
->zs_name_lock
, NULL
, RW_DEFAULT
, NULL
);
5635 kernel_init(FREAD
| FWRITE
);
5638 * Create the storage pool.
5640 (void) spa_destroy(zs
->zs_pool
);
5641 ztest_shared
->zs_vdev_next_leaf
= 0;
5643 zs
->zs_mirrors
= zopt_mirrors
;
5644 nvroot
= make_vdev_root(NULL
, NULL
, zopt_vdev_size
, 0,
5645 0, zopt_raidz
, zs
->zs_mirrors
, 1);
5646 props
= make_random_props();
5647 VERIFY3U(0, ==, spa_create(zs
->zs_pool
, nvroot
, props
, NULL
, NULL
));
5648 nvlist_free(nvroot
);
5650 VERIFY3U(0, ==, spa_open(zs
->zs_pool
, &spa
, FTAG
));
5651 metaslab_sz
= 1ULL << spa
->spa_root_vdev
->vdev_child
[0]->vdev_ms_shift
;
5652 spa_close(spa
, FTAG
);
5656 ztest_run_zdb(zs
->zs_pool
);
5660 ztest_run_zdb(zs
->zs_pool
);
5662 (void) rw_destroy(&zs
->zs_name_lock
);
5663 (void) mutex_destroy(&zs
->zs_vdev_lock
);
5667 main(int argc
, char **argv
)
5679 (void) setvbuf(stdout
, NULL
, _IOLBF
, 0);
5681 ztest_random_fd
= open("/dev/urandom", O_RDONLY
);
5683 dprintf_setup(&argc
, argv
);
5684 process_options(argc
, argv
);
5686 /* Override location of zpool.cache */
5687 VERIFY(asprintf((char **)&spa_config_path
, "%s/zpool.cache",
5691 * Blow away any existing copy of zpool.cache
5694 (void) remove(spa_config_path
);
5696 shared_size
= sizeof (*zs
) + zopt_datasets
* sizeof (ztest_ds_t
);
5698 zs
= ztest_shared
= (void *)mmap(0,
5699 P2ROUNDUP(shared_size
, getpagesize()),
5700 PROT_READ
| PROT_WRITE
, MAP_SHARED
| MAP_ANON
, -1, 0);
5702 if (zopt_verbose
>= 1) {
5703 (void) printf("%llu vdevs, %d datasets, %d threads,"
5704 " %llu seconds...\n",
5705 (u_longlong_t
)zopt_vdevs
, zopt_datasets
, zopt_threads
,
5706 (u_longlong_t
)zopt_time
);
5710 * Create and initialize our storage pool.
5712 for (i
= 1; i
<= zopt_init
; i
++) {
5713 bzero(zs
, sizeof (ztest_shared_t
));
5714 if (zopt_verbose
>= 3 && zopt_init
!= 1)
5715 (void) printf("ztest_init(), pass %d\n", i
);
5716 zs
->zs_pool
= zopt_pool
;
5720 zs
->zs_pool
= zopt_pool
;
5721 zs
->zs_proc_start
= gethrtime();
5722 zs
->zs_proc_stop
= zs
->zs_proc_start
+ zopt_time
* NANOSEC
;
5724 for (f
= 0; f
< ZTEST_FUNCS
; f
++) {
5725 zi
= &zs
->zs_info
[f
];
5726 *zi
= ztest_info
[f
];
5727 if (zs
->zs_proc_start
+ zi
->zi_interval
[0] > zs
->zs_proc_stop
)
5728 zi
->zi_call_next
= UINT64_MAX
;
5730 zi
->zi_call_next
= zs
->zs_proc_start
+
5731 ztest_random(2 * zi
->zi_interval
[0] + 1);
5735 * Run the tests in a loop. These tests include fault injection
5736 * to verify that self-healing data works, and forced crashes
5737 * to verify that we never lose on-disk consistency.
5739 while (gethrtime() < zs
->zs_proc_stop
) {
5744 * Initialize the workload counters for each function.
5746 for (f
= 0; f
< ZTEST_FUNCS
; f
++) {
5747 zi
= &zs
->zs_info
[f
];
5748 zi
->zi_call_count
= 0;
5749 zi
->zi_call_time
= 0;
5752 /* Set the allocation switch size */
5753 metaslab_df_alloc_threshold
= ztest_random(metaslab_sz
/ 4) + 1;
5758 fatal(1, "fork failed");
5760 if (pid
== 0) { /* child */
5761 struct rlimit rl
= { 1024, 1024 };
5762 (void) setrlimit(RLIMIT_NOFILE
, &rl
);
5763 (void) enable_extended_FILE_stdio(-1, -1);
5768 while (waitpid(pid
, &status
, 0) != pid
)
5771 if (WIFEXITED(status
)) {
5772 if (WEXITSTATUS(status
) != 0) {
5773 (void) fprintf(stderr
,
5774 "child exited with code %d\n",
5775 WEXITSTATUS(status
));
5778 } else if (WIFSIGNALED(status
)) {
5779 if (WTERMSIG(status
) != SIGKILL
) {
5780 (void) fprintf(stderr
,
5781 "child died with signal %d\n",
5787 (void) fprintf(stderr
, "something strange happened "
5794 if (zopt_verbose
>= 1) {
5795 hrtime_t now
= gethrtime();
5797 now
= MIN(now
, zs
->zs_proc_stop
);
5798 print_time(zs
->zs_proc_stop
- now
, timebuf
);
5799 nicenum(zs
->zs_space
, numbuf
);
5801 (void) printf("Pass %3d, %8s, %3llu ENOSPC, "
5802 "%4.1f%% of %5s used, %3.0f%% done, %8s to go\n",
5804 WIFEXITED(status
) ? "Complete" : "SIGKILL",
5805 (u_longlong_t
)zs
->zs_enospc_count
,
5806 100.0 * zs
->zs_alloc
/ zs
->zs_space
,
5808 100.0 * (now
- zs
->zs_proc_start
) /
5809 (zopt_time
* NANOSEC
), timebuf
);
5812 if (zopt_verbose
>= 2) {
5813 (void) printf("\nWorkload summary:\n\n");
5814 (void) printf("%7s %9s %s\n",
5815 "Calls", "Time", "Function");
5816 (void) printf("%7s %9s %s\n",
5817 "-----", "----", "--------");
5818 for (f
= 0; f
< ZTEST_FUNCS
; f
++) {
5821 zi
= &zs
->zs_info
[f
];
5822 print_time(zi
->zi_call_time
, timebuf
);
5823 (void) dladdr((void *)zi
->zi_func
, &dli
);
5824 (void) printf("%7llu %9s %s\n",
5825 (u_longlong_t
)zi
->zi_call_count
, timebuf
,
5828 (void) printf("\n");
5832 * It's possible that we killed a child during a rename test,
5833 * in which case we'll have a 'ztest_tmp' pool lying around
5834 * instead of 'ztest'. Do a blind rename in case this happened.
5837 if (spa_open(zopt_pool
, &spa
, FTAG
) == 0) {
5838 spa_close(spa
, FTAG
);
5840 char tmpname
[MAXNAMELEN
];
5842 kernel_init(FREAD
| FWRITE
);
5843 (void) snprintf(tmpname
, sizeof (tmpname
), "%s_tmp",
5845 (void) spa_rename(tmpname
, zopt_pool
);
5849 ztest_run_zdb(zopt_pool
);
5852 if (zopt_verbose
>= 1) {
5853 (void) printf("%d killed, %d completed, %.0f%% kill rate\n",
5854 kills
, iters
- kills
, (100.0 * kills
) / MAX(1, iters
));