 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012 by Delphix. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 */
/*
 * The objective of this program is to provide a DMU/ZAP/SPA stress test
 * that runs entirely in userland, is easy to use, and easy to extend.
 *
 * The overall design of the ztest program is as follows:
 *
 * (1) For each major functional area (e.g. adding vdevs to a pool,
 *     creating and destroying datasets, reading and writing objects, etc)
 *     we have a simple routine to test that functionality.  These
 *     individual routines do not have to do anything "stressful".
 *
 * (2) We turn these simple functionality tests into a stress test by
 *     running them all in parallel, with as many threads as desired,
 *     and spread across as many datasets, objects, and vdevs as desired.
 *
 * (3) While all this is happening, we inject faults into the pool to
 *     verify that self-healing data really works.
 *
 * (4) Every time we open a dataset, we change its checksum and compression
 *     functions.  Thus even individual objects vary from block to block
 *     in which checksum they use and whether they're compressed.
 *
 * (5) To verify that we never lose on-disk consistency after a crash,
 *     we run the entire test in a child of the main process.
 *     At random times, the child self-immolates with a SIGKILL.
 *     This is the software equivalent of pulling the power cord.
 *     The parent then runs the test again, using the existing
 *     storage pool, as many times as desired.  If backwards compatibility
 *     testing is enabled, ztest will sometimes run the "older" version
 *     of ztest after a SIGKILL.
 *
 * (6) To verify that we don't have future leaks or temporal incursions,
 *     many of the functional tests record the transaction group number
 *     as part of their data.  When reading old data, they verify that
 *     the transaction group number is less than the current, open txg.
 *     If you add a new test, please do this if applicable.
 *
 * (7) Threads are created with a reduced stack size, for sanity checking.
 *     Therefore, it's important not to allocate huge buffers on the stack.
 *
 * When run with no arguments, ztest runs for about five minutes and
 * produces no output if successful.  To get a little bit of information,
 * specify -V.  To get more information, specify -VV, and so on.
 *
 * To turn this into an overnight stress test, use -T to specify run time.
 *
 * You can ask for more vdevs [-v], datasets [-d], or threads [-t]
 * to increase the pool capacity, fanout, and overall stress level.
 *
 * Use the -k option to set the desired frequency of kills.
 *
 * When ztest invokes itself it passes all relevant information through a
 * temporary file which is mmap-ed in the child process.  This allows shared
 * memory to survive the exec syscall.  The ztest_shared_hdr_t struct is always
 * stored at offset 0 of this file and contains information on the size and
 * number of shared structures in the file.  The information stored in this
 * file must remain backwards compatible with older versions of ztest so that
 * ztest can invoke them during backwards compatibility testing (-B).
 */
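
/*
 * As an illustrative sketch of the mmap-over-exec scheme described above
 * (not the actual ztest setup code; the identifiers below are hypothetical),
 * the shared region can be created roughly like this:
 *
 *	int fd = mkstemp(tmpname);			// temporary backing file
 *	VERIFY(ftruncate(fd, shared_size) == 0);	// size the file
 *	hdr = mmap(NULL, shared_size, PROT_READ | PROT_WRITE,
 *	    MAP_SHARED, fd, 0);				// writable shared mapping
 *	hdr->zh_hdr_size = sizeof (ztest_shared_hdr_t);
 *
 * The file descriptor is then arranged to be ZTEST_FD_DATA in the child,
 * which mmaps the same file after exec and sees the parent's
 * ztest_shared_hdr_t at offset 0.
 */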

#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/resource.h>
#include <sys/zil_impl.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_file.h>
#include <sys/spa_impl.h>
#include <sys/metaslab_impl.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_scan.h>
#include <sys/zio_checksum.h>
#include <sys/refcount.h>
#include <stdio_ext.h>
#include <sys/fs/zfs.h>
#include <libnvpair.h>

#define	ZTEST_FD_DATA 3
#define	ZTEST_FD_RAND 4

typedef struct ztest_shared_hdr {
	uint64_t	zh_hdr_size;
	uint64_t	zh_opts_size;
	uint64_t	zh_stats_size;
	uint64_t	zh_stats_count;
	uint64_t	zh_ds_count;
} ztest_shared_hdr_t;

static ztest_shared_hdr_t *ztest_shared_hdr;

typedef struct ztest_shared_opts {
	char		zo_pool[MAXNAMELEN];
	char		zo_dir[MAXNAMELEN];
	char		zo_alt_ztest[MAXNAMELEN];
	char		zo_alt_libpath[MAXNAMELEN];
	uint64_t	zo_vdevtime;
	uint64_t	zo_passtime;
	uint64_t	zo_killrate;
	uint64_t	zo_maxloops;
	uint64_t	zo_metaslab_gang_bang;
} ztest_shared_opts_t;

static const ztest_shared_opts_t ztest_opts_defaults = {
	.zo_pool = { 'z', 't', 'e', 's', 't', '\0' },
	.zo_dir = { '/', 't', 'm', 'p', '\0' },
	.zo_alt_ztest = { '\0' },
	.zo_alt_libpath = { '\0' },
	.zo_ashift = SPA_MINBLOCKSHIFT,
	.zo_raidz_parity = 1,
	.zo_vdev_size = SPA_MINDEVSIZE,
	.zo_passtime = 60,		/* 60 seconds */
	.zo_killrate = 70,		/* 70% kill rate */
	.zo_time = 300,			/* 5 minutes */
	.zo_maxloops = 50,		/* max loops during spa_freeze() */
	.zo_metaslab_gang_bang = 32 << 10
};

extern uint64_t metaslab_gang_bang;
extern uint64_t metaslab_df_alloc_threshold;

static ztest_shared_opts_t *ztest_shared_opts;
static ztest_shared_opts_t ztest_opts;

typedef struct ztest_shared_ds {
	uint64_t	zd_seq;
} ztest_shared_ds_t;

static ztest_shared_ds_t *ztest_shared_ds;
#define	ZTEST_GET_SHARED_DS(d)	(&ztest_shared_ds[d])

#define	BT_MAGIC	0x123456789abcdefULL
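
/*
 * MAXFAULTS() below gives the worst-case number of leaf-device faults the
 * current configuration can absorb without data loss: all but one of the
 * zs_mirrors mirror copies may lose an entire raidz child (parity + 1 disks
 * each), and the one remaining copy can still tolerate "parity" faulted
 * disks, i.e. mirrors * (parity + 1) - 1 faults in total.
 */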
#define	MAXFAULTS() \
	(MAX(zs->zs_mirrors, 1) * (ztest_opts.zo_raidz_parity + 1) - 1)

enum ztest_io_type {
	ZTEST_IO_WRITE_TAG,
	ZTEST_IO_WRITE_PATTERN,
	ZTEST_IO_WRITE_ZEROES,
	ZTEST_IO_TRUNCATE,
	ZTEST_IO_SETATTR,
	ZTEST_IO_TYPES
};

typedef struct ztest_block_tag {
	uint64_t	bt_magic;
	uint64_t	bt_objset;
	uint64_t	bt_object;
	uint64_t	bt_offset;
	uint64_t	bt_gen;
	uint64_t	bt_txg;
	uint64_t	bt_crtxg;
} ztest_block_tag_t;

typedef struct bufwad {
	uint64_t	bw_index;
	uint64_t	bw_txg;
	uint64_t	bw_data;
} bufwad_t;

/*
 * XXX -- fix zfs range locks to be generic so we can use them here.
 */

#define	ZTEST_RANGE_LOCKS	64
#define	ZTEST_OBJECT_LOCKS	64

/*
 * Object descriptor.  Used as a template for object lookup/create/remove.
 */
typedef struct ztest_od {
	uint64_t	od_dir;
	uint64_t	od_object;
	dmu_object_type_t od_type;
	dmu_object_type_t od_crtype;
	uint64_t	od_blocksize;
	uint64_t	od_crblocksize;
	uint64_t	od_gen;
	uint64_t	od_crgen;
	char		od_name[MAXNAMELEN];
} ztest_od_t;

typedef struct ztest_ds {
	ztest_shared_ds_t *zd_shared;
	objset_t	*zd_os;
	krwlock_t	zd_zilog_lock;
	zilog_t		*zd_zilog;
	ztest_od_t	*zd_od;		/* debugging aid */
	char		zd_name[MAXNAMELEN];
	kmutex_t	zd_dirobj_lock;
	rll_t		zd_object_lock[ZTEST_OBJECT_LOCKS];
	rll_t		zd_range_lock[ZTEST_RANGE_LOCKS];
} ztest_ds_t;

/*
 * Per-iteration state.
 */
typedef void ztest_func_t(ztest_ds_t *zd, uint64_t id);

typedef struct ztest_info {
	ztest_func_t	*zi_func;	/* test function */
	uint64_t	zi_iters;	/* iterations per execution */
	uint64_t	*zi_interval;	/* execute every <interval> seconds */
} ztest_info_t;

typedef struct ztest_shared_callstate {
	uint64_t	zc_count;	/* per-pass count */
	uint64_t	zc_time;	/* per-pass time */
	uint64_t	zc_next;	/* next time to call this function */
} ztest_shared_callstate_t;

static ztest_shared_callstate_t *ztest_shared_callstate;

#define	ZTEST_GET_SHARED_CALLSTATE(c)	(&ztest_shared_callstate[c])

/*
 * Note: these aren't static because we want dladdr() to work.
 */
ztest_func_t ztest_dmu_read_write;
ztest_func_t ztest_dmu_write_parallel;
ztest_func_t ztest_dmu_object_alloc_free;
ztest_func_t ztest_dmu_commit_callbacks;
ztest_func_t ztest_zap;
ztest_func_t ztest_zap_parallel;
ztest_func_t ztest_zil_commit;
ztest_func_t ztest_zil_remount;
ztest_func_t ztest_dmu_read_write_zcopy;
ztest_func_t ztest_dmu_objset_create_destroy;
ztest_func_t ztest_dmu_prealloc;
ztest_func_t ztest_fzap;
ztest_func_t ztest_dmu_snapshot_create_destroy;
ztest_func_t ztest_dsl_prop_get_set;
ztest_func_t ztest_spa_prop_get_set;
ztest_func_t ztest_spa_create_destroy;
ztest_func_t ztest_fault_inject;
ztest_func_t ztest_ddt_repair;
ztest_func_t ztest_dmu_snapshot_hold;
ztest_func_t ztest_spa_rename;
ztest_func_t ztest_scrub;
ztest_func_t ztest_dsl_dataset_promote_busy;
ztest_func_t ztest_vdev_attach_detach;
ztest_func_t ztest_vdev_LUN_growth;
ztest_func_t ztest_vdev_add_remove;
ztest_func_t ztest_vdev_aux_add_remove;
ztest_func_t ztest_split_pool;
ztest_func_t ztest_reguid;

uint64_t zopt_always = 0ULL * NANOSEC;		/* all the time */
uint64_t zopt_incessant = 1ULL * NANOSEC / 10;	/* every 1/10 second */
uint64_t zopt_often = 1ULL * NANOSEC;		/* every second */
uint64_t zopt_sometimes = 10ULL * NANOSEC;	/* every 10 seconds */
uint64_t zopt_rarely = 60ULL * NANOSEC;		/* every 60 seconds */
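
/*
 * Each ztest_info entry below pairs a test function with the number of
 * iterations to run per invocation (zi_iters) and a pointer to the interval,
 * in nanoseconds, at which the scheduler should dispatch it (zi_interval);
 * the zopt_* values above supply the common intervals.
 */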
ztest_info_t ztest_info[] = {
	{ ztest_dmu_read_write,			1,	&zopt_always	},
	{ ztest_dmu_write_parallel,		10,	&zopt_always	},
	{ ztest_dmu_object_alloc_free,		1,	&zopt_always	},
	{ ztest_dmu_commit_callbacks,		1,	&zopt_always	},
	{ ztest_zap,				30,	&zopt_always	},
	{ ztest_zap_parallel,			100,	&zopt_always	},
	{ ztest_split_pool,			1,	&zopt_always	},
	{ ztest_zil_commit,			1,	&zopt_incessant	},
	{ ztest_zil_remount,			1,	&zopt_sometimes	},
	{ ztest_dmu_read_write_zcopy,		1,	&zopt_often	},
	{ ztest_dmu_objset_create_destroy,	1,	&zopt_often	},
	{ ztest_dsl_prop_get_set,		1,	&zopt_often	},
	{ ztest_spa_prop_get_set,		1,	&zopt_sometimes	},
	{ ztest_dmu_prealloc,			1,	&zopt_sometimes	},
	{ ztest_fzap,				1,	&zopt_sometimes	},
	{ ztest_dmu_snapshot_create_destroy,	1,	&zopt_sometimes	},
	{ ztest_spa_create_destroy,		1,	&zopt_sometimes	},
	{ ztest_fault_inject,			1,	&zopt_sometimes	},
	{ ztest_ddt_repair,			1,	&zopt_sometimes	},
	{ ztest_dmu_snapshot_hold,		1,	&zopt_sometimes	},
	/*
	 * The reguid test is currently broken. Disable it until
	 * we get around to fixing it.
	 */
	{ ztest_reguid,				1,	&zopt_sometimes	},
	{ ztest_spa_rename,			1,	&zopt_rarely	},
	{ ztest_scrub,				1,	&zopt_rarely	},
	{ ztest_dsl_dataset_promote_busy,	1,	&zopt_rarely	},
	{ ztest_vdev_attach_detach,		1,	&zopt_rarely	},
	{ ztest_vdev_LUN_growth,		1,	&zopt_rarely	},
	{ ztest_vdev_add_remove,		1,	&ztest_opts.zo_vdevtime	},
	{ ztest_vdev_aux_add_remove,		1,	&ztest_opts.zo_vdevtime	},
};

#define	ZTEST_FUNCS	(sizeof (ztest_info) / sizeof (ztest_info_t))
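
/*
 * To add a new test: write a ztest_func_t with the signature
 * (ztest_ds_t *zd, uint64_t id), declare it alongside the declarations
 * above, and append a { function, iterations, interval } row to
 * ztest_info[].  Per the design notes at the top of this file, tests that
 * store data should embed the txg so future reads can check for temporal
 * incursions.
 */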

/*
 * The following struct is used to hold a list of uncalled commit callbacks.
 * The callbacks are ordered by txg number.
 */
typedef struct ztest_cb_list {
	kmutex_t	zcl_callbacks_lock;
	list_t		zcl_callbacks;
} ztest_cb_list_t;

/*
 * Stuff we need to share writably between parent and child.
 */
typedef struct ztest_shared {
	boolean_t	zs_do_init;
	hrtime_t	zs_proc_start;
	hrtime_t	zs_proc_stop;
	hrtime_t	zs_thread_start;
	hrtime_t	zs_thread_stop;
	hrtime_t	zs_thread_kill;
	uint64_t	zs_enospc_count;
	uint64_t	zs_vdev_next_leaf;
	uint64_t	zs_vdev_aux;
	uint64_t	zs_alloc;
	uint64_t	zs_space;
	uint64_t	zs_splits;
	uint64_t	zs_mirrors;
	uint64_t	zs_metaslab_sz;
	uint64_t	zs_metaslab_df_alloc_threshold;
} ztest_shared_t;

#define	ID_PARALLEL	-1ULL

static char ztest_dev_template[] = "%s/%s.%llua";
static char ztest_aux_template[] = "%s/%s.%s.%llu";
ztest_shared_t *ztest_shared;

static spa_t *ztest_spa = NULL;
static ztest_ds_t *ztest_ds;

static kmutex_t ztest_vdev_lock;
static krwlock_t ztest_name_lock;

static boolean_t ztest_dump_core = B_TRUE;
static boolean_t ztest_exiting;

/* Global commit callback list */
static ztest_cb_list_t zcl;

/* Commit cb delay */
static uint64_t zc_min_txg_delay = UINT64_MAX;
static int zc_cb_counter = 0;

/*
 * Minimum number of commit callbacks that need to be registered for us to
 * check whether the minimum txg delay is acceptable.
 */
#define	ZTEST_COMMIT_CB_MIN_REG	100

/*
 * If a number of txgs equal to this threshold have been created after a commit
 * callback has been registered but not called, then we assume there is an
 * implementation bug.
 */
#define	ZTEST_COMMIT_CB_THRESH	(TXG_CONCURRENT_STATES + 1000)

extern uint64_t metaslab_gang_bang;
extern uint64_t metaslab_df_alloc_threshold;

enum ztest_object {
	ZTEST_META_DNODE = 0,
	ZTEST_DIROBJ,
	ZTEST_OBJECTS
};

static void usage(boolean_t) __NORETURN;

/*
 * These libumem hooks provide a reasonable set of defaults for the allocator's
 * debugging facilities.
 */
const char *
_umem_debug_init(void)
{
	return ("default,verbose"); /* $UMEM_DEBUG setting */
}

const char *
_umem_logging_init(void)
{
	return ("fail,contents"); /* $UMEM_LOGGING setting */
}

#define	FATAL_MSG_SZ	1024

fatal(int do_perror, char *message, ...)
	int save_errno = errno;

	(void) fflush(stdout);
	buf = umem_alloc(FATAL_MSG_SZ, UMEM_NOFAIL);

	va_start(args, message);
	(void) sprintf(buf, "ztest: ");
	(void) vsprintf(buf + strlen(buf), message, args);
	(void) snprintf(buf + strlen(buf), FATAL_MSG_SZ - strlen(buf),
	    ": %s", strerror(save_errno));
	(void) fprintf(stderr, "%s\n", buf);
	fatal_msg = buf;	/* to ease debugging */
506 str2shift(const char *buf
)
508 const char *ends
= "BKMGTPEZ";
513 for (i
= 0; i
< strlen(ends
); i
++) {
514 if (toupper(buf
[0]) == ends
[i
])
517 if (i
== strlen(ends
)) {
518 (void) fprintf(stderr
, "ztest: invalid bytes suffix: %s\n",
522 if (buf
[1] == '\0' || (toupper(buf
[1]) == 'B' && buf
[2] == '\0')) {
525 (void) fprintf(stderr
, "ztest: invalid bytes suffix: %s\n", buf
);
531 nicenumtoull(const char *buf
)
536 val
= strtoull(buf
, &end
, 0);
538 (void) fprintf(stderr
, "ztest: bad numeric value: %s\n", buf
);
540 } else if (end
[0] == '.') {
541 double fval
= strtod(buf
, &end
);
542 fval
*= pow(2, str2shift(end
));
543 if (fval
> UINT64_MAX
) {
544 (void) fprintf(stderr
, "ztest: value too large: %s\n",
548 val
= (uint64_t)fval
;
550 int shift
= str2shift(end
);
551 if (shift
>= 64 || (val
<< shift
) >> shift
!= val
) {
552 (void) fprintf(stderr
, "ztest: value too large: %s\n",
562 usage(boolean_t requested
)
564 const ztest_shared_opts_t
*zo
= &ztest_opts_defaults
;
566 char nice_vdev_size
[10];
567 char nice_gang_bang
[10];
568 FILE *fp
= requested
? stdout
: stderr
;
570 nicenum(zo
->zo_vdev_size
, nice_vdev_size
);
571 nicenum(zo
->zo_metaslab_gang_bang
, nice_gang_bang
);
573 (void) fprintf(fp
, "Usage: %s\n"
574 "\t[-v vdevs (default: %llu)]\n"
575 "\t[-s size_of_each_vdev (default: %s)]\n"
576 "\t[-a alignment_shift (default: %d)] use 0 for random\n"
577 "\t[-m mirror_copies (default: %d)]\n"
578 "\t[-r raidz_disks (default: %d)]\n"
579 "\t[-R raidz_parity (default: %d)]\n"
580 "\t[-d datasets (default: %d)]\n"
581 "\t[-t threads (default: %d)]\n"
582 "\t[-g gang_block_threshold (default: %s)]\n"
583 "\t[-i init_count (default: %d)] initialize pool i times\n"
584 "\t[-k kill_percentage (default: %llu%%)]\n"
585 "\t[-p pool_name (default: %s)]\n"
586 "\t[-f dir (default: %s)] file directory for vdev files\n"
587 "\t[-V] verbose (use multiple times for ever more blather)\n"
588 "\t[-E] use existing pool instead of creating new one\n"
589 "\t[-T time (default: %llu sec)] total run time\n"
590 "\t[-F freezeloops (default: %llu)] max loops in spa_freeze()\n"
591 "\t[-P passtime (default: %llu sec)] time per pass\n"
592 "\t[-B alt_ztest (default: <none>)] alternate ztest path\n"
593 "\t[-h] (print help)\n"
596 (u_longlong_t
)zo
->zo_vdevs
, /* -v */
597 nice_vdev_size
, /* -s */
598 zo
->zo_ashift
, /* -a */
599 zo
->zo_mirrors
, /* -m */
600 zo
->zo_raidz
, /* -r */
601 zo
->zo_raidz_parity
, /* -R */
602 zo
->zo_datasets
, /* -d */
603 zo
->zo_threads
, /* -t */
604 nice_gang_bang
, /* -g */
605 zo
->zo_init
, /* -i */
606 (u_longlong_t
)zo
->zo_killrate
, /* -k */
607 zo
->zo_pool
, /* -p */
609 (u_longlong_t
)zo
->zo_time
, /* -T */
610 (u_longlong_t
)zo
->zo_maxloops
, /* -F */
611 (u_longlong_t
)zo
->zo_passtime
);
612 exit(requested
? 0 : 1);
616 process_options(int argc
, char **argv
)
619 ztest_shared_opts_t
*zo
= &ztest_opts
;
623 char altdir
[MAXNAMELEN
] = { 0 };
625 bcopy(&ztest_opts_defaults
, zo
, sizeof (*zo
));
627 while ((opt
= getopt(argc
, argv
,
628 "v:s:a:m:r:R:d:t:g:i:k:p:f:VET:P:hF:B:")) != EOF
) {
645 value
= nicenumtoull(optarg
);
649 zo
->zo_vdevs
= value
;
652 zo
->zo_vdev_size
= MAX(SPA_MINDEVSIZE
, value
);
655 zo
->zo_ashift
= value
;
658 zo
->zo_mirrors
= value
;
661 zo
->zo_raidz
= MAX(1, value
);
664 zo
->zo_raidz_parity
= MIN(MAX(value
, 1), 3);
667 zo
->zo_datasets
= MAX(1, value
);
670 zo
->zo_threads
= MAX(1, value
);
673 zo
->zo_metaslab_gang_bang
= MAX(SPA_MINBLOCKSIZE
<< 1,
680 zo
->zo_killrate
= value
;
683 (void) strlcpy(zo
->zo_pool
, optarg
,
684 sizeof (zo
->zo_pool
));
687 path
= realpath(optarg
, NULL
);
689 (void) fprintf(stderr
, "error: %s: %s\n",
690 optarg
, strerror(errno
));
693 (void) strlcpy(zo
->zo_dir
, path
,
694 sizeof (zo
->zo_dir
));
707 zo
->zo_passtime
= MAX(1, value
);
710 zo
->zo_maxloops
= MAX(1, value
);
713 (void) strlcpy(altdir
, optarg
, sizeof (altdir
));
725 zo
->zo_raidz_parity
= MIN(zo
->zo_raidz_parity
, zo
->zo_raidz
- 1);
728 (zo
->zo_vdevs
> 0 ? zo
->zo_time
* NANOSEC
/ zo
->zo_vdevs
:
731 if (strlen(altdir
) > 0) {
732 char cmd
[MAXNAMELEN
];
733 char realaltdir
[MAXNAMELEN
];
739 (void) realpath(getexecname(), cmd
);
740 if (0 != access(altdir
, F_OK
)) {
741 ztest_dump_core
= B_FALSE
;
742 fatal(B_TRUE
, "invalid alternate ztest path: %s",
745 VERIFY(NULL
!= realpath(altdir
, realaltdir
));
748 * 'cmd' should be of the form "<anything>/usr/bin/<isa>/ztest".
749 * We want to extract <isa> to determine if we should use
750 * 32 or 64 bit binaries.
752 bin
= strstr(cmd
, "/usr/bin/");
753 ztest
= strstr(bin
, "/ztest");
755 isalen
= ztest
- isa
;
756 (void) snprintf(zo
->zo_alt_ztest
, sizeof (zo
->zo_alt_ztest
),
757 "%s/usr/bin/%.*s/ztest", realaltdir
, isalen
, isa
);
758 (void) snprintf(zo
->zo_alt_libpath
, sizeof (zo
->zo_alt_libpath
),
759 "%s/usr/lib/%.*s", realaltdir
, isalen
, isa
);
761 if (0 != access(zo
->zo_alt_ztest
, X_OK
)) {
762 ztest_dump_core
= B_FALSE
;
763 fatal(B_TRUE
, "invalid alternate ztest: %s",
765 } else if (0 != access(zo
->zo_alt_libpath
, X_OK
)) {
766 ztest_dump_core
= B_FALSE
;
767 fatal(B_TRUE
, "invalid alternate lib directory %s",
774 ztest_kill(ztest_shared_t
*zs
)
776 zs
->zs_alloc
= metaslab_class_get_alloc(spa_normal_class(ztest_spa
));
777 zs
->zs_space
= metaslab_class_get_space(spa_normal_class(ztest_spa
));
778 (void) kill(getpid(), SIGKILL
);
782 ztest_random(uint64_t range
)
789 if (read(ZTEST_FD_RAND
, &r
, sizeof (r
)) != sizeof (r
))
790 fatal(1, "short read from /dev/urandom");
797 ztest_record_enospc(const char *s
)
799 ztest_shared
->zs_enospc_count
++;
803 ztest_get_ashift(void)
805 if (ztest_opts
.zo_ashift
== 0)
806 return (SPA_MINBLOCKSHIFT
+ ztest_random(3));
807 return (ztest_opts
.zo_ashift
);
811 make_vdev_file(char *path
, char *aux
, size_t size
, uint64_t ashift
)
817 pathbuf
= umem_alloc(MAXPATHLEN
, UMEM_NOFAIL
);
820 ashift
= ztest_get_ashift();
826 vdev
= ztest_shared
->zs_vdev_aux
;
827 (void) snprintf(path
, MAXPATHLEN
,
828 ztest_aux_template
, ztest_opts
.zo_dir
,
829 ztest_opts
.zo_pool
, aux
, vdev
);
831 vdev
= ztest_shared
->zs_vdev_next_leaf
++;
832 (void) snprintf(path
, MAXPATHLEN
,
833 ztest_dev_template
, ztest_opts
.zo_dir
,
834 ztest_opts
.zo_pool
, vdev
);
839 int fd
= open(path
, O_RDWR
| O_CREAT
| O_TRUNC
, 0666);
841 fatal(1, "can't open %s", path
);
842 if (ftruncate(fd
, size
) != 0)
843 fatal(1, "can't ftruncate %s", path
);
847 VERIFY(nvlist_alloc(&file
, NV_UNIQUE_NAME
, 0) == 0);
848 VERIFY(nvlist_add_string(file
, ZPOOL_CONFIG_TYPE
, VDEV_TYPE_FILE
) == 0);
849 VERIFY(nvlist_add_string(file
, ZPOOL_CONFIG_PATH
, path
) == 0);
850 VERIFY(nvlist_add_uint64(file
, ZPOOL_CONFIG_ASHIFT
, ashift
) == 0);
851 umem_free(pathbuf
, MAXPATHLEN
);
857 make_vdev_raidz(char *path
, char *aux
, size_t size
, uint64_t ashift
, int r
)
859 nvlist_t
*raidz
, **child
;
863 return (make_vdev_file(path
, aux
, size
, ashift
));
864 child
= umem_alloc(r
* sizeof (nvlist_t
*), UMEM_NOFAIL
);
866 for (c
= 0; c
< r
; c
++)
867 child
[c
] = make_vdev_file(path
, aux
, size
, ashift
);
869 VERIFY(nvlist_alloc(&raidz
, NV_UNIQUE_NAME
, 0) == 0);
870 VERIFY(nvlist_add_string(raidz
, ZPOOL_CONFIG_TYPE
,
871 VDEV_TYPE_RAIDZ
) == 0);
872 VERIFY(nvlist_add_uint64(raidz
, ZPOOL_CONFIG_NPARITY
,
873 ztest_opts
.zo_raidz_parity
) == 0);
874 VERIFY(nvlist_add_nvlist_array(raidz
, ZPOOL_CONFIG_CHILDREN
,
877 for (c
= 0; c
< r
; c
++)
878 nvlist_free(child
[c
]);
880 umem_free(child
, r
* sizeof (nvlist_t
*));
886 make_vdev_mirror(char *path
, char *aux
, size_t size
, uint64_t ashift
,
889 nvlist_t
*mirror
, **child
;
893 return (make_vdev_raidz(path
, aux
, size
, ashift
, r
));
895 child
= umem_alloc(m
* sizeof (nvlist_t
*), UMEM_NOFAIL
);
897 for (c
= 0; c
< m
; c
++)
898 child
[c
] = make_vdev_raidz(path
, aux
, size
, ashift
, r
);
900 VERIFY(nvlist_alloc(&mirror
, NV_UNIQUE_NAME
, 0) == 0);
901 VERIFY(nvlist_add_string(mirror
, ZPOOL_CONFIG_TYPE
,
902 VDEV_TYPE_MIRROR
) == 0);
903 VERIFY(nvlist_add_nvlist_array(mirror
, ZPOOL_CONFIG_CHILDREN
,
906 for (c
= 0; c
< m
; c
++)
907 nvlist_free(child
[c
]);
909 umem_free(child
, m
* sizeof (nvlist_t
*));
915 make_vdev_root(char *path
, char *aux
, size_t size
, uint64_t ashift
,
916 int log
, int r
, int m
, int t
)
918 nvlist_t
*root
, **child
;
923 child
= umem_alloc(t
* sizeof (nvlist_t
*), UMEM_NOFAIL
);
925 for (c
= 0; c
< t
; c
++) {
926 child
[c
] = make_vdev_mirror(path
, aux
, size
, ashift
, r
, m
);
927 VERIFY(nvlist_add_uint64(child
[c
], ZPOOL_CONFIG_IS_LOG
,
931 VERIFY(nvlist_alloc(&root
, NV_UNIQUE_NAME
, 0) == 0);
932 VERIFY(nvlist_add_string(root
, ZPOOL_CONFIG_TYPE
, VDEV_TYPE_ROOT
) == 0);
933 VERIFY(nvlist_add_nvlist_array(root
, aux
? aux
: ZPOOL_CONFIG_CHILDREN
,
936 for (c
= 0; c
< t
; c
++)
937 nvlist_free(child
[c
]);
939 umem_free(child
, t
* sizeof (nvlist_t
*));
945 ztest_random_blocksize(void)
947 return (1 << (SPA_MINBLOCKSHIFT
+
948 ztest_random(SPA_MAXBLOCKSHIFT
- SPA_MINBLOCKSHIFT
+ 1)));
952 ztest_random_ibshift(void)
954 return (DN_MIN_INDBLKSHIFT
+
955 ztest_random(DN_MAX_INDBLKSHIFT
- DN_MIN_INDBLKSHIFT
+ 1));
959 ztest_random_vdev_top(spa_t
*spa
, boolean_t log_ok
)
962 vdev_t
*rvd
= spa
->spa_root_vdev
;
965 ASSERT(spa_config_held(spa
, SCL_ALL
, RW_READER
) != 0);
968 top
= ztest_random(rvd
->vdev_children
);
969 tvd
= rvd
->vdev_child
[top
];
970 } while (tvd
->vdev_ishole
|| (tvd
->vdev_islog
&& !log_ok
) ||
971 tvd
->vdev_mg
== NULL
|| tvd
->vdev_mg
->mg_class
== NULL
);
977 ztest_random_dsl_prop(zfs_prop_t prop
)
982 value
= zfs_prop_random_value(prop
, ztest_random(-1ULL));
983 } while (prop
== ZFS_PROP_CHECKSUM
&& value
== ZIO_CHECKSUM_OFF
);
989 ztest_dsl_prop_set_uint64(char *osname
, zfs_prop_t prop
, uint64_t value
,
992 const char *propname
= zfs_prop_to_name(prop
);
998 error
= dsl_prop_set(osname
, propname
,
999 (inherit
? ZPROP_SRC_NONE
: ZPROP_SRC_LOCAL
),
1000 sizeof (value
), 1, &value
);
1002 if (error
== ENOSPC
) {
1003 ztest_record_enospc(FTAG
);
1006 ASSERT3U(error
, ==, 0);
1008 setpoint
= umem_alloc(MAXPATHLEN
, UMEM_NOFAIL
);
1009 VERIFY3U(dsl_prop_get(osname
, propname
, sizeof (curval
),
1010 1, &curval
, setpoint
), ==, 0);
1012 if (ztest_opts
.zo_verbose
>= 6) {
1013 VERIFY(zfs_prop_index_to_string(prop
, curval
, &valname
) == 0);
1014 (void) printf("%s %s = %s at '%s'\n",
1015 osname
, propname
, valname
, setpoint
);
1017 umem_free(setpoint
, MAXPATHLEN
);
1023 ztest_spa_prop_set_uint64(zpool_prop_t prop
, uint64_t value
)
1025 spa_t
*spa
= ztest_spa
;
1026 nvlist_t
*props
= NULL
;
1029 VERIFY(nvlist_alloc(&props
, NV_UNIQUE_NAME
, 0) == 0);
1030 VERIFY(nvlist_add_uint64(props
, zpool_prop_to_name(prop
), value
) == 0);
1032 error
= spa_prop_set(spa
, props
);
1036 if (error
== ENOSPC
) {
1037 ztest_record_enospc(FTAG
);
1040 ASSERT3U(error
, ==, 0);
1046 ztest_rll_init(rll_t
*rll
)
1048 rll
->rll_writer
= NULL
;
1049 rll
->rll_readers
= 0;
1050 mutex_init(&rll
->rll_lock
, NULL
, MUTEX_DEFAULT
, NULL
);
1051 cv_init(&rll
->rll_cv
, NULL
, CV_DEFAULT
, NULL
);
1055 ztest_rll_destroy(rll_t
*rll
)
1057 ASSERT(rll
->rll_writer
== NULL
);
1058 ASSERT(rll
->rll_readers
== 0);
1059 mutex_destroy(&rll
->rll_lock
);
1060 cv_destroy(&rll
->rll_cv
);
1064 ztest_rll_lock(rll_t
*rll
, rl_type_t type
)
1066 mutex_enter(&rll
->rll_lock
);
1068 if (type
== RL_READER
) {
1069 while (rll
->rll_writer
!= NULL
)
1070 (void) cv_wait(&rll
->rll_cv
, &rll
->rll_lock
);
1073 while (rll
->rll_writer
!= NULL
|| rll
->rll_readers
)
1074 (void) cv_wait(&rll
->rll_cv
, &rll
->rll_lock
);
1075 rll
->rll_writer
= curthread
;
1078 mutex_exit(&rll
->rll_lock
);
1082 ztest_rll_unlock(rll_t
*rll
)
1084 mutex_enter(&rll
->rll_lock
);
1086 if (rll
->rll_writer
) {
1087 ASSERT(rll
->rll_readers
== 0);
1088 rll
->rll_writer
= NULL
;
1090 ASSERT(rll
->rll_readers
!= 0);
1091 ASSERT(rll
->rll_writer
== NULL
);
1095 if (rll
->rll_writer
== NULL
&& rll
->rll_readers
== 0)
1096 cv_broadcast(&rll
->rll_cv
);
1098 mutex_exit(&rll
->rll_lock
);
1102 ztest_object_lock(ztest_ds_t
*zd
, uint64_t object
, rl_type_t type
)
1104 rll_t
*rll
= &zd
->zd_object_lock
[object
& (ZTEST_OBJECT_LOCKS
- 1)];
1106 ztest_rll_lock(rll
, type
);
1110 ztest_object_unlock(ztest_ds_t
*zd
, uint64_t object
)
1112 rll_t
*rll
= &zd
->zd_object_lock
[object
& (ZTEST_OBJECT_LOCKS
- 1)];
1114 ztest_rll_unlock(rll
);
1118 ztest_range_lock(ztest_ds_t
*zd
, uint64_t object
, uint64_t offset
,
1119 uint64_t size
, rl_type_t type
)
1121 uint64_t hash
= object
^ (offset
% (ZTEST_RANGE_LOCKS
+ 1));
1122 rll_t
*rll
= &zd
->zd_range_lock
[hash
& (ZTEST_RANGE_LOCKS
- 1)];
1125 rl
= umem_alloc(sizeof (*rl
), UMEM_NOFAIL
);
1126 rl
->rl_object
= object
;
1127 rl
->rl_offset
= offset
;
1131 ztest_rll_lock(rll
, type
);
1137 ztest_range_unlock(rl_t
*rl
)
1139 rll_t
*rll
= rl
->rl_lock
;
1141 ztest_rll_unlock(rll
);
1143 umem_free(rl
, sizeof (*rl
));
1147 ztest_zd_init(ztest_ds_t
*zd
, ztest_shared_ds_t
*szd
, objset_t
*os
)
1150 zd
->zd_zilog
= dmu_objset_zil(os
);
1151 zd
->zd_shared
= szd
;
1152 dmu_objset_name(os
, zd
->zd_name
);
1155 if (zd
->zd_shared
!= NULL
)
1156 zd
->zd_shared
->zd_seq
= 0;
1158 rw_init(&zd
->zd_zilog_lock
, NULL
, RW_DEFAULT
, NULL
);
1159 mutex_init(&zd
->zd_dirobj_lock
, NULL
, MUTEX_DEFAULT
, NULL
);
1161 for (l
= 0; l
< ZTEST_OBJECT_LOCKS
; l
++)
1162 ztest_rll_init(&zd
->zd_object_lock
[l
]);
1164 for (l
= 0; l
< ZTEST_RANGE_LOCKS
; l
++)
1165 ztest_rll_init(&zd
->zd_range_lock
[l
]);
1169 ztest_zd_fini(ztest_ds_t
*zd
)
1173 mutex_destroy(&zd
->zd_dirobj_lock
);
1174 rw_destroy(&zd
->zd_zilog_lock
);
1176 for (l
= 0; l
< ZTEST_OBJECT_LOCKS
; l
++)
1177 ztest_rll_destroy(&zd
->zd_object_lock
[l
]);
1179 for (l
= 0; l
< ZTEST_RANGE_LOCKS
; l
++)
1180 ztest_rll_destroy(&zd
->zd_range_lock
[l
]);
#define	TXG_MIGHTWAIT	(ztest_random(10) == 0 ? TXG_NOWAIT : TXG_WAIT)
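
/*
 * ztest_tx_assign() below wraps dmu_tx_assign(): TXG_MIGHTWAIT randomly picks
 * TXG_NOWAIT about one call in ten so the ERESTART path gets exercised.  On
 * ERESTART (only legal under TXG_NOWAIT) or ENOSPC (which is recorded via
 * ztest_record_enospc()) no txg is assigned; otherwise the txg obtained from
 * dmu_tx_get_txg() is handed back to the caller.
 */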
1186 ztest_tx_assign(dmu_tx_t
*tx
, uint64_t txg_how
, const char *tag
)
1192 * Attempt to assign tx to some transaction group.
1194 error
= dmu_tx_assign(tx
, txg_how
);
1196 if (error
== ERESTART
) {
1197 ASSERT(txg_how
== TXG_NOWAIT
);
1200 ASSERT3U(error
, ==, ENOSPC
);
1201 ztest_record_enospc(tag
);
1206 txg
= dmu_tx_get_txg(tx
);
1212 ztest_pattern_set(void *buf
, uint64_t size
, uint64_t value
)
1215 uint64_t *ip_end
= (uint64_t *)((uintptr_t)buf
+ (uintptr_t)size
);
1223 ztest_pattern_match(void *buf
, uint64_t size
, uint64_t value
)
1226 uint64_t *ip_end
= (uint64_t *)((uintptr_t)buf
+ (uintptr_t)size
);
1230 diff
|= (value
- *ip
++);
1237 ztest_bt_generate(ztest_block_tag_t
*bt
, objset_t
*os
, uint64_t object
,
1238 uint64_t offset
, uint64_t gen
, uint64_t txg
, uint64_t crtxg
)
1240 bt
->bt_magic
= BT_MAGIC
;
1241 bt
->bt_objset
= dmu_objset_id(os
);
1242 bt
->bt_object
= object
;
1243 bt
->bt_offset
= offset
;
1246 bt
->bt_crtxg
= crtxg
;
1250 ztest_bt_verify(ztest_block_tag_t
*bt
, objset_t
*os
, uint64_t object
,
1251 uint64_t offset
, uint64_t gen
, uint64_t txg
, uint64_t crtxg
)
1253 ASSERT(bt
->bt_magic
== BT_MAGIC
);
1254 ASSERT(bt
->bt_objset
== dmu_objset_id(os
));
1255 ASSERT(bt
->bt_object
== object
);
1256 ASSERT(bt
->bt_offset
== offset
);
1257 ASSERT(bt
->bt_gen
<= gen
);
1258 ASSERT(bt
->bt_txg
<= txg
);
1259 ASSERT(bt
->bt_crtxg
== crtxg
);
1262 static ztest_block_tag_t
*
1263 ztest_bt_bonus(dmu_buf_t
*db
)
1265 dmu_object_info_t doi
;
1266 ztest_block_tag_t
*bt
;
1268 dmu_object_info_from_db(db
, &doi
);
1269 ASSERT3U(doi
.doi_bonus_size
, <=, db
->db_size
);
1270 ASSERT3U(doi
.doi_bonus_size
, >=, sizeof (*bt
));
1271 bt
= (void *)((char *)db
->db_data
+ doi
.doi_bonus_size
- sizeof (*bt
));
#define	lrz_type	lr_mode
#define	lrz_blocksize	lr_uid
#define	lrz_ibshift	lr_gid
#define	lrz_bonustype	lr_rdev
#define	lrz_bonuslen	lr_crtime[1]
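
/*
 * The lrz_* aliases above let ztest carry its object-creation parameters
 * (object type, block size, indirect block shift, bonus type and length)
 * in fields of lr_create_t that ztest does not otherwise use, so the
 * standard ZIL log-record format and replay machinery can transport them.
 */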
1287 ztest_log_create(ztest_ds_t
*zd
, dmu_tx_t
*tx
, lr_create_t
*lr
)
1289 char *name
= (void *)(lr
+ 1); /* name follows lr */
1290 size_t namesize
= strlen(name
) + 1;
1293 if (zil_replaying(zd
->zd_zilog
, tx
))
1296 itx
= zil_itx_create(TX_CREATE
, sizeof (*lr
) + namesize
);
1297 bcopy(&lr
->lr_common
+ 1, &itx
->itx_lr
+ 1,
1298 sizeof (*lr
) + namesize
- sizeof (lr_t
));
1300 zil_itx_assign(zd
->zd_zilog
, itx
, tx
);
1304 ztest_log_remove(ztest_ds_t
*zd
, dmu_tx_t
*tx
, lr_remove_t
*lr
, uint64_t object
)
1306 char *name
= (void *)(lr
+ 1); /* name follows lr */
1307 size_t namesize
= strlen(name
) + 1;
1310 if (zil_replaying(zd
->zd_zilog
, tx
))
1313 itx
= zil_itx_create(TX_REMOVE
, sizeof (*lr
) + namesize
);
1314 bcopy(&lr
->lr_common
+ 1, &itx
->itx_lr
+ 1,
1315 sizeof (*lr
) + namesize
- sizeof (lr_t
));
1317 itx
->itx_oid
= object
;
1318 zil_itx_assign(zd
->zd_zilog
, itx
, tx
);
1322 ztest_log_write(ztest_ds_t
*zd
, dmu_tx_t
*tx
, lr_write_t
*lr
)
1325 itx_wr_state_t write_state
= ztest_random(WR_NUM_STATES
);
1327 if (zil_replaying(zd
->zd_zilog
, tx
))
1330 if (lr
->lr_length
> ZIL_MAX_LOG_DATA
)
1331 write_state
= WR_INDIRECT
;
1333 itx
= zil_itx_create(TX_WRITE
,
1334 sizeof (*lr
) + (write_state
== WR_COPIED
? lr
->lr_length
: 0));
1336 if (write_state
== WR_COPIED
&&
1337 dmu_read(zd
->zd_os
, lr
->lr_foid
, lr
->lr_offset
, lr
->lr_length
,
1338 ((lr_write_t
*)&itx
->itx_lr
) + 1, DMU_READ_NO_PREFETCH
) != 0) {
1339 zil_itx_destroy(itx
);
1340 itx
= zil_itx_create(TX_WRITE
, sizeof (*lr
));
1341 write_state
= WR_NEED_COPY
;
1343 itx
->itx_private
= zd
;
1344 itx
->itx_wr_state
= write_state
;
1345 itx
->itx_sync
= (ztest_random(8) == 0);
1346 itx
->itx_sod
+= (write_state
== WR_NEED_COPY
? lr
->lr_length
: 0);
1348 bcopy(&lr
->lr_common
+ 1, &itx
->itx_lr
+ 1,
1349 sizeof (*lr
) - sizeof (lr_t
));
1351 zil_itx_assign(zd
->zd_zilog
, itx
, tx
);
1355 ztest_log_truncate(ztest_ds_t
*zd
, dmu_tx_t
*tx
, lr_truncate_t
*lr
)
1359 if (zil_replaying(zd
->zd_zilog
, tx
))
1362 itx
= zil_itx_create(TX_TRUNCATE
, sizeof (*lr
));
1363 bcopy(&lr
->lr_common
+ 1, &itx
->itx_lr
+ 1,
1364 sizeof (*lr
) - sizeof (lr_t
));
1366 itx
->itx_sync
= B_FALSE
;
1367 zil_itx_assign(zd
->zd_zilog
, itx
, tx
);
1371 ztest_log_setattr(ztest_ds_t
*zd
, dmu_tx_t
*tx
, lr_setattr_t
*lr
)
1375 if (zil_replaying(zd
->zd_zilog
, tx
))
1378 itx
= zil_itx_create(TX_SETATTR
, sizeof (*lr
));
1379 bcopy(&lr
->lr_common
+ 1, &itx
->itx_lr
+ 1,
1380 sizeof (*lr
) - sizeof (lr_t
));
1382 itx
->itx_sync
= B_FALSE
;
1383 zil_itx_assign(zd
->zd_zilog
, itx
, tx
);
1390 ztest_replay_create(ztest_ds_t
*zd
, lr_create_t
*lr
, boolean_t byteswap
)
1392 char *name
= (void *)(lr
+ 1); /* name follows lr */
1393 objset_t
*os
= zd
->zd_os
;
1394 ztest_block_tag_t
*bbt
;
1401 byteswap_uint64_array(lr
, sizeof (*lr
));
1403 ASSERT(lr
->lr_doid
== ZTEST_DIROBJ
);
1404 ASSERT(name
[0] != '\0');
1406 tx
= dmu_tx_create(os
);
1408 dmu_tx_hold_zap(tx
, lr
->lr_doid
, B_TRUE
, name
);
1410 if (lr
->lrz_type
== DMU_OT_ZAP_OTHER
) {
1411 dmu_tx_hold_zap(tx
, DMU_NEW_OBJECT
, B_TRUE
, NULL
);
1413 dmu_tx_hold_bonus(tx
, DMU_NEW_OBJECT
);
1416 txg
= ztest_tx_assign(tx
, TXG_WAIT
, FTAG
);
1420 ASSERT(dmu_objset_zil(os
)->zl_replay
== !!lr
->lr_foid
);
1422 if (lr
->lrz_type
== DMU_OT_ZAP_OTHER
) {
1423 if (lr
->lr_foid
== 0) {
1424 lr
->lr_foid
= zap_create(os
,
1425 lr
->lrz_type
, lr
->lrz_bonustype
,
1426 lr
->lrz_bonuslen
, tx
);
1428 error
= zap_create_claim(os
, lr
->lr_foid
,
1429 lr
->lrz_type
, lr
->lrz_bonustype
,
1430 lr
->lrz_bonuslen
, tx
);
1433 if (lr
->lr_foid
== 0) {
1434 lr
->lr_foid
= dmu_object_alloc(os
,
1435 lr
->lrz_type
, 0, lr
->lrz_bonustype
,
1436 lr
->lrz_bonuslen
, tx
);
1438 error
= dmu_object_claim(os
, lr
->lr_foid
,
1439 lr
->lrz_type
, 0, lr
->lrz_bonustype
,
1440 lr
->lrz_bonuslen
, tx
);
1445 ASSERT3U(error
, ==, EEXIST
);
1446 ASSERT(zd
->zd_zilog
->zl_replay
);
1451 ASSERT(lr
->lr_foid
!= 0);
1453 if (lr
->lrz_type
!= DMU_OT_ZAP_OTHER
)
1454 VERIFY3U(0, ==, dmu_object_set_blocksize(os
, lr
->lr_foid
,
1455 lr
->lrz_blocksize
, lr
->lrz_ibshift
, tx
));
1457 VERIFY3U(0, ==, dmu_bonus_hold(os
, lr
->lr_foid
, FTAG
, &db
));
1458 bbt
= ztest_bt_bonus(db
);
1459 dmu_buf_will_dirty(db
, tx
);
1460 ztest_bt_generate(bbt
, os
, lr
->lr_foid
, -1ULL, lr
->lr_gen
, txg
, txg
);
1461 dmu_buf_rele(db
, FTAG
);
1463 VERIFY3U(0, ==, zap_add(os
, lr
->lr_doid
, name
, sizeof (uint64_t), 1,
1466 (void) ztest_log_create(zd
, tx
, lr
);
1474 ztest_replay_remove(ztest_ds_t
*zd
, lr_remove_t
*lr
, boolean_t byteswap
)
1476 char *name
= (void *)(lr
+ 1); /* name follows lr */
1477 objset_t
*os
= zd
->zd_os
;
1478 dmu_object_info_t doi
;
1480 uint64_t object
, txg
;
1483 byteswap_uint64_array(lr
, sizeof (*lr
));
1485 ASSERT(lr
->lr_doid
== ZTEST_DIROBJ
);
1486 ASSERT(name
[0] != '\0');
1489 zap_lookup(os
, lr
->lr_doid
, name
, sizeof (object
), 1, &object
));
1490 ASSERT(object
!= 0);
1492 ztest_object_lock(zd
, object
, RL_WRITER
);
1494 VERIFY3U(0, ==, dmu_object_info(os
, object
, &doi
));
1496 tx
= dmu_tx_create(os
);
1498 dmu_tx_hold_zap(tx
, lr
->lr_doid
, B_FALSE
, name
);
1499 dmu_tx_hold_free(tx
, object
, 0, DMU_OBJECT_END
);
1501 txg
= ztest_tx_assign(tx
, TXG_WAIT
, FTAG
);
1503 ztest_object_unlock(zd
, object
);
1507 if (doi
.doi_type
== DMU_OT_ZAP_OTHER
) {
1508 VERIFY3U(0, ==, zap_destroy(os
, object
, tx
));
1510 VERIFY3U(0, ==, dmu_object_free(os
, object
, tx
));
1513 VERIFY3U(0, ==, zap_remove(os
, lr
->lr_doid
, name
, tx
));
1515 (void) ztest_log_remove(zd
, tx
, lr
, object
);
1519 ztest_object_unlock(zd
, object
);
1525 ztest_replay_write(ztest_ds_t
*zd
, lr_write_t
*lr
, boolean_t byteswap
)
1527 objset_t
*os
= zd
->zd_os
;
1528 void *data
= lr
+ 1; /* data follows lr */
1529 uint64_t offset
, length
;
1530 ztest_block_tag_t
*bt
= data
;
1531 ztest_block_tag_t
*bbt
;
1532 uint64_t gen
, txg
, lrtxg
, crtxg
;
1533 dmu_object_info_t doi
;
1536 arc_buf_t
*abuf
= NULL
;
1540 byteswap_uint64_array(lr
, sizeof (*lr
));
1542 offset
= lr
->lr_offset
;
1543 length
= lr
->lr_length
;
1545 /* If it's a dmu_sync() block, write the whole block */
1546 if (lr
->lr_common
.lrc_reclen
== sizeof (lr_write_t
)) {
1547 uint64_t blocksize
= BP_GET_LSIZE(&lr
->lr_blkptr
);
1548 if (length
< blocksize
) {
1549 offset
-= offset
% blocksize
;
1554 if (bt
->bt_magic
== BSWAP_64(BT_MAGIC
))
1555 byteswap_uint64_array(bt
, sizeof (*bt
));
1557 if (bt
->bt_magic
!= BT_MAGIC
)
1560 ztest_object_lock(zd
, lr
->lr_foid
, RL_READER
);
1561 rl
= ztest_range_lock(zd
, lr
->lr_foid
, offset
, length
, RL_WRITER
);
1563 VERIFY3U(0, ==, dmu_bonus_hold(os
, lr
->lr_foid
, FTAG
, &db
));
1565 dmu_object_info_from_db(db
, &doi
);
1567 bbt
= ztest_bt_bonus(db
);
1568 ASSERT3U(bbt
->bt_magic
, ==, BT_MAGIC
);
1570 crtxg
= bbt
->bt_crtxg
;
1571 lrtxg
= lr
->lr_common
.lrc_txg
;
1573 tx
= dmu_tx_create(os
);
1575 dmu_tx_hold_write(tx
, lr
->lr_foid
, offset
, length
);
1577 if (ztest_random(8) == 0 && length
== doi
.doi_data_block_size
&&
1578 P2PHASE(offset
, length
) == 0)
1579 abuf
= dmu_request_arcbuf(db
, length
);
1581 txg
= ztest_tx_assign(tx
, TXG_WAIT
, FTAG
);
1584 dmu_return_arcbuf(abuf
);
1585 dmu_buf_rele(db
, FTAG
);
1586 ztest_range_unlock(rl
);
1587 ztest_object_unlock(zd
, lr
->lr_foid
);
1593 * Usually, verify the old data before writing new data --
1594 * but not always, because we also want to verify correct
1595 * behavior when the data was not recently read into cache.
1597 ASSERT(offset
% doi
.doi_data_block_size
== 0);
1598 if (ztest_random(4) != 0) {
1599 int prefetch
= ztest_random(2) ?
1600 DMU_READ_PREFETCH
: DMU_READ_NO_PREFETCH
;
1601 ztest_block_tag_t rbt
;
1603 VERIFY(dmu_read(os
, lr
->lr_foid
, offset
,
1604 sizeof (rbt
), &rbt
, prefetch
) == 0);
1605 if (rbt
.bt_magic
== BT_MAGIC
) {
1606 ztest_bt_verify(&rbt
, os
, lr
->lr_foid
,
1607 offset
, gen
, txg
, crtxg
);
1612 * Writes can appear to be newer than the bonus buffer because
1613 * the ztest_get_data() callback does a dmu_read() of the
1614 * open-context data, which may be different than the data
1615 * as it was when the write was generated.
1617 if (zd
->zd_zilog
->zl_replay
) {
1618 ztest_bt_verify(bt
, os
, lr
->lr_foid
, offset
,
1619 MAX(gen
, bt
->bt_gen
), MAX(txg
, lrtxg
),
1624 * Set the bt's gen/txg to the bonus buffer's gen/txg
1625 * so that all of the usual ASSERTs will work.
1627 ztest_bt_generate(bt
, os
, lr
->lr_foid
, offset
, gen
, txg
, crtxg
);
1631 dmu_write(os
, lr
->lr_foid
, offset
, length
, data
, tx
);
1633 bcopy(data
, abuf
->b_data
, length
);
1634 dmu_assign_arcbuf(db
, offset
, abuf
, tx
);
1637 (void) ztest_log_write(zd
, tx
, lr
);
1639 dmu_buf_rele(db
, FTAG
);
1643 ztest_range_unlock(rl
);
1644 ztest_object_unlock(zd
, lr
->lr_foid
);
1650 ztest_replay_truncate(ztest_ds_t
*zd
, lr_truncate_t
*lr
, boolean_t byteswap
)
1652 objset_t
*os
= zd
->zd_os
;
1658 byteswap_uint64_array(lr
, sizeof (*lr
));
1660 ztest_object_lock(zd
, lr
->lr_foid
, RL_READER
);
1661 rl
= ztest_range_lock(zd
, lr
->lr_foid
, lr
->lr_offset
, lr
->lr_length
,
1664 tx
= dmu_tx_create(os
);
1666 dmu_tx_hold_free(tx
, lr
->lr_foid
, lr
->lr_offset
, lr
->lr_length
);
1668 txg
= ztest_tx_assign(tx
, TXG_WAIT
, FTAG
);
1670 ztest_range_unlock(rl
);
1671 ztest_object_unlock(zd
, lr
->lr_foid
);
1675 VERIFY(dmu_free_range(os
, lr
->lr_foid
, lr
->lr_offset
,
1676 lr
->lr_length
, tx
) == 0);
1678 (void) ztest_log_truncate(zd
, tx
, lr
);
1682 ztest_range_unlock(rl
);
1683 ztest_object_unlock(zd
, lr
->lr_foid
);
1689 ztest_replay_setattr(ztest_ds_t
*zd
, lr_setattr_t
*lr
, boolean_t byteswap
)
1691 objset_t
*os
= zd
->zd_os
;
1694 ztest_block_tag_t
*bbt
;
1695 uint64_t txg
, lrtxg
, crtxg
;
1698 byteswap_uint64_array(lr
, sizeof (*lr
));
1700 ztest_object_lock(zd
, lr
->lr_foid
, RL_WRITER
);
1702 VERIFY3U(0, ==, dmu_bonus_hold(os
, lr
->lr_foid
, FTAG
, &db
));
1704 tx
= dmu_tx_create(os
);
1705 dmu_tx_hold_bonus(tx
, lr
->lr_foid
);
1707 txg
= ztest_tx_assign(tx
, TXG_WAIT
, FTAG
);
1709 dmu_buf_rele(db
, FTAG
);
1710 ztest_object_unlock(zd
, lr
->lr_foid
);
1714 bbt
= ztest_bt_bonus(db
);
1715 ASSERT3U(bbt
->bt_magic
, ==, BT_MAGIC
);
1716 crtxg
= bbt
->bt_crtxg
;
1717 lrtxg
= lr
->lr_common
.lrc_txg
;
1719 if (zd
->zd_zilog
->zl_replay
) {
1720 ASSERT(lr
->lr_size
!= 0);
1721 ASSERT(lr
->lr_mode
!= 0);
1725 * Randomly change the size and increment the generation.
1727 lr
->lr_size
= (ztest_random(db
->db_size
/ sizeof (*bbt
)) + 1) *
1729 lr
->lr_mode
= bbt
->bt_gen
+ 1;
1734 * Verify that the current bonus buffer is not newer than our txg.
1736 ztest_bt_verify(bbt
, os
, lr
->lr_foid
, -1ULL, lr
->lr_mode
,
1737 MAX(txg
, lrtxg
), crtxg
);
1739 dmu_buf_will_dirty(db
, tx
);
1741 ASSERT3U(lr
->lr_size
, >=, sizeof (*bbt
));
1742 ASSERT3U(lr
->lr_size
, <=, db
->db_size
);
1743 VERIFY3U(dmu_set_bonus(db
, lr
->lr_size
, tx
), ==, 0);
1744 bbt
= ztest_bt_bonus(db
);
1746 ztest_bt_generate(bbt
, os
, lr
->lr_foid
, -1ULL, lr
->lr_mode
, txg
, crtxg
);
1748 dmu_buf_rele(db
, FTAG
);
1750 (void) ztest_log_setattr(zd
, tx
, lr
);
1754 ztest_object_unlock(zd
, lr
->lr_foid
);
zil_replay_func_t *ztest_replay_vector[TX_MAX_TYPE] = {
	NULL,					/* 0 no such transaction type */
	(zil_replay_func_t *)ztest_replay_create,	/* TX_CREATE */
	NULL,					/* TX_MKDIR */
	NULL,					/* TX_MKXATTR */
	NULL,					/* TX_SYMLINK */
	(zil_replay_func_t *)ztest_replay_remove,	/* TX_REMOVE */
	NULL,					/* TX_RMDIR */
	NULL,					/* TX_LINK */
	NULL,					/* TX_RENAME */
	(zil_replay_func_t *)ztest_replay_write,	/* TX_WRITE */
	(zil_replay_func_t *)ztest_replay_truncate,	/* TX_TRUNCATE */
	(zil_replay_func_t *)ztest_replay_setattr,	/* TX_SETATTR */
	NULL,					/* TX_ACL */
	NULL,					/* TX_CREATE_ACL */
	NULL,					/* TX_CREATE_ATTR */
	NULL,					/* TX_CREATE_ACL_ATTR */
	NULL,					/* TX_MKDIR_ACL */
	NULL,					/* TX_MKDIR_ATTR */
	NULL,					/* TX_MKDIR_ACL_ATTR */
	NULL,					/* TX_WRITE2 */
};
 * ZIL get_data callbacks
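 *
 * ztest_get_data() is the callback registered via zil_open(); zil_commit()
 * calls it to obtain the data for a log record that was not copied into the
 * log.  For an immediate write it dmu_read()s the data into the supplied
 * buffer; otherwise it holds the buffer and issues dmu_sync() so the block
 * pointer can be filled in, with ztest_get_done() releasing the locks and
 * holds in either case.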
1787 ztest_get_done(zgd_t
*zgd
, int error
)
1789 ztest_ds_t
*zd
= zgd
->zgd_private
;
1790 uint64_t object
= zgd
->zgd_rl
->rl_object
;
1793 dmu_buf_rele(zgd
->zgd_db
, zgd
);
1795 ztest_range_unlock(zgd
->zgd_rl
);
1796 ztest_object_unlock(zd
, object
);
1798 if (error
== 0 && zgd
->zgd_bp
)
1799 zil_add_block(zgd
->zgd_zilog
, zgd
->zgd_bp
);
1801 umem_free(zgd
, sizeof (*zgd
));
1805 ztest_get_data(void *arg
, lr_write_t
*lr
, char *buf
, zio_t
*zio
)
1807 ztest_ds_t
*zd
= arg
;
1808 objset_t
*os
= zd
->zd_os
;
1809 uint64_t object
= lr
->lr_foid
;
1810 uint64_t offset
= lr
->lr_offset
;
1811 uint64_t size
= lr
->lr_length
;
1812 blkptr_t
*bp
= &lr
->lr_blkptr
;
1813 uint64_t txg
= lr
->lr_common
.lrc_txg
;
1815 dmu_object_info_t doi
;
1820 ztest_object_lock(zd
, object
, RL_READER
);
1821 error
= dmu_bonus_hold(os
, object
, FTAG
, &db
);
1823 ztest_object_unlock(zd
, object
);
1827 crtxg
= ztest_bt_bonus(db
)->bt_crtxg
;
1829 if (crtxg
== 0 || crtxg
> txg
) {
1830 dmu_buf_rele(db
, FTAG
);
1831 ztest_object_unlock(zd
, object
);
1835 dmu_object_info_from_db(db
, &doi
);
1836 dmu_buf_rele(db
, FTAG
);
1839 zgd
= umem_zalloc(sizeof (*zgd
), UMEM_NOFAIL
);
1840 zgd
->zgd_zilog
= zd
->zd_zilog
;
1841 zgd
->zgd_private
= zd
;
1843 if (buf
!= NULL
) { /* immediate write */
1844 zgd
->zgd_rl
= ztest_range_lock(zd
, object
, offset
, size
,
1847 error
= dmu_read(os
, object
, offset
, size
, buf
,
1848 DMU_READ_NO_PREFETCH
);
1851 size
= doi
.doi_data_block_size
;
1853 offset
= P2ALIGN(offset
, size
);
1855 ASSERT(offset
< size
);
1859 zgd
->zgd_rl
= ztest_range_lock(zd
, object
, offset
, size
,
1862 error
= dmu_buf_hold(os
, object
, offset
, zgd
, &db
,
1863 DMU_READ_NO_PREFETCH
);
1869 ASSERT(db
->db_offset
== offset
);
1870 ASSERT(db
->db_size
== size
);
1872 error
= dmu_sync(zio
, lr
->lr_common
.lrc_txg
,
1873 ztest_get_done
, zgd
);
1880 ztest_get_done(zgd
, error
);
1886 ztest_lr_alloc(size_t lrsize
, char *name
)
1889 size_t namesize
= name
? strlen(name
) + 1 : 0;
1891 lr
= umem_zalloc(lrsize
+ namesize
, UMEM_NOFAIL
);
1894 bcopy(name
, lr
+ lrsize
, namesize
);
1900 ztest_lr_free(void *lr
, size_t lrsize
, char *name
)
1902 size_t namesize
= name
? strlen(name
) + 1 : 0;
1904 umem_free(lr
, lrsize
+ namesize
);
1908 * Lookup a bunch of objects. Returns the number of objects not found.
1911 ztest_lookup(ztest_ds_t
*zd
, ztest_od_t
*od
, int count
)
1917 ASSERT(mutex_held(&zd
->zd_dirobj_lock
));
1919 for (i
= 0; i
< count
; i
++, od
++) {
1921 error
= zap_lookup(zd
->zd_os
, od
->od_dir
, od
->od_name
,
1922 sizeof (uint64_t), 1, &od
->od_object
);
1924 ASSERT(error
== ENOENT
);
1925 ASSERT(od
->od_object
== 0);
1929 ztest_block_tag_t
*bbt
;
1930 dmu_object_info_t doi
;
1932 ASSERT(od
->od_object
!= 0);
1933 ASSERT(missing
== 0); /* there should be no gaps */
1935 ztest_object_lock(zd
, od
->od_object
, RL_READER
);
1936 VERIFY3U(0, ==, dmu_bonus_hold(zd
->zd_os
,
1937 od
->od_object
, FTAG
, &db
));
1938 dmu_object_info_from_db(db
, &doi
);
1939 bbt
= ztest_bt_bonus(db
);
1940 ASSERT3U(bbt
->bt_magic
, ==, BT_MAGIC
);
1941 od
->od_type
= doi
.doi_type
;
1942 od
->od_blocksize
= doi
.doi_data_block_size
;
1943 od
->od_gen
= bbt
->bt_gen
;
1944 dmu_buf_rele(db
, FTAG
);
1945 ztest_object_unlock(zd
, od
->od_object
);
1953 ztest_create(ztest_ds_t
*zd
, ztest_od_t
*od
, int count
)
1958 ASSERT(mutex_held(&zd
->zd_dirobj_lock
));
1960 for (i
= 0; i
< count
; i
++, od
++) {
1967 lr_create_t
*lr
= ztest_lr_alloc(sizeof (*lr
), od
->od_name
);
1969 lr
->lr_doid
= od
->od_dir
;
1970 lr
->lr_foid
= 0; /* 0 to allocate, > 0 to claim */
1971 lr
->lrz_type
= od
->od_crtype
;
1972 lr
->lrz_blocksize
= od
->od_crblocksize
;
1973 lr
->lrz_ibshift
= ztest_random_ibshift();
1974 lr
->lrz_bonustype
= DMU_OT_UINT64_OTHER
;
1975 lr
->lrz_bonuslen
= dmu_bonus_max();
1976 lr
->lr_gen
= od
->od_crgen
;
1977 lr
->lr_crtime
[0] = time(NULL
);
1979 if (ztest_replay_create(zd
, lr
, B_FALSE
) != 0) {
1980 ASSERT(missing
== 0);
1984 od
->od_object
= lr
->lr_foid
;
1985 od
->od_type
= od
->od_crtype
;
1986 od
->od_blocksize
= od
->od_crblocksize
;
1987 od
->od_gen
= od
->od_crgen
;
1988 ASSERT(od
->od_object
!= 0);
1991 ztest_lr_free(lr
, sizeof (*lr
), od
->od_name
);
1998 ztest_remove(ztest_ds_t
*zd
, ztest_od_t
*od
, int count
)
2004 ASSERT(mutex_held(&zd
->zd_dirobj_lock
));
2008 for (i
= count
- 1; i
>= 0; i
--, od
--) {
2014 if (od
->od_object
== 0)
2017 lr_remove_t
*lr
= ztest_lr_alloc(sizeof (*lr
), od
->od_name
);
2019 lr
->lr_doid
= od
->od_dir
;
2021 if ((error
= ztest_replay_remove(zd
, lr
, B_FALSE
)) != 0) {
2022 ASSERT3U(error
, ==, ENOSPC
);
2027 ztest_lr_free(lr
, sizeof (*lr
), od
->od_name
);
2034 ztest_write(ztest_ds_t
*zd
, uint64_t object
, uint64_t offset
, uint64_t size
,
2040 lr
= ztest_lr_alloc(sizeof (*lr
) + size
, NULL
);
2042 lr
->lr_foid
= object
;
2043 lr
->lr_offset
= offset
;
2044 lr
->lr_length
= size
;
2046 BP_ZERO(&lr
->lr_blkptr
);
2048 bcopy(data
, lr
+ 1, size
);
2050 error
= ztest_replay_write(zd
, lr
, B_FALSE
);
2052 ztest_lr_free(lr
, sizeof (*lr
) + size
, NULL
);
2058 ztest_truncate(ztest_ds_t
*zd
, uint64_t object
, uint64_t offset
, uint64_t size
)
2063 lr
= ztest_lr_alloc(sizeof (*lr
), NULL
);
2065 lr
->lr_foid
= object
;
2066 lr
->lr_offset
= offset
;
2067 lr
->lr_length
= size
;
2069 error
= ztest_replay_truncate(zd
, lr
, B_FALSE
);
2071 ztest_lr_free(lr
, sizeof (*lr
), NULL
);
2077 ztest_setattr(ztest_ds_t
*zd
, uint64_t object
)
2082 lr
= ztest_lr_alloc(sizeof (*lr
), NULL
);
2084 lr
->lr_foid
= object
;
2088 error
= ztest_replay_setattr(zd
, lr
, B_FALSE
);
2090 ztest_lr_free(lr
, sizeof (*lr
), NULL
);
2096 ztest_prealloc(ztest_ds_t
*zd
, uint64_t object
, uint64_t offset
, uint64_t size
)
2098 objset_t
*os
= zd
->zd_os
;
2103 txg_wait_synced(dmu_objset_pool(os
), 0);
2105 ztest_object_lock(zd
, object
, RL_READER
);
2106 rl
= ztest_range_lock(zd
, object
, offset
, size
, RL_WRITER
);
2108 tx
= dmu_tx_create(os
);
2110 dmu_tx_hold_write(tx
, object
, offset
, size
);
2112 txg
= ztest_tx_assign(tx
, TXG_WAIT
, FTAG
);
2115 dmu_prealloc(os
, object
, offset
, size
, tx
);
2117 txg_wait_synced(dmu_objset_pool(os
), txg
);
2119 (void) dmu_free_long_range(os
, object
, offset
, size
);
2122 ztest_range_unlock(rl
);
2123 ztest_object_unlock(zd
, object
);
2127 ztest_io(ztest_ds_t
*zd
, uint64_t object
, uint64_t offset
)
2129 ztest_block_tag_t wbt
;
2130 dmu_object_info_t doi
;
2131 enum ztest_io_type io_type
;
2135 VERIFY(dmu_object_info(zd
->zd_os
, object
, &doi
) == 0);
2136 blocksize
= doi
.doi_data_block_size
;
2137 data
= umem_alloc(blocksize
, UMEM_NOFAIL
);
2140 * Pick an i/o type at random, biased toward writing block tags.
2142 io_type
= ztest_random(ZTEST_IO_TYPES
);
2143 if (ztest_random(2) == 0)
2144 io_type
= ZTEST_IO_WRITE_TAG
;
2146 (void) rw_enter(&zd
->zd_zilog_lock
, RW_READER
);
2150 case ZTEST_IO_WRITE_TAG
:
2151 ztest_bt_generate(&wbt
, zd
->zd_os
, object
, offset
, 0, 0, 0);
2152 (void) ztest_write(zd
, object
, offset
, sizeof (wbt
), &wbt
);
2155 case ZTEST_IO_WRITE_PATTERN
:
2156 (void) memset(data
, 'a' + (object
+ offset
) % 5, blocksize
);
2157 if (ztest_random(2) == 0) {
2159 * Induce fletcher2 collisions to ensure that
2160 * zio_ddt_collision() detects and resolves them
2161 * when using fletcher2-verify for deduplication.
2163 ((uint64_t *)data
)[0] ^= 1ULL << 63;
2164 ((uint64_t *)data
)[4] ^= 1ULL << 63;
2166 (void) ztest_write(zd
, object
, offset
, blocksize
, data
);
2169 case ZTEST_IO_WRITE_ZEROES
:
2170 bzero(data
, blocksize
);
2171 (void) ztest_write(zd
, object
, offset
, blocksize
, data
);
2174 case ZTEST_IO_TRUNCATE
:
2175 (void) ztest_truncate(zd
, object
, offset
, blocksize
);
2178 case ZTEST_IO_SETATTR
:
2179 (void) ztest_setattr(zd
, object
);
2185 (void) rw_exit(&zd
->zd_zilog_lock
);
2187 umem_free(data
, blocksize
);
2191 * Initialize an object description template.
2194 ztest_od_init(ztest_od_t
*od
, uint64_t id
, char *tag
, uint64_t index
,
2195 dmu_object_type_t type
, uint64_t blocksize
, uint64_t gen
)
2197 od
->od_dir
= ZTEST_DIROBJ
;
2200 od
->od_crtype
= type
;
2201 od
->od_crblocksize
= blocksize
? blocksize
: ztest_random_blocksize();
2204 od
->od_type
= DMU_OT_NONE
;
2205 od
->od_blocksize
= 0;
2208 (void) snprintf(od
->od_name
, sizeof (od
->od_name
), "%s(%lld)[%llu]",
2209 tag
, (longlong_t
)id
, (u_longlong_t
)index
);
2213 * Lookup or create the objects for a test using the od template.
2214 * If the objects do not all exist, or if 'remove' is specified,
2215 * remove any existing objects and create new ones. Otherwise,
2216 * use the existing objects.
2219 ztest_object_init(ztest_ds_t
*zd
, ztest_od_t
*od
, size_t size
, boolean_t remove
)
2221 int count
= size
/ sizeof (*od
);
2224 mutex_enter(&zd
->zd_dirobj_lock
);
2225 if ((ztest_lookup(zd
, od
, count
) != 0 || remove
) &&
2226 (ztest_remove(zd
, od
, count
) != 0 ||
2227 ztest_create(zd
, od
, count
) != 0))
2230 mutex_exit(&zd
->zd_dirobj_lock
);
2237 ztest_zil_commit(ztest_ds_t
*zd
, uint64_t id
)
2239 zilog_t
*zilog
= zd
->zd_zilog
;
2241 (void) rw_enter(&zd
->zd_zilog_lock
, RW_READER
);
2243 zil_commit(zilog
, ztest_random(ZTEST_OBJECTS
));
2246 * Remember the committed values in zd, which is in parent/child
2247 * shared memory. If we die, the next iteration of ztest_run()
2248 * will verify that the log really does contain this record.
2250 mutex_enter(&zilog
->zl_lock
);
2251 ASSERT(zd
->zd_shared
!= NULL
);
2252 ASSERT3U(zd
->zd_shared
->zd_seq
, <=, zilog
->zl_commit_lr_seq
);
2253 zd
->zd_shared
->zd_seq
= zilog
->zl_commit_lr_seq
;
2254 mutex_exit(&zilog
->zl_lock
);
2256 (void) rw_exit(&zd
->zd_zilog_lock
);
2260 * This function is designed to simulate the operations that occur during a
2261 * mount/unmount operation. We hold the dataset across these operations in an
2262 * attempt to expose any implicit assumptions about ZIL management.
2266 ztest_zil_remount(ztest_ds_t
*zd
, uint64_t id
)
2268 objset_t
*os
= zd
->zd_os
;
2270 (void) rw_enter(&zd
->zd_zilog_lock
, RW_WRITER
);
2272 /* zfs_sb_teardown() */
2273 zil_close(zd
->zd_zilog
);
2275 /* zfsvfs_setup() */
2276 VERIFY(zil_open(os
, ztest_get_data
) == zd
->zd_zilog
);
2277 zil_replay(os
, zd
, ztest_replay_vector
);
2279 (void) rw_exit(&zd
->zd_zilog_lock
);
2283 * Verify that we can't destroy an active pool, create an existing pool,
2284 * or create a pool with a bad vdev spec.
2288 ztest_spa_create_destroy(ztest_ds_t
*zd
, uint64_t id
)
2290 ztest_shared_opts_t
*zo
= &ztest_opts
;
2295 * Attempt to create using a bad file.
2297 nvroot
= make_vdev_root("/dev/bogus", NULL
, 0, 0, 0, 0, 0, 1);
2298 VERIFY3U(ENOENT
, ==,
2299 spa_create("ztest_bad_file", nvroot
, NULL
, NULL
, NULL
));
2300 nvlist_free(nvroot
);
2303 * Attempt to create using a bad mirror.
2305 nvroot
= make_vdev_root("/dev/bogus", NULL
, 0, 0, 0, 0, 2, 1);
2306 VERIFY3U(ENOENT
, ==,
2307 spa_create("ztest_bad_mirror", nvroot
, NULL
, NULL
, NULL
));
2308 nvlist_free(nvroot
);
2311 * Attempt to create an existing pool. It shouldn't matter
2312 * what's in the nvroot; we should fail with EEXIST.
2314 (void) rw_enter(&ztest_name_lock
, RW_READER
);
2315 nvroot
= make_vdev_root("/dev/bogus", NULL
, 0, 0, 0, 0, 0, 1);
2316 VERIFY3U(EEXIST
, ==, spa_create(zo
->zo_pool
, nvroot
, NULL
, NULL
, NULL
));
2317 nvlist_free(nvroot
);
2318 VERIFY3U(0, ==, spa_open(zo
->zo_pool
, &spa
, FTAG
));
2319 VERIFY3U(EBUSY
, ==, spa_destroy(zo
->zo_pool
));
2320 spa_close(spa
, FTAG
);
2322 (void) rw_exit(&ztest_name_lock
);
2326 vdev_lookup_by_path(vdev_t
*vd
, const char *path
)
2331 if (vd
->vdev_path
!= NULL
&& strcmp(path
, vd
->vdev_path
) == 0)
2334 for (c
= 0; c
< vd
->vdev_children
; c
++)
2335 if ((mvd
= vdev_lookup_by_path(vd
->vdev_child
[c
], path
)) !=
2343 * Find the first available hole which can be used as a top-level.
2346 find_vdev_hole(spa_t
*spa
)
2348 vdev_t
*rvd
= spa
->spa_root_vdev
;
2351 ASSERT(spa_config_held(spa
, SCL_VDEV
, RW_READER
) == SCL_VDEV
);
2353 for (c
= 0; c
< rvd
->vdev_children
; c
++) {
2354 vdev_t
*cvd
= rvd
->vdev_child
[c
];
2356 if (cvd
->vdev_ishole
)
2363 * Verify that vdev_add() works as expected.
2367 ztest_vdev_add_remove(ztest_ds_t
*zd
, uint64_t id
)
2369 ztest_shared_t
*zs
= ztest_shared
;
2370 spa_t
*spa
= ztest_spa
;
2376 mutex_enter(&ztest_vdev_lock
);
2378 MAX(zs
->zs_mirrors
+ zs
->zs_splits
, 1) * ztest_opts
.zo_raidz
;
2380 spa_config_enter(spa
, SCL_VDEV
, FTAG
, RW_READER
);
2382 ztest_shared
->zs_vdev_next_leaf
= find_vdev_hole(spa
) * leaves
;
2385 * If we have slogs then remove them 1/4 of the time.
2387 if (spa_has_slogs(spa
) && ztest_random(4) == 0) {
2389 * Grab the guid from the head of the log class rotor.
2391 guid
= spa_log_class(spa
)->mc_rotor
->mg_vd
->vdev_guid
;
2393 spa_config_exit(spa
, SCL_VDEV
, FTAG
);
2396 * We have to grab the zs_name_lock as writer to
2397 * prevent a race between removing a slog (dmu_objset_find)
2398 * and destroying a dataset. Removing the slog will
2399 * grab a reference on the dataset which may cause
2400 * dmu_objset_destroy() to fail with EBUSY thus
2401 * leaving the dataset in an inconsistent state.
2403 rw_enter(&ztest_name_lock
, RW_WRITER
);
2404 error
= spa_vdev_remove(spa
, guid
, B_FALSE
);
2405 rw_exit(&ztest_name_lock
);
2407 if (error
&& error
!= EEXIST
)
2408 fatal(0, "spa_vdev_remove() = %d", error
);
2410 spa_config_exit(spa
, SCL_VDEV
, FTAG
);
2413 * Make 1/4 of the devices be log devices.
2415 nvroot
= make_vdev_root(NULL
, NULL
,
2416 ztest_opts
.zo_vdev_size
, 0,
2417 ztest_random(4) == 0, ztest_opts
.zo_raidz
,
2420 error
= spa_vdev_add(spa
, nvroot
);
2421 nvlist_free(nvroot
);
2423 if (error
== ENOSPC
)
2424 ztest_record_enospc("spa_vdev_add");
2425 else if (error
!= 0)
2426 fatal(0, "spa_vdev_add() = %d", error
);
2429 mutex_exit(&ztest_vdev_lock
);
/*
 * Verify that adding/removing aux devices (l2arc, hot spare) works as expected.
 */
void
ztest_vdev_aux_add_remove(ztest_ds_t *zd, uint64_t id)
{
	ztest_shared_t *zs = ztest_shared;
	spa_t *spa = ztest_spa;
	vdev_t *rvd = spa->spa_root_vdev;
	spa_aux_vdev_t *sav;
	char *aux;
	char *path;
	uint64_t guid = 0;
	int error, c;

	path = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);

	if (ztest_random(2) == 0) {
		sav = &spa->spa_spares;
		aux = ZPOOL_CONFIG_SPARES;
	} else {
		sav = &spa->spa_l2cache;
		aux = ZPOOL_CONFIG_L2CACHE;
	}

	mutex_enter(&ztest_vdev_lock);

	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);

	if (sav->sav_count != 0 && ztest_random(4) == 0) {
		/*
		 * Pick a random device to remove.
		 */
		guid = sav->sav_vdevs[ztest_random(sav->sav_count)]->vdev_guid;
	} else {
		/*
		 * Find an unused device we can add.
		 */
		zs->zs_vdev_aux = 0;
		for (;;) {
			(void) snprintf(path, MAXPATHLEN, ztest_aux_template,
			    ztest_opts.zo_dir, ztest_opts.zo_pool, aux,
			    zs->zs_vdev_aux);
			for (c = 0; c < sav->sav_count; c++)
				if (strcmp(sav->sav_vdevs[c]->vdev_path,
				    path) == 0)
					break;
			if (c == sav->sav_count &&
			    vdev_lookup_by_path(rvd, path) == NULL)
				break;
			zs->zs_vdev_aux++;
		}
	}

	spa_config_exit(spa, SCL_VDEV, FTAG);

	if (guid == 0) {
		/*
		 * Add a new device.
		 */
		nvlist_t *nvroot = make_vdev_root(NULL, aux,
		    (ztest_opts.zo_vdev_size * 5) / 4, 0, 0, 0, 0, 1);
		error = spa_vdev_add(spa, nvroot);
		if (error != 0)
			fatal(0, "spa_vdev_add(%p) = %d", nvroot, error);
		nvlist_free(nvroot);
	} else {
		/*
		 * Remove an existing device. Sometimes, dirty its
		 * vdev state first to make sure we handle removal
		 * of devices that have pending state changes.
		 */
		if (ztest_random(2) == 0)
			(void) vdev_online(spa, guid, 0, NULL);

		error = spa_vdev_remove(spa, guid, B_FALSE);
		if (error != 0 && error != EBUSY)
			fatal(0, "spa_vdev_remove(%llu) = %d", guid, error);
	}

	mutex_exit(&ztest_vdev_lock);

	umem_free(path, MAXPATHLEN);
}
/*
 * split a pool if it has mirror tlvdevs
 */
void
ztest_split_pool(ztest_ds_t *zd, uint64_t id)
{
	ztest_shared_t *zs = ztest_shared;
	spa_t *spa = ztest_spa;
	vdev_t *rvd = spa->spa_root_vdev;
	nvlist_t *tree, **child, *config, *split, **schild;
	uint_t c, children, schildren = 0, lastlogid = 0;
	int error = 0;

	mutex_enter(&ztest_vdev_lock);

	/* ensure we have a useable config; mirrors of raidz aren't supported */
	if (zs->zs_mirrors < 3 || ztest_opts.zo_raidz > 1) {
		mutex_exit(&ztest_vdev_lock);
		return;
	}

	/* clean up the old pool, if any */
	(void) spa_destroy("splitp");

	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);

	/* generate a config from the existing config */
	mutex_enter(&spa->spa_props_lock);
	VERIFY(nvlist_lookup_nvlist(spa->spa_config, ZPOOL_CONFIG_VDEV_TREE,
	    &tree) == 0);
	mutex_exit(&spa->spa_props_lock);

	VERIFY(nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child,
	    &children) == 0);

	schild = malloc(rvd->vdev_children * sizeof (nvlist_t *));
	for (c = 0; c < children; c++) {
		vdev_t *tvd = rvd->vdev_child[c];
		nvlist_t **mchild;
		uint_t mchildren;

		if (tvd->vdev_islog || tvd->vdev_ops == &vdev_hole_ops) {
			VERIFY(nvlist_alloc(&schild[schildren], NV_UNIQUE_NAME,
			    0) == 0);
			VERIFY(nvlist_add_string(schild[schildren],
			    ZPOOL_CONFIG_TYPE, VDEV_TYPE_HOLE) == 0);
			VERIFY(nvlist_add_uint64(schild[schildren],
			    ZPOOL_CONFIG_IS_HOLE, 1) == 0);
			if (lastlogid == 0)
				lastlogid = schildren;
			++schildren;
			continue;
		}

		lastlogid = 0;
		VERIFY(nvlist_lookup_nvlist_array(child[c],
		    ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0);
		VERIFY(nvlist_dup(mchild[0], &schild[schildren++], 0) == 0);
	}

	/* OK, create a config that can be used to split */
	VERIFY(nvlist_alloc(&split, NV_UNIQUE_NAME, 0) == 0);
	VERIFY(nvlist_add_string(split, ZPOOL_CONFIG_TYPE,
	    VDEV_TYPE_ROOT) == 0);
	VERIFY(nvlist_add_nvlist_array(split, ZPOOL_CONFIG_CHILDREN, schild,
	    lastlogid != 0 ? lastlogid : schildren) == 0);

	VERIFY(nvlist_alloc(&config, NV_UNIQUE_NAME, 0) == 0);
	VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, split) == 0);

	for (c = 0; c < schildren; c++)
		nvlist_free(schild[c]);
	free(schild);
	nvlist_free(split);

	spa_config_exit(spa, SCL_VDEV, FTAG);

	(void) rw_enter(&ztest_name_lock, RW_WRITER);
	error = spa_vdev_split_mirror(spa, "splitp", config, NULL, B_FALSE);
	(void) rw_exit(&ztest_name_lock);

	nvlist_free(config);

	if (error == 0) {
		(void) printf("successful split - results:\n");
		mutex_enter(&spa_namespace_lock);
		show_pool_stats(spa);
		show_pool_stats(spa_lookup("splitp"));
		mutex_exit(&spa_namespace_lock);
		++zs->zs_splits;
		--zs->zs_mirrors;
	}

	mutex_exit(&ztest_vdev_lock);
}
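/*
 * In the split config built above, log and hole top-level vdevs are replaced
 * by explicit hole entries, and lastlogid (when nonzero) marks where a
 * trailing run of them begins. Since that value is used as the child count
 * for ZPOOL_CONFIG_CHILDREN, the trailing holes are trimmed off and the new
 * "splitp" pool is assembled only from the first side of each data mirror.
 */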
/*
 * Verify that we can attach and detach devices.
 */
void
ztest_vdev_attach_detach(ztest_ds_t *zd, uint64_t id)
{
	ztest_shared_t *zs = ztest_shared;
	spa_t *spa = ztest_spa;
	spa_aux_vdev_t *sav = &spa->spa_spares;
	vdev_t *rvd = spa->spa_root_vdev;
	vdev_t *oldvd, *newvd, *pvd;
	nvlist_t *root;
	uint64_t leaves;
	uint64_t leaf, top;
	uint64_t ashift = ztest_get_ashift();
	uint64_t oldguid, pguid;
	size_t oldsize, newsize;
	char *oldpath, *newpath;
	int replacing;
	int oldvd_has_siblings = B_FALSE;
	int newvd_is_spare = B_FALSE;
	int oldvd_is_log;
	int error, expected_error;

	oldpath = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
	newpath = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);

	mutex_enter(&ztest_vdev_lock);
	leaves = MAX(zs->zs_mirrors, 1) * ztest_opts.zo_raidz;

	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);

	/*
	 * Decide whether to do an attach or a replace.
	 */
	replacing = ztest_random(2);

	/*
	 * Pick a random top-level vdev.
	 */
	top = ztest_random_vdev_top(spa, B_TRUE);

	/*
	 * Pick a random leaf within it.
	 */
	leaf = ztest_random(leaves);

	oldvd = rvd->vdev_child[top];
	if (zs->zs_mirrors >= 1) {
		ASSERT(oldvd->vdev_ops == &vdev_mirror_ops);
		ASSERT(oldvd->vdev_children >= zs->zs_mirrors);
		oldvd = oldvd->vdev_child[leaf / ztest_opts.zo_raidz];
	}
	if (ztest_opts.zo_raidz > 1) {
		ASSERT(oldvd->vdev_ops == &vdev_raidz_ops);
		ASSERT(oldvd->vdev_children == ztest_opts.zo_raidz);
		oldvd = oldvd->vdev_child[leaf % ztest_opts.zo_raidz];
	}

	/*
	 * If we're already doing an attach or replace, oldvd may be a
	 * mirror vdev -- in which case, pick a random child.
	 */
	while (oldvd->vdev_children != 0) {
		oldvd_has_siblings = B_TRUE;
		ASSERT(oldvd->vdev_children >= 2);
		oldvd = oldvd->vdev_child[ztest_random(oldvd->vdev_children)];
	}

	oldguid = oldvd->vdev_guid;
	oldsize = vdev_get_min_asize(oldvd);
	oldvd_is_log = oldvd->vdev_top->vdev_islog;
	(void) strcpy(oldpath, oldvd->vdev_path);
	pvd = oldvd->vdev_parent;
	pguid = pvd->vdev_guid;

	/*
	 * If oldvd has siblings, then half of the time, detach it.
	 */
	if (oldvd_has_siblings && ztest_random(2) == 0) {
		spa_config_exit(spa, SCL_VDEV, FTAG);
		error = spa_vdev_detach(spa, oldguid, pguid, B_FALSE);
		if (error != 0 && error != ENODEV && error != EBUSY &&
		    error != ENOTSUP)
			fatal(0, "detach (%s) returned %d", oldpath, error);
		goto out;
	}

	/*
	 * For the new vdev, choose with equal probability between the two
	 * standard paths (ending in either 'a' or 'b') or a random hot spare.
	 */
	if (sav->sav_count != 0 && ztest_random(3) == 0) {
		newvd = sav->sav_vdevs[ztest_random(sav->sav_count)];
		newvd_is_spare = B_TRUE;
		(void) strcpy(newpath, newvd->vdev_path);
	} else {
		(void) snprintf(newpath, MAXPATHLEN, ztest_dev_template,
		    ztest_opts.zo_dir, ztest_opts.zo_pool,
		    top * leaves + leaf);
		if (ztest_random(2) == 0)
			newpath[strlen(newpath) - 1] = 'b';
		newvd = vdev_lookup_by_path(rvd, newpath);
	}

	if (newvd) {
		newsize = vdev_get_min_asize(newvd);
	} else {
		/*
		 * Make newsize a little bigger or smaller than oldsize.
		 * If it's smaller, the attach should fail.
		 * If it's larger, and we're doing a replace,
		 * we should get dynamic LUN growth when we're done.
		 */
		newsize = 10 * oldsize / (9 + ztest_random(3));
	}

	/*
	 * If pvd is not a mirror or root, the attach should fail with ENOTSUP,
	 * unless it's a replace; in that case any non-replacing parent is OK.
	 *
	 * If newvd is already part of the pool, it should fail with EBUSY.
	 *
	 * If newvd is too small, it should fail with EOVERFLOW.
	 */
	if (pvd->vdev_ops != &vdev_mirror_ops &&
	    pvd->vdev_ops != &vdev_root_ops && (!replacing ||
	    pvd->vdev_ops == &vdev_replacing_ops ||
	    pvd->vdev_ops == &vdev_spare_ops))
		expected_error = ENOTSUP;
	else if (newvd_is_spare && (!replacing || oldvd_is_log))
		expected_error = ENOTSUP;
	else if (newvd == oldvd)
		expected_error = replacing ? 0 : EBUSY;
	else if (vdev_lookup_by_path(rvd, newpath) != NULL)
		expected_error = EBUSY;
	else if (newsize < oldsize)
		expected_error = EOVERFLOW;
	else if (ashift > oldvd->vdev_top->vdev_ashift)
		expected_error = EDOM;
	else
		expected_error = 0;

	spa_config_exit(spa, SCL_VDEV, FTAG);

	/*
	 * Build the nvlist describing newpath.
	 */
	root = make_vdev_root(newpath, NULL, newvd == NULL ? newsize : 0,
	    ashift, 0, 0, 0, 1);

	error = spa_vdev_attach(spa, oldguid, root, replacing);

	nvlist_free(root);

	/*
	 * If our parent was the replacing vdev, but the replace completed,
	 * then instead of failing with ENOTSUP we may either succeed,
	 * fail with ENODEV, or fail with EOVERFLOW.
	 */
	if (expected_error == ENOTSUP &&
	    (error == 0 || error == ENODEV || error == EOVERFLOW))
		expected_error = error;

	/*
	 * If someone grew the LUN, the replacement may be too small.
	 */
	if (error == EOVERFLOW || error == EBUSY)
		expected_error = error;

	/* XXX workaround 6690467 */
	if (error != expected_error && expected_error != EBUSY) {
		fatal(0, "attach (%s %llu, %s %llu, %d) "
		    "returned %d, expected %d",
		    oldpath, (longlong_t)oldsize, newpath,
		    (longlong_t)newsize, replacing, error, expected_error);
	}

out:
	mutex_exit(&ztest_vdev_lock);

	umem_free(oldpath, MAXPATHLEN);
	umem_free(newpath, MAXPATHLEN);
}
/*
 * Callback function which expands the physical size of the vdev.
 */
static vdev_t *
grow_vdev(vdev_t *vd, void *arg)
{
	ASSERTV(spa_t *spa = vd->vdev_spa);
	size_t *newsize = arg;
	size_t fsize;
	int fd;

	ASSERT(spa_config_held(spa, SCL_STATE, RW_READER) == SCL_STATE);
	ASSERT(vd->vdev_ops->vdev_op_leaf);

	if ((fd = open(vd->vdev_path, O_RDWR)) == -1)
		return (vd);

	fsize = lseek(fd, 0, SEEK_END);
	VERIFY(ftruncate(fd, *newsize) == 0);

	if (ztest_opts.zo_verbose >= 6) {
		(void) printf("%s grew from %lu to %lu bytes\n",
		    vd->vdev_path, (ulong_t)fsize, (ulong_t)*newsize);
	}
	(void) close(fd);
	return (NULL);
}
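/*
 * grow_vdev() and online_vdev() both follow the vdev_walk_tree() callback
 * convention defined below: returning NULL means "success, keep walking",
 * while returning the vdev aborts the walk, which ztest_vdev_LUN_growth()
 * treats as a failed expansion attempt.
 */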
/*
 * Callback function which expands a given vdev by calling vdev_online().
 */
static vdev_t *
online_vdev(vdev_t *vd, void *arg)
{
	spa_t *spa = vd->vdev_spa;
	vdev_t *tvd = vd->vdev_top;
	uint64_t guid = vd->vdev_guid;
	uint64_t generation = spa->spa_config_generation + 1;
	vdev_state_t newstate = VDEV_STATE_UNKNOWN;
	int error;

	ASSERT(spa_config_held(spa, SCL_STATE, RW_READER) == SCL_STATE);
	ASSERT(vd->vdev_ops->vdev_op_leaf);

	/* Calling vdev_online will initialize the new metaslabs */
	spa_config_exit(spa, SCL_STATE, spa);
	error = vdev_online(spa, guid, ZFS_ONLINE_EXPAND, &newstate);
	spa_config_enter(spa, SCL_STATE, spa, RW_READER);

	/*
	 * If vdev_online returned an error or the underlying vdev_open
	 * failed then we abort the expand. The only way to know that
	 * vdev_open fails is by checking the returned newstate.
	 */
	if (error || newstate != VDEV_STATE_HEALTHY) {
		if (ztest_opts.zo_verbose >= 5) {
			(void) printf("Unable to expand vdev, state %llu, "
			    "error %d\n", (u_longlong_t)newstate, error);
		}
		return (vd);
	}
	ASSERT3U(newstate, ==, VDEV_STATE_HEALTHY);

	/*
	 * Since we dropped the lock we need to ensure that we're
	 * still talking to the original vdev. It's possible this
	 * vdev may have been detached/replaced while we were
	 * trying to online it.
	 */
	if (generation != spa->spa_config_generation) {
		if (ztest_opts.zo_verbose >= 5) {
			(void) printf("vdev configuration has changed, "
			    "guid %llu, state %llu, expected gen %llu, "
			    "got gen %llu\n",
			    (u_longlong_t)guid,
			    (u_longlong_t)tvd->vdev_state,
			    (u_longlong_t)generation,
			    (u_longlong_t)spa->spa_config_generation);
		}
		return (vd);
	}
	return (NULL);
}
/*
 * Traverse the vdev tree calling the supplied function.
 * We continue to walk the tree until we either have walked all
 * children or we receive a non-NULL return from the callback.
 * If a NULL callback is passed, then we just return back the first
 * leaf vdev we encounter.
 */
static vdev_t *
vdev_walk_tree(vdev_t *vd, vdev_t *(*func)(vdev_t *, void *), void *arg)
{
	uint_t c;

	if (vd->vdev_ops->vdev_op_leaf) {
		if (func == NULL)
			return (vd);
		else
			return (func(vd, arg));
	}

	for (c = 0; c < vd->vdev_children; c++) {
		vdev_t *cvd = vd->vdev_child[c];
		if ((cvd = vdev_walk_tree(cvd, func, arg)) != NULL)
			return (cvd);
	}
	return (NULL);
}
/*
 * Verify that dynamic LUN growth works as expected.
 */
void
ztest_vdev_LUN_growth(ztest_ds_t *zd, uint64_t id)
{
	spa_t *spa = ztest_spa;
	vdev_t *vd, *tvd;
	metaslab_class_t *mc;
	metaslab_group_t *mg;
	size_t psize, newsize;
	uint64_t top;
	uint64_t old_class_space, new_class_space, old_ms_count, new_ms_count;

	mutex_enter(&ztest_vdev_lock);
	spa_config_enter(spa, SCL_STATE, spa, RW_READER);

	top = ztest_random_vdev_top(spa, B_TRUE);

	tvd = spa->spa_root_vdev->vdev_child[top];
	mg = tvd->vdev_mg;
	mc = mg->mg_class;
	old_ms_count = tvd->vdev_ms_count;
	old_class_space = metaslab_class_get_space(mc);

	/*
	 * Determine the size of the first leaf vdev associated with
	 * our top-level device.
	 */
	vd = vdev_walk_tree(tvd, NULL, NULL);
	ASSERT3P(vd, !=, NULL);
	ASSERT(vd->vdev_ops->vdev_op_leaf);

	psize = vd->vdev_psize;

	/*
	 * We only try to expand the vdev if it's healthy, less than 4x its
	 * original size, and it has a valid psize.
	 */
	if (tvd->vdev_state != VDEV_STATE_HEALTHY ||
	    psize == 0 || psize >= 4 * ztest_opts.zo_vdev_size) {
		spa_config_exit(spa, SCL_STATE, spa);
		mutex_exit(&ztest_vdev_lock);
		return;
	}

	newsize = psize + psize / 8;
	ASSERT3U(newsize, >, psize);

	if (ztest_opts.zo_verbose >= 6) {
		(void) printf("Expanding LUN %s from %lu to %lu\n",
		    vd->vdev_path, (ulong_t)psize, (ulong_t)newsize);
	}

	/*
	 * Growing the vdev is a two step process:
	 *	1). expand the physical size (i.e. relabel)
	 *	2). online the vdev to create the new metaslabs
	 */
	if (vdev_walk_tree(tvd, grow_vdev, &newsize) != NULL ||
	    vdev_walk_tree(tvd, online_vdev, NULL) != NULL ||
	    tvd->vdev_state != VDEV_STATE_HEALTHY) {
		if (ztest_opts.zo_verbose >= 5) {
			(void) printf("Could not expand LUN because "
			    "the vdev configuration changed.\n");
		}
		spa_config_exit(spa, SCL_STATE, spa);
		mutex_exit(&ztest_vdev_lock);
		return;
	}

	spa_config_exit(spa, SCL_STATE, spa);

	/*
	 * Expanding the LUN will update the config asynchronously,
	 * thus we must wait for the async thread to complete any
	 * pending tasks before proceeding.
	 */
	for (;;) {
		boolean_t done;

		mutex_enter(&spa->spa_async_lock);
		done = (spa->spa_async_thread == NULL && !spa->spa_async_tasks);
		mutex_exit(&spa->spa_async_lock);
		if (done)
			break;
		txg_wait_synced(spa_get_dsl(spa), 0);
		(void) poll(NULL, 0, 100);
	}

	spa_config_enter(spa, SCL_STATE, spa, RW_READER);

	tvd = spa->spa_root_vdev->vdev_child[top];
	new_ms_count = tvd->vdev_ms_count;
	new_class_space = metaslab_class_get_space(mc);

	if (tvd->vdev_mg != mg || mg->mg_class != mc) {
		if (ztest_opts.zo_verbose >= 5) {
			(void) printf("Could not verify LUN expansion due to "
			    "intervening vdev offline or remove.\n");
		}
		spa_config_exit(spa, SCL_STATE, spa);
		mutex_exit(&ztest_vdev_lock);
		return;
	}

	/*
	 * Make sure we were able to grow the vdev.
	 */
	if (new_ms_count <= old_ms_count)
		fatal(0, "LUN expansion failed: ms_count %llu <= %llu\n",
		    old_ms_count, new_ms_count);

	/*
	 * Make sure we were able to grow the pool.
	 */
	if (new_class_space <= old_class_space)
		fatal(0, "LUN expansion failed: class_space %llu <= %llu\n",
		    old_class_space, new_class_space);

	if (ztest_opts.zo_verbose >= 5) {
		char oldnumbuf[6], newnumbuf[6];

		nicenum(old_class_space, oldnumbuf);
		nicenum(new_class_space, newnumbuf);
		(void) printf("%s grew from %s to %s\n",
		    spa->spa_name, oldnumbuf, newnumbuf);
	}

	spa_config_exit(spa, SCL_STATE, spa);
	mutex_exit(&ztest_vdev_lock);
}
/*
 * Verify that dmu_objset_{create,destroy,open,close} work as expected.
 */
static void
ztest_objset_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx)
{
	/*
	 * Create the objects common to all ztest datasets.
	 */
	VERIFY(zap_create_claim(os, ZTEST_DIROBJ,
	    DMU_OT_ZAP_OTHER, DMU_OT_NONE, 0, tx) == 0);
}

static int
ztest_dataset_create(char *dsname)
{
	uint64_t zilset = ztest_random(100);
	int err = dmu_objset_create(dsname, DMU_OST_OTHER, 0,
	    ztest_objset_create_cb, NULL);

	if (err || zilset < 80)
		return (err);

	if (ztest_opts.zo_verbose >= 5)
		(void) printf("Setting dataset %s to sync always\n", dsname);
	return (ztest_dsl_prop_set_uint64(dsname, ZFS_PROP_SYNC,
	    ZFS_SYNC_ALWAYS, B_FALSE));
}
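/*
 * Since zilset is uniform on [0, 100), roughly one dataset in five
 * (zilset >= 80) is switched to sync=always immediately after creation,
 * which keeps the synchronous ZIL code paths exercised alongside the
 * default sync behavior.
 */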
3080 ztest_objset_destroy_cb(const char *name
, void *arg
)
3083 dmu_object_info_t doi
;
3087 * Verify that the dataset contains a directory object.
3089 VERIFY3U(0, ==, dmu_objset_hold(name
, FTAG
, &os
));
3090 error
= dmu_object_info(os
, ZTEST_DIROBJ
, &doi
);
3091 if (error
!= ENOENT
) {
3092 /* We could have crashed in the middle of destroying it */
3093 ASSERT3U(error
, ==, 0);
3094 ASSERT3U(doi
.doi_type
, ==, DMU_OT_ZAP_OTHER
);
3095 ASSERT3S(doi
.doi_physical_blocks_512
, >=, 0);
3097 dmu_objset_rele(os
, FTAG
);
3100 * Destroy the dataset.
3102 VERIFY3U(0, ==, dmu_objset_destroy(name
, B_FALSE
));
3107 ztest_snapshot_create(char *osname
, uint64_t id
)
3109 char snapname
[MAXNAMELEN
];
3112 (void) snprintf(snapname
, MAXNAMELEN
, "%s@%llu", osname
,
3115 error
= dmu_objset_snapshot(osname
, strchr(snapname
, '@') + 1,
3116 NULL
, NULL
, B_FALSE
, B_FALSE
, -1);
3117 if (error
== ENOSPC
) {
3118 ztest_record_enospc(FTAG
);
3121 if (error
!= 0 && error
!= EEXIST
)
3122 fatal(0, "ztest_snapshot_create(%s) = %d", snapname
, error
);
3127 ztest_snapshot_destroy(char *osname
, uint64_t id
)
3129 char snapname
[MAXNAMELEN
];
3132 (void) snprintf(snapname
, MAXNAMELEN
, "%s@%llu", osname
,
3135 error
= dmu_objset_destroy(snapname
, B_FALSE
);
3136 if (error
!= 0 && error
!= ENOENT
)
3137 fatal(0, "ztest_snapshot_destroy(%s) = %d", snapname
, error
);
3143 ztest_dmu_objset_create_destroy(ztest_ds_t
*zd
, uint64_t id
)
3153 zdtmp
= umem_alloc(sizeof (ztest_ds_t
), UMEM_NOFAIL
);
3154 name
= umem_alloc(MAXNAMELEN
, UMEM_NOFAIL
);
3156 (void) rw_enter(&ztest_name_lock
, RW_READER
);
3158 (void) snprintf(name
, MAXNAMELEN
, "%s/temp_%llu",
3159 ztest_opts
.zo_pool
, (u_longlong_t
)id
);
3162 * If this dataset exists from a previous run, process its replay log
3163 * half of the time. If we don't replay it, then dmu_objset_destroy()
3164 * (invoked from ztest_objset_destroy_cb()) should just throw it away.
3166 if (ztest_random(2) == 0 &&
3167 dmu_objset_own(name
, DMU_OST_OTHER
, B_FALSE
, FTAG
, &os
) == 0) {
3168 ztest_zd_init(zdtmp
, NULL
, os
);
3169 zil_replay(os
, zdtmp
, ztest_replay_vector
);
3170 ztest_zd_fini(zdtmp
);
3171 dmu_objset_disown(os
, FTAG
);
3175 * There may be an old instance of the dataset we're about to
3176 * create lying around from a previous run. If so, destroy it
3177 * and all of its snapshots.
3179 (void) dmu_objset_find(name
, ztest_objset_destroy_cb
, NULL
,
3180 DS_FIND_CHILDREN
| DS_FIND_SNAPSHOTS
);
3183 * Verify that the destroyed dataset is no longer in the namespace.
3185 VERIFY3U(ENOENT
, ==, dmu_objset_hold(name
, FTAG
, &os
));
3188 * Verify that we can create a new dataset.
3190 error
= ztest_dataset_create(name
);
3192 if (error
== ENOSPC
) {
3193 ztest_record_enospc(FTAG
);
3196 fatal(0, "dmu_objset_create(%s) = %d", name
, error
);
3200 dmu_objset_own(name
, DMU_OST_OTHER
, B_FALSE
, FTAG
, &os
));
3202 ztest_zd_init(zdtmp
, NULL
, os
);
3205 * Open the intent log for it.
3207 zilog
= zil_open(os
, ztest_get_data
);
3210 * Put some objects in there, do a little I/O to them,
3211 * and randomly take a couple of snapshots along the way.
3213 iters
= ztest_random(5);
3214 for (i
= 0; i
< iters
; i
++) {
3215 ztest_dmu_object_alloc_free(zdtmp
, id
);
3216 if (ztest_random(iters
) == 0)
3217 (void) ztest_snapshot_create(name
, i
);
3221 * Verify that we cannot create an existing dataset.
3223 VERIFY3U(EEXIST
, ==,
3224 dmu_objset_create(name
, DMU_OST_OTHER
, 0, NULL
, NULL
));
3227 * Verify that we can hold an objset that is also owned.
3229 VERIFY3U(0, ==, dmu_objset_hold(name
, FTAG
, &os2
));
3230 dmu_objset_rele(os2
, FTAG
);
3233 * Verify that we cannot own an objset that is already owned.
3236 dmu_objset_own(name
, DMU_OST_OTHER
, B_FALSE
, FTAG
, &os2
));
3239 dmu_objset_disown(os
, FTAG
);
3240 ztest_zd_fini(zdtmp
);
3242 (void) rw_exit(&ztest_name_lock
);
3244 umem_free(name
, MAXNAMELEN
);
3245 umem_free(zdtmp
, sizeof (ztest_ds_t
));
3249 * Verify that dmu_snapshot_{create,destroy,open,close} work as expected.
3252 ztest_dmu_snapshot_create_destroy(ztest_ds_t
*zd
, uint64_t id
)
3254 (void) rw_enter(&ztest_name_lock
, RW_READER
);
3255 (void) ztest_snapshot_destroy(zd
->zd_name
, id
);
3256 (void) ztest_snapshot_create(zd
->zd_name
, id
);
3257 (void) rw_exit(&ztest_name_lock
);
3261 * Cleanup non-standard snapshots and clones.
3264 ztest_dsl_dataset_cleanup(char *osname
, uint64_t id
)
3273 snap1name
= umem_alloc(MAXNAMELEN
, UMEM_NOFAIL
);
3274 clone1name
= umem_alloc(MAXNAMELEN
, UMEM_NOFAIL
);
3275 snap2name
= umem_alloc(MAXNAMELEN
, UMEM_NOFAIL
);
3276 clone2name
= umem_alloc(MAXNAMELEN
, UMEM_NOFAIL
);
3277 snap3name
= umem_alloc(MAXNAMELEN
, UMEM_NOFAIL
);
3279 (void) snprintf(snap1name
, MAXNAMELEN
, "%s@s1_%llu",
3280 osname
, (u_longlong_t
)id
);
3281 (void) snprintf(clone1name
, MAXNAMELEN
, "%s/c1_%llu",
3282 osname
, (u_longlong_t
)id
);
3283 (void) snprintf(snap2name
, MAXNAMELEN
, "%s@s2_%llu",
3284 clone1name
, (u_longlong_t
)id
);
3285 (void) snprintf(clone2name
, MAXNAMELEN
, "%s/c2_%llu",
3286 osname
, (u_longlong_t
)id
);
3287 (void) snprintf(snap3name
, MAXNAMELEN
, "%s@s3_%llu",
3288 clone1name
, (u_longlong_t
)id
);
3290 error
= dmu_objset_destroy(clone2name
, B_FALSE
);
3291 if (error
&& error
!= ENOENT
)
3292 fatal(0, "dmu_objset_destroy(%s) = %d", clone2name
, error
);
3293 error
= dmu_objset_destroy(snap3name
, B_FALSE
);
3294 if (error
&& error
!= ENOENT
)
3295 fatal(0, "dmu_objset_destroy(%s) = %d", snap3name
, error
);
3296 error
= dmu_objset_destroy(snap2name
, B_FALSE
);
3297 if (error
&& error
!= ENOENT
)
3298 fatal(0, "dmu_objset_destroy(%s) = %d", snap2name
, error
);
3299 error
= dmu_objset_destroy(clone1name
, B_FALSE
);
3300 if (error
&& error
!= ENOENT
)
3301 fatal(0, "dmu_objset_destroy(%s) = %d", clone1name
, error
);
3302 error
= dmu_objset_destroy(snap1name
, B_FALSE
);
3303 if (error
&& error
!= ENOENT
)
3304 fatal(0, "dmu_objset_destroy(%s) = %d", snap1name
, error
);
3306 umem_free(snap1name
, MAXNAMELEN
);
3307 umem_free(clone1name
, MAXNAMELEN
);
3308 umem_free(snap2name
, MAXNAMELEN
);
3309 umem_free(clone2name
, MAXNAMELEN
);
3310 umem_free(snap3name
, MAXNAMELEN
);
3314 * Verify dsl_dataset_promote handles EBUSY
3317 ztest_dsl_dataset_promote_busy(ztest_ds_t
*zd
, uint64_t id
)
3326 char *osname
= zd
->zd_name
;
3329 snap1name
= umem_alloc(MAXNAMELEN
, UMEM_NOFAIL
);
3330 clone1name
= umem_alloc(MAXNAMELEN
, UMEM_NOFAIL
);
3331 snap2name
= umem_alloc(MAXNAMELEN
, UMEM_NOFAIL
);
3332 clone2name
= umem_alloc(MAXNAMELEN
, UMEM_NOFAIL
);
3333 snap3name
= umem_alloc(MAXNAMELEN
, UMEM_NOFAIL
);
3335 (void) rw_enter(&ztest_name_lock
, RW_READER
);
3337 ztest_dsl_dataset_cleanup(osname
, id
);
3339 (void) snprintf(snap1name
, MAXNAMELEN
, "%s@s1_%llu",
3340 osname
, (u_longlong_t
)id
);
3341 (void) snprintf(clone1name
, MAXNAMELEN
, "%s/c1_%llu",
3342 osname
, (u_longlong_t
)id
);
3343 (void) snprintf(snap2name
, MAXNAMELEN
, "%s@s2_%llu",
3344 clone1name
, (u_longlong_t
)id
);
3345 (void) snprintf(clone2name
, MAXNAMELEN
, "%s/c2_%llu",
3346 osname
, (u_longlong_t
)id
);
3347 (void) snprintf(snap3name
, MAXNAMELEN
, "%s@s3_%llu",
3348 clone1name
, (u_longlong_t
)id
);
3350 error
= dmu_objset_snapshot(osname
, strchr(snap1name
, '@')+1,
3351 NULL
, NULL
, B_FALSE
, B_FALSE
, -1);
3352 if (error
&& error
!= EEXIST
) {
3353 if (error
== ENOSPC
) {
3354 ztest_record_enospc(FTAG
);
3357 fatal(0, "dmu_take_snapshot(%s) = %d", snap1name
, error
);
3360 error
= dmu_objset_hold(snap1name
, FTAG
, &clone
);
3362 fatal(0, "dmu_open_snapshot(%s) = %d", snap1name
, error
);
3364 error
= dmu_objset_clone(clone1name
, dmu_objset_ds(clone
), 0);
3365 dmu_objset_rele(clone
, FTAG
);
3367 if (error
== ENOSPC
) {
3368 ztest_record_enospc(FTAG
);
3371 fatal(0, "dmu_objset_create(%s) = %d", clone1name
, error
);
3374 error
= dmu_objset_snapshot(clone1name
, strchr(snap2name
, '@')+1,
3375 NULL
, NULL
, B_FALSE
, B_FALSE
, -1);
3376 if (error
&& error
!= EEXIST
) {
3377 if (error
== ENOSPC
) {
3378 ztest_record_enospc(FTAG
);
3381 fatal(0, "dmu_open_snapshot(%s) = %d", snap2name
, error
);
3384 error
= dmu_objset_snapshot(clone1name
, strchr(snap3name
, '@')+1,
3385 NULL
, NULL
, B_FALSE
, B_FALSE
, -1);
3386 if (error
&& error
!= EEXIST
) {
3387 if (error
== ENOSPC
) {
3388 ztest_record_enospc(FTAG
);
3391 fatal(0, "dmu_open_snapshot(%s) = %d", snap3name
, error
);
3394 error
= dmu_objset_hold(snap3name
, FTAG
, &clone
);
3396 fatal(0, "dmu_open_snapshot(%s) = %d", snap3name
, error
);
3398 error
= dmu_objset_clone(clone2name
, dmu_objset_ds(clone
), 0);
3399 dmu_objset_rele(clone
, FTAG
);
3401 if (error
== ENOSPC
) {
3402 ztest_record_enospc(FTAG
);
3405 fatal(0, "dmu_objset_create(%s) = %d", clone2name
, error
);
3408 error
= dsl_dataset_own(snap2name
, B_FALSE
, FTAG
, &ds
);
3410 fatal(0, "dsl_dataset_own(%s) = %d", snap2name
, error
);
3411 error
= dsl_dataset_promote(clone2name
, NULL
);
3413 fatal(0, "dsl_dataset_promote(%s), %d, not EBUSY", clone2name
,
3415 dsl_dataset_disown(ds
, FTAG
);
3418 ztest_dsl_dataset_cleanup(osname
, id
);
3420 (void) rw_exit(&ztest_name_lock
);
3422 umem_free(snap1name
, MAXNAMELEN
);
3423 umem_free(clone1name
, MAXNAMELEN
);
3424 umem_free(snap2name
, MAXNAMELEN
);
3425 umem_free(clone2name
, MAXNAMELEN
);
3426 umem_free(snap3name
, MAXNAMELEN
);
3429 #undef OD_ARRAY_SIZE
3430 #define OD_ARRAY_SIZE 4
3433 * Verify that dmu_object_{alloc,free} work as expected.
3436 ztest_dmu_object_alloc_free(ztest_ds_t
*zd
, uint64_t id
)
3443 size
= sizeof(ztest_od_t
) * OD_ARRAY_SIZE
;
3444 od
= umem_alloc(size
, UMEM_NOFAIL
);
3445 batchsize
= OD_ARRAY_SIZE
;
3447 for (b
= 0; b
< batchsize
; b
++)
3448 ztest_od_init(od
+ b
, id
, FTAG
, b
, DMU_OT_UINT64_OTHER
, 0, 0);
3451 * Destroy the previous batch of objects, create a new batch,
3452 * and do some I/O on the new objects.
3454 if (ztest_object_init(zd
, od
, size
, B_TRUE
) != 0)
3457 while (ztest_random(4 * batchsize
) != 0)
3458 ztest_io(zd
, od
[ztest_random(batchsize
)].od_object
,
3459 ztest_random(ZTEST_RANGE_LOCKS
) << SPA_MAXBLOCKSHIFT
);
3461 umem_free(od
, size
);
3464 #undef OD_ARRAY_SIZE
3465 #define OD_ARRAY_SIZE 2
3468 * Verify that dmu_{read,write} work as expected.
3471 ztest_dmu_read_write(ztest_ds_t
*zd
, uint64_t id
)
3476 objset_t
*os
= zd
->zd_os
;
3477 size
= sizeof(ztest_od_t
) * OD_ARRAY_SIZE
;
3478 od
= umem_alloc(size
, UMEM_NOFAIL
);
3480 int i
, freeit
, error
;
3482 bufwad_t
*packbuf
, *bigbuf
, *pack
, *bigH
, *bigT
;
3483 uint64_t packobj
, packoff
, packsize
, bigobj
, bigoff
, bigsize
;
3484 uint64_t chunksize
= (1000 + ztest_random(1000)) * sizeof (uint64_t);
3485 uint64_t regions
= 997;
3486 uint64_t stride
= 123456789ULL;
3487 uint64_t width
= 40;
3488 int free_percent
= 5;
3491 * This test uses two objects, packobj and bigobj, that are always
3492 * updated together (i.e. in the same tx) so that their contents are
3493 * in sync and can be compared. Their contents relate to each other
3494 * in a simple way: packobj is a dense array of 'bufwad' structures,
3495 * while bigobj is a sparse array of the same bufwads. Specifically,
3496 * for any index n, there are three bufwads that should be identical:
3498 * packobj, at offset n * sizeof (bufwad_t)
3499 * bigobj, at the head of the nth chunk
3500 * bigobj, at the tail of the nth chunk
3502 * The chunk size is arbitrary. It doesn't have to be a power of two,
3503 * and it doesn't have any relation to the object blocksize.
3504 * The only requirement is that it can hold at least two bufwads.
3506 * Normally, we write the bufwad to each of these locations.
3507 * However, free_percent of the time we instead write zeroes to
3508 * packobj and perform a dmu_free_range() on bigobj. By comparing
3509 * bigobj to packobj, we can verify that the DMU is correctly
3510 * tracking which parts of an object are allocated and free,
3511 * and that the contents of the allocated blocks are correct.
3515 * Read the directory info. If it's the first time, set things up.
3517 ztest_od_init(od
, id
, FTAG
, 0, DMU_OT_UINT64_OTHER
, 0, chunksize
);
3518 ztest_od_init(od
+ 1, id
, FTAG
, 1, DMU_OT_UINT64_OTHER
, 0, chunksize
);
3520 if (ztest_object_init(zd
, od
, size
, B_FALSE
) != 0) {
3521 umem_free(od
, size
);
3525 bigobj
= od
[0].od_object
;
3526 packobj
= od
[1].od_object
;
3527 chunksize
= od
[0].od_gen
;
3528 ASSERT(chunksize
== od
[1].od_gen
);
3531 * Prefetch a random chunk of the big object.
3532 * Our aim here is to get some async reads in flight
3533 * for blocks that we may free below; the DMU should
3534 * handle this race correctly.
3536 n
= ztest_random(regions
) * stride
+ ztest_random(width
);
3537 s
= 1 + ztest_random(2 * width
- 1);
3538 dmu_prefetch(os
, bigobj
, n
* chunksize
, s
* chunksize
);
3541 * Pick a random index and compute the offsets into packobj and bigobj.
3543 n
= ztest_random(regions
) * stride
+ ztest_random(width
);
3544 s
= 1 + ztest_random(width
- 1);
3546 packoff
= n
* sizeof (bufwad_t
);
3547 packsize
= s
* sizeof (bufwad_t
);
3549 bigoff
= n
* chunksize
;
3550 bigsize
= s
* chunksize
;
3552 packbuf
= umem_alloc(packsize
, UMEM_NOFAIL
);
3553 bigbuf
= umem_alloc(bigsize
, UMEM_NOFAIL
);
3556 * free_percent of the time, free a range of bigobj rather than
3559 freeit
= (ztest_random(100) < free_percent
);
3562 * Read the current contents of our objects.
3564 error
= dmu_read(os
, packobj
, packoff
, packsize
, packbuf
,
3566 ASSERT3U(error
, ==, 0);
3567 error
= dmu_read(os
, bigobj
, bigoff
, bigsize
, bigbuf
,
3569 ASSERT3U(error
, ==, 0);
3572 * Get a tx for the mods to both packobj and bigobj.
3574 tx
= dmu_tx_create(os
);
3576 dmu_tx_hold_write(tx
, packobj
, packoff
, packsize
);
3579 dmu_tx_hold_free(tx
, bigobj
, bigoff
, bigsize
);
3581 dmu_tx_hold_write(tx
, bigobj
, bigoff
, bigsize
);
3583 txg
= ztest_tx_assign(tx
, TXG_MIGHTWAIT
, FTAG
);
3585 umem_free(packbuf
, packsize
);
3586 umem_free(bigbuf
, bigsize
);
3587 umem_free(od
, size
);
3591 dmu_object_set_checksum(os
, bigobj
,
3592 (enum zio_checksum
)ztest_random_dsl_prop(ZFS_PROP_CHECKSUM
), tx
);
3594 dmu_object_set_compress(os
, bigobj
,
3595 (enum zio_compress
)ztest_random_dsl_prop(ZFS_PROP_COMPRESSION
), tx
);
3598 * For each index from n to n + s, verify that the existing bufwad
3599 * in packobj matches the bufwads at the head and tail of the
3600 * corresponding chunk in bigobj. Then update all three bufwads
3601 * with the new values we want to write out.
3603 for (i
= 0; i
< s
; i
++) {
3605 pack
= (bufwad_t
*)((char *)packbuf
+ i
* sizeof (bufwad_t
));
3607 bigH
= (bufwad_t
*)((char *)bigbuf
+ i
* chunksize
);
3609 bigT
= (bufwad_t
*)((char *)bigH
+ chunksize
) - 1;
3611 ASSERT((uintptr_t)bigH
- (uintptr_t)bigbuf
< bigsize
);
3612 ASSERT((uintptr_t)bigT
- (uintptr_t)bigbuf
< bigsize
);
3614 if (pack
->bw_txg
> txg
)
3615 fatal(0, "future leak: got %llx, open txg is %llx",
3618 if (pack
->bw_data
!= 0 && pack
->bw_index
!= n
+ i
)
3619 fatal(0, "wrong index: got %llx, wanted %llx+%llx",
3620 pack
->bw_index
, n
, i
);
3622 if (bcmp(pack
, bigH
, sizeof (bufwad_t
)) != 0)
3623 fatal(0, "pack/bigH mismatch in %p/%p", pack
, bigH
);
3625 if (bcmp(pack
, bigT
, sizeof (bufwad_t
)) != 0)
3626 fatal(0, "pack/bigT mismatch in %p/%p", pack
, bigT
);
3629 bzero(pack
, sizeof (bufwad_t
));
3631 pack
->bw_index
= n
+ i
;
3633 pack
->bw_data
= 1 + ztest_random(-2ULL);
3640 * We've verified all the old bufwads, and made new ones.
3641 * Now write them out.
3643 dmu_write(os
, packobj
, packoff
, packsize
, packbuf
, tx
);
3646 if (ztest_opts
.zo_verbose
>= 7) {
3647 (void) printf("freeing offset %llx size %llx"
3649 (u_longlong_t
)bigoff
,
3650 (u_longlong_t
)bigsize
,
3653 VERIFY(0 == dmu_free_range(os
, bigobj
, bigoff
, bigsize
, tx
));
3655 if (ztest_opts
.zo_verbose
>= 7) {
3656 (void) printf("writing offset %llx size %llx"
3658 (u_longlong_t
)bigoff
,
3659 (u_longlong_t
)bigsize
,
3662 dmu_write(os
, bigobj
, bigoff
, bigsize
, bigbuf
, tx
);
3668 * Sanity check the stuff we just wrote.
3671 void *packcheck
= umem_alloc(packsize
, UMEM_NOFAIL
);
3672 void *bigcheck
= umem_alloc(bigsize
, UMEM_NOFAIL
);
3674 VERIFY(0 == dmu_read(os
, packobj
, packoff
,
3675 packsize
, packcheck
, DMU_READ_PREFETCH
));
3676 VERIFY(0 == dmu_read(os
, bigobj
, bigoff
,
3677 bigsize
, bigcheck
, DMU_READ_PREFETCH
));
3679 ASSERT(bcmp(packbuf
, packcheck
, packsize
) == 0);
3680 ASSERT(bcmp(bigbuf
, bigcheck
, bigsize
) == 0);
3682 umem_free(packcheck
, packsize
);
3683 umem_free(bigcheck
, bigsize
);
3686 umem_free(packbuf
, packsize
);
3687 umem_free(bigbuf
, bigsize
);
3688 umem_free(od
, size
);
3692 compare_and_update_pbbufs(uint64_t s
, bufwad_t
*packbuf
, bufwad_t
*bigbuf
,
3693 uint64_t bigsize
, uint64_t n
, uint64_t chunksize
, uint64_t txg
)
3701 * For each index from n to n + s, verify that the existing bufwad
3702 * in packobj matches the bufwads at the head and tail of the
3703 * corresponding chunk in bigobj. Then update all three bufwads
3704 * with the new values we want to write out.
3706 for (i
= 0; i
< s
; i
++) {
3708 pack
= (bufwad_t
*)((char *)packbuf
+ i
* sizeof (bufwad_t
));
3710 bigH
= (bufwad_t
*)((char *)bigbuf
+ i
* chunksize
);
3712 bigT
= (bufwad_t
*)((char *)bigH
+ chunksize
) - 1;
3714 ASSERT((uintptr_t)bigH
- (uintptr_t)bigbuf
< bigsize
);
3715 ASSERT((uintptr_t)bigT
- (uintptr_t)bigbuf
< bigsize
);
3717 if (pack
->bw_txg
> txg
)
3718 fatal(0, "future leak: got %llx, open txg is %llx",
3721 if (pack
->bw_data
!= 0 && pack
->bw_index
!= n
+ i
)
3722 fatal(0, "wrong index: got %llx, wanted %llx+%llx",
3723 pack
->bw_index
, n
, i
);
3725 if (bcmp(pack
, bigH
, sizeof (bufwad_t
)) != 0)
3726 fatal(0, "pack/bigH mismatch in %p/%p", pack
, bigH
);
3728 if (bcmp(pack
, bigT
, sizeof (bufwad_t
)) != 0)
3729 fatal(0, "pack/bigT mismatch in %p/%p", pack
, bigT
);
3731 pack
->bw_index
= n
+ i
;
3733 pack
->bw_data
= 1 + ztest_random(-2ULL);
3740 #undef OD_ARRAY_SIZE
3741 #define OD_ARRAY_SIZE 2
3744 ztest_dmu_read_write_zcopy(ztest_ds_t
*zd
, uint64_t id
)
3746 objset_t
*os
= zd
->zd_os
;
3753 bufwad_t
*packbuf
, *bigbuf
;
3754 uint64_t packobj
, packoff
, packsize
, bigobj
, bigoff
, bigsize
;
3755 uint64_t blocksize
= ztest_random_blocksize();
3756 uint64_t chunksize
= blocksize
;
3757 uint64_t regions
= 997;
3758 uint64_t stride
= 123456789ULL;
3760 dmu_buf_t
*bonus_db
;
3761 arc_buf_t
**bigbuf_arcbufs
;
3762 dmu_object_info_t doi
;
3764 size
= sizeof(ztest_od_t
) * OD_ARRAY_SIZE
;
3765 od
= umem_alloc(size
, UMEM_NOFAIL
);
3768 * This test uses two objects, packobj and bigobj, that are always
3769 * updated together (i.e. in the same tx) so that their contents are
3770 * in sync and can be compared. Their contents relate to each other
3771 * in a simple way: packobj is a dense array of 'bufwad' structures,
3772 * while bigobj is a sparse array of the same bufwads. Specifically,
3773 * for any index n, there are three bufwads that should be identical:
3775 * packobj, at offset n * sizeof (bufwad_t)
3776 * bigobj, at the head of the nth chunk
3777 * bigobj, at the tail of the nth chunk
3779 * The chunk size is set equal to bigobj block size so that
3780 * dmu_assign_arcbuf() can be tested for object updates.
3784 * Read the directory info. If it's the first time, set things up.
3786 ztest_od_init(od
, id
, FTAG
, 0, DMU_OT_UINT64_OTHER
, blocksize
, 0);
3787 ztest_od_init(od
+ 1, id
, FTAG
, 1, DMU_OT_UINT64_OTHER
, 0, chunksize
);
3790 if (ztest_object_init(zd
, od
, size
, B_FALSE
) != 0) {
3791 umem_free(od
, size
);
3795 bigobj
= od
[0].od_object
;
3796 packobj
= od
[1].od_object
;
3797 blocksize
= od
[0].od_blocksize
;
3798 chunksize
= blocksize
;
3799 ASSERT(chunksize
== od
[1].od_gen
);
3801 VERIFY(dmu_object_info(os
, bigobj
, &doi
) == 0);
3802 VERIFY(ISP2(doi
.doi_data_block_size
));
3803 VERIFY(chunksize
== doi
.doi_data_block_size
);
3804 VERIFY(chunksize
>= 2 * sizeof (bufwad_t
));
3807 * Pick a random index and compute the offsets into packobj and bigobj.
3809 n
= ztest_random(regions
) * stride
+ ztest_random(width
);
3810 s
= 1 + ztest_random(width
- 1);
3812 packoff
= n
* sizeof (bufwad_t
);
3813 packsize
= s
* sizeof (bufwad_t
);
3815 bigoff
= n
* chunksize
;
3816 bigsize
= s
* chunksize
;
3818 packbuf
= umem_zalloc(packsize
, UMEM_NOFAIL
);
3819 bigbuf
= umem_zalloc(bigsize
, UMEM_NOFAIL
);
3821 VERIFY3U(0, ==, dmu_bonus_hold(os
, bigobj
, FTAG
, &bonus_db
));
3823 bigbuf_arcbufs
= umem_zalloc(2 * s
* sizeof (arc_buf_t
*), UMEM_NOFAIL
);
3826 * Iteration 0 test zcopy for DB_UNCACHED dbufs.
3827 * Iteration 1 test zcopy to already referenced dbufs.
3828 * Iteration 2 test zcopy to dirty dbuf in the same txg.
3829 * Iteration 3 test zcopy to dbuf dirty in previous txg.
3830 * Iteration 4 test zcopy when dbuf is no longer dirty.
3831 * Iteration 5 test zcopy when it can't be done.
3832 * Iteration 6 one more zcopy write.
3834 for (i
= 0; i
< 7; i
++) {
3839 * In iteration 5 (i == 5) use arcbufs
3840 * that don't match bigobj blksz to test
3841 * dmu_assign_arcbuf() when it can't directly
3842 * assign an arcbuf to a dbuf.
3844 for (j
= 0; j
< s
; j
++) {
3847 dmu_request_arcbuf(bonus_db
, chunksize
);
3849 bigbuf_arcbufs
[2 * j
] =
3850 dmu_request_arcbuf(bonus_db
, chunksize
/ 2);
3851 bigbuf_arcbufs
[2 * j
+ 1] =
3852 dmu_request_arcbuf(bonus_db
, chunksize
/ 2);
3857 * Get a tx for the mods to both packobj and bigobj.
3859 tx
= dmu_tx_create(os
);
3861 dmu_tx_hold_write(tx
, packobj
, packoff
, packsize
);
3862 dmu_tx_hold_write(tx
, bigobj
, bigoff
, bigsize
);
3864 txg
= ztest_tx_assign(tx
, TXG_MIGHTWAIT
, FTAG
);
3866 umem_free(packbuf
, packsize
);
3867 umem_free(bigbuf
, bigsize
);
3868 for (j
= 0; j
< s
; j
++) {
3870 dmu_return_arcbuf(bigbuf_arcbufs
[j
]);
3873 bigbuf_arcbufs
[2 * j
]);
3875 bigbuf_arcbufs
[2 * j
+ 1]);
3878 umem_free(bigbuf_arcbufs
, 2 * s
* sizeof (arc_buf_t
*));
3879 umem_free(od
, size
);
3880 dmu_buf_rele(bonus_db
, FTAG
);
3885 * 50% of the time don't read objects in the 1st iteration to
3886 * test dmu_assign_arcbuf() for the case when there're no
3887 * existing dbufs for the specified offsets.
3889 if (i
!= 0 || ztest_random(2) != 0) {
3890 error
= dmu_read(os
, packobj
, packoff
,
3891 packsize
, packbuf
, DMU_READ_PREFETCH
);
3892 ASSERT3U(error
, ==, 0);
3893 error
= dmu_read(os
, bigobj
, bigoff
, bigsize
,
3894 bigbuf
, DMU_READ_PREFETCH
);
3895 ASSERT3U(error
, ==, 0);
3897 compare_and_update_pbbufs(s
, packbuf
, bigbuf
, bigsize
,
3901 * We've verified all the old bufwads, and made new ones.
3902 * Now write them out.
3904 dmu_write(os
, packobj
, packoff
, packsize
, packbuf
, tx
);
3905 if (ztest_opts
.zo_verbose
>= 7) {
3906 (void) printf("writing offset %llx size %llx"
3908 (u_longlong_t
)bigoff
,
3909 (u_longlong_t
)bigsize
,
3912 for (off
= bigoff
, j
= 0; j
< s
; j
++, off
+= chunksize
) {
3915 bcopy((caddr_t
)bigbuf
+ (off
- bigoff
),
3916 bigbuf_arcbufs
[j
]->b_data
, chunksize
);
3918 bcopy((caddr_t
)bigbuf
+ (off
- bigoff
),
3919 bigbuf_arcbufs
[2 * j
]->b_data
,
3921 bcopy((caddr_t
)bigbuf
+ (off
- bigoff
) +
3923 bigbuf_arcbufs
[2 * j
+ 1]->b_data
,
3928 VERIFY(dmu_buf_hold(os
, bigobj
, off
,
3929 FTAG
, &dbt
, DMU_READ_NO_PREFETCH
) == 0);
3932 dmu_assign_arcbuf(bonus_db
, off
,
3933 bigbuf_arcbufs
[j
], tx
);
3935 dmu_assign_arcbuf(bonus_db
, off
,
3936 bigbuf_arcbufs
[2 * j
], tx
);
3937 dmu_assign_arcbuf(bonus_db
,
3938 off
+ chunksize
/ 2,
3939 bigbuf_arcbufs
[2 * j
+ 1], tx
);
3942 dmu_buf_rele(dbt
, FTAG
);
3948 * Sanity check the stuff we just wrote.
3951 void *packcheck
= umem_alloc(packsize
, UMEM_NOFAIL
);
3952 void *bigcheck
= umem_alloc(bigsize
, UMEM_NOFAIL
);
3954 VERIFY(0 == dmu_read(os
, packobj
, packoff
,
3955 packsize
, packcheck
, DMU_READ_PREFETCH
));
3956 VERIFY(0 == dmu_read(os
, bigobj
, bigoff
,
3957 bigsize
, bigcheck
, DMU_READ_PREFETCH
));
3959 ASSERT(bcmp(packbuf
, packcheck
, packsize
) == 0);
3960 ASSERT(bcmp(bigbuf
, bigcheck
, bigsize
) == 0);
3962 umem_free(packcheck
, packsize
);
3963 umem_free(bigcheck
, bigsize
);
3966 txg_wait_open(dmu_objset_pool(os
), 0);
3967 } else if (i
== 3) {
3968 txg_wait_synced(dmu_objset_pool(os
), 0);
3972 dmu_buf_rele(bonus_db
, FTAG
);
3973 umem_free(packbuf
, packsize
);
3974 umem_free(bigbuf
, bigsize
);
3975 umem_free(bigbuf_arcbufs
, 2 * s
* sizeof (arc_buf_t
*));
3976 umem_free(od
, size
);
void
ztest_dmu_write_parallel(ztest_ds_t *zd, uint64_t id)
{
	ztest_od_t *od;
	uint64_t offset;

	od = umem_alloc(sizeof (ztest_od_t), UMEM_NOFAIL);
	offset = (1ULL << (ztest_random(20) + 43)) +
	    (ztest_random(ZTEST_RANGE_LOCKS) << SPA_MAXBLOCKSHIFT);

	/*
	 * Have multiple threads write to large offsets in an object
	 * to verify that parallel writes to an object -- even to the
	 * same blocks within the object -- doesn't cause any trouble.
	 */
	ztest_od_init(od, ID_PARALLEL, FTAG, 0, DMU_OT_UINT64_OTHER, 0, 0);

	if (ztest_object_init(zd, od, sizeof (ztest_od_t), B_FALSE) != 0) {
		umem_free(od, sizeof (ztest_od_t));
		return;
	}

	while (ztest_random(10) != 0)
		ztest_io(zd, od->od_object, offset);

	umem_free(od, sizeof (ztest_od_t));
}
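/*
 * The offsets used above are deliberately sparse: the base term is at least
 * 1ULL << 43 (8 TiB into the object) and the second term is a random
 * multiple of SPA_MAXBLOCKSIZE drawn from ZTEST_RANGE_LOCKS buckets, so
 * concurrent callers can land on exactly the same block while the object
 * itself stays almost entirely sparse.
 */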
4006 ztest_dmu_prealloc(ztest_ds_t
*zd
, uint64_t id
)
4009 uint64_t offset
= (1ULL << (ztest_random(4) + SPA_MAXBLOCKSHIFT
)) +
4010 (ztest_random(ZTEST_RANGE_LOCKS
) << SPA_MAXBLOCKSHIFT
);
4011 uint64_t count
= ztest_random(20) + 1;
4012 uint64_t blocksize
= ztest_random_blocksize();
4015 od
= umem_alloc(sizeof(ztest_od_t
), UMEM_NOFAIL
);
4017 ztest_od_init(od
, id
, FTAG
, 0, DMU_OT_UINT64_OTHER
, blocksize
, 0);
4019 if (ztest_object_init(zd
, od
, sizeof (ztest_od_t
), !ztest_random(2)) != 0) {
4020 umem_free(od
, sizeof(ztest_od_t
));
4024 if (ztest_truncate(zd
, od
->od_object
, offset
, count
* blocksize
) != 0) {
4025 umem_free(od
, sizeof(ztest_od_t
));
4029 ztest_prealloc(zd
, od
->od_object
, offset
, count
* blocksize
);
4031 data
= umem_zalloc(blocksize
, UMEM_NOFAIL
);
4033 while (ztest_random(count
) != 0) {
4034 uint64_t randoff
= offset
+ (ztest_random(count
) * blocksize
);
4035 if (ztest_write(zd
, od
->od_object
, randoff
, blocksize
,
4038 while (ztest_random(4) != 0)
4039 ztest_io(zd
, od
->od_object
, randoff
);
4042 umem_free(data
, blocksize
);
4043 umem_free(od
, sizeof(ztest_od_t
));
4047 * Verify that zap_{create,destroy,add,remove,update} work as expected.
4049 #define ZTEST_ZAP_MIN_INTS 1
4050 #define ZTEST_ZAP_MAX_INTS 4
4051 #define ZTEST_ZAP_MAX_PROPS 1000
4054 ztest_zap(ztest_ds_t
*zd
, uint64_t id
)
4056 objset_t
*os
= zd
->zd_os
;
4059 uint64_t txg
, last_txg
;
4060 uint64_t value
[ZTEST_ZAP_MAX_INTS
];
4061 uint64_t zl_ints
, zl_intsize
, prop
;
4064 char propname
[100], txgname
[100];
4066 char *hc
[2] = { "s.acl.h", ".s.open.h.hyLZlg" };
4068 od
= umem_alloc(sizeof(ztest_od_t
), UMEM_NOFAIL
);
4069 ztest_od_init(od
, id
, FTAG
, 0, DMU_OT_ZAP_OTHER
, 0, 0);
4071 if (ztest_object_init(zd
, od
, sizeof (ztest_od_t
),
4072 !ztest_random(2)) != 0)
4075 object
= od
->od_object
;
4078 * Generate a known hash collision, and verify that
4079 * we can lookup and remove both entries.
4081 tx
= dmu_tx_create(os
);
4082 dmu_tx_hold_zap(tx
, object
, B_TRUE
, NULL
);
4083 txg
= ztest_tx_assign(tx
, TXG_MIGHTWAIT
, FTAG
);
4086 for (i
= 0; i
< 2; i
++) {
4088 VERIFY3U(0, ==, zap_add(os
, object
, hc
[i
], sizeof (uint64_t),
4091 for (i
= 0; i
< 2; i
++) {
4092 VERIFY3U(EEXIST
, ==, zap_add(os
, object
, hc
[i
],
4093 sizeof (uint64_t), 1, &value
[i
], tx
));
4095 zap_length(os
, object
, hc
[i
], &zl_intsize
, &zl_ints
));
4096 ASSERT3U(zl_intsize
, ==, sizeof (uint64_t));
4097 ASSERT3U(zl_ints
, ==, 1);
4099 for (i
= 0; i
< 2; i
++) {
4100 VERIFY3U(0, ==, zap_remove(os
, object
, hc
[i
], tx
));
4105 * Generate a buch of random entries.
4107 ints
= MAX(ZTEST_ZAP_MIN_INTS
, object
% ZTEST_ZAP_MAX_INTS
);
4109 prop
= ztest_random(ZTEST_ZAP_MAX_PROPS
);
4110 (void) sprintf(propname
, "prop_%llu", (u_longlong_t
)prop
);
4111 (void) sprintf(txgname
, "txg_%llu", (u_longlong_t
)prop
);
4112 bzero(value
, sizeof (value
));
4116 * If these zap entries already exist, validate their contents.
4118 error
= zap_length(os
, object
, txgname
, &zl_intsize
, &zl_ints
);
4120 ASSERT3U(zl_intsize
, ==, sizeof (uint64_t));
4121 ASSERT3U(zl_ints
, ==, 1);
4123 VERIFY(zap_lookup(os
, object
, txgname
, zl_intsize
,
4124 zl_ints
, &last_txg
) == 0);
4126 VERIFY(zap_length(os
, object
, propname
, &zl_intsize
,
4129 ASSERT3U(zl_intsize
, ==, sizeof (uint64_t));
4130 ASSERT3U(zl_ints
, ==, ints
);
4132 VERIFY(zap_lookup(os
, object
, propname
, zl_intsize
,
4133 zl_ints
, value
) == 0);
4135 for (i
= 0; i
< ints
; i
++) {
4136 ASSERT3U(value
[i
], ==, last_txg
+ object
+ i
);
4139 ASSERT3U(error
, ==, ENOENT
);
4143 * Atomically update two entries in our zap object.
4144 * The first is named txg_%llu, and contains the txg
4145 * in which the property was last updated. The second
4146 * is named prop_%llu, and the nth element of its value
4147 * should be txg + object + n.
4149 tx
= dmu_tx_create(os
);
4150 dmu_tx_hold_zap(tx
, object
, B_TRUE
, NULL
);
4151 txg
= ztest_tx_assign(tx
, TXG_MIGHTWAIT
, FTAG
);
4156 fatal(0, "zap future leak: old %llu new %llu", last_txg
, txg
);
4158 for (i
= 0; i
< ints
; i
++)
4159 value
[i
] = txg
+ object
+ i
;
4161 VERIFY3U(0, ==, zap_update(os
, object
, txgname
, sizeof (uint64_t),
4163 VERIFY3U(0, ==, zap_update(os
, object
, propname
, sizeof (uint64_t),
4169 * Remove a random pair of entries.
4171 prop
= ztest_random(ZTEST_ZAP_MAX_PROPS
);
4172 (void) sprintf(propname
, "prop_%llu", (u_longlong_t
)prop
);
4173 (void) sprintf(txgname
, "txg_%llu", (u_longlong_t
)prop
);
4175 error
= zap_length(os
, object
, txgname
, &zl_intsize
, &zl_ints
);
4177 if (error
== ENOENT
)
4180 ASSERT3U(error
, ==, 0);
4182 tx
= dmu_tx_create(os
);
4183 dmu_tx_hold_zap(tx
, object
, B_TRUE
, NULL
);
4184 txg
= ztest_tx_assign(tx
, TXG_MIGHTWAIT
, FTAG
);
4187 VERIFY3U(0, ==, zap_remove(os
, object
, txgname
, tx
));
4188 VERIFY3U(0, ==, zap_remove(os
, object
, propname
, tx
));
4191 umem_free(od
, sizeof(ztest_od_t
));
4195 * Testcase to test the upgrading of a microzap to fatzap.
4198 ztest_fzap(ztest_ds_t
*zd
, uint64_t id
)
4200 objset_t
*os
= zd
->zd_os
;
4202 uint64_t object
, txg
;
4205 od
= umem_alloc(sizeof(ztest_od_t
), UMEM_NOFAIL
);
4206 ztest_od_init(od
, id
, FTAG
, 0, DMU_OT_ZAP_OTHER
, 0, 0);
4208 if (ztest_object_init(zd
, od
, sizeof (ztest_od_t
),
4209 !ztest_random(2)) != 0)
4211 object
= od
->od_object
;
4214 * Add entries to this ZAP and make sure it spills over
4215 * and gets upgraded to a fatzap. Also, since we are adding
4216 * 2050 entries we should see ptrtbl growth and leaf-block split.
4218 for (i
= 0; i
< 2050; i
++) {
4219 char name
[MAXNAMELEN
];
4224 (void) snprintf(name
, sizeof (name
), "fzap-%llu-%llu",
4225 (u_longlong_t
)id
, (u_longlong_t
)value
);
4227 tx
= dmu_tx_create(os
);
4228 dmu_tx_hold_zap(tx
, object
, B_TRUE
, name
);
4229 txg
= ztest_tx_assign(tx
, TXG_MIGHTWAIT
, FTAG
);
4232 error
= zap_add(os
, object
, name
, sizeof (uint64_t), 1,
4234 ASSERT(error
== 0 || error
== EEXIST
);
4238 umem_free(od
, sizeof(ztest_od_t
));
4243 ztest_zap_parallel(ztest_ds_t
*zd
, uint64_t id
)
4245 objset_t
*os
= zd
->zd_os
;
4247 uint64_t txg
, object
, count
, wsize
, wc
, zl_wsize
, zl_wc
;
4249 int i
, namelen
, error
;
4250 int micro
= ztest_random(2);
4251 char name
[20], string_value
[20];
4254 od
= umem_alloc(sizeof(ztest_od_t
), UMEM_NOFAIL
);
4255 ztest_od_init(od
, ID_PARALLEL
, FTAG
, micro
, DMU_OT_ZAP_OTHER
, 0, 0);
4257 if (ztest_object_init(zd
, od
, sizeof (ztest_od_t
), B_FALSE
) != 0) {
4258 umem_free(od
, sizeof(ztest_od_t
));
4262 object
= od
->od_object
;
4265 * Generate a random name of the form 'xxx.....' where each
4266 * x is a random printable character and the dots are dots.
4267 * There are 94 such characters, and the name length goes from
4268 * 6 to 20, so there are 94^3 * 15 = 12,458,760 possible names.
4270 namelen
= ztest_random(sizeof (name
) - 5) + 5 + 1;
4272 for (i
= 0; i
< 3; i
++)
4273 name
[i
] = '!' + ztest_random('~' - '!' + 1);
4274 for (; i
< namelen
- 1; i
++)
4278 if ((namelen
& 1) || micro
) {
4279 wsize
= sizeof (txg
);
4285 data
= string_value
;
4289 VERIFY(zap_count(os
, object
, &count
) == 0);
4290 ASSERT(count
!= -1ULL);
4293 * Select an operation: length, lookup, add, update, remove.
4295 i
= ztest_random(5);
4298 tx
= dmu_tx_create(os
);
4299 dmu_tx_hold_zap(tx
, object
, B_TRUE
, NULL
);
4300 txg
= ztest_tx_assign(tx
, TXG_MIGHTWAIT
, FTAG
);
4303 bcopy(name
, string_value
, namelen
);
4307 bzero(string_value
, namelen
);
4313 error
= zap_length(os
, object
, name
, &zl_wsize
, &zl_wc
);
4315 ASSERT3U(wsize
, ==, zl_wsize
);
4316 ASSERT3U(wc
, ==, zl_wc
);
4318 ASSERT3U(error
, ==, ENOENT
);
4323 error
= zap_lookup(os
, object
, name
, wsize
, wc
, data
);
4325 if (data
== string_value
&&
4326 bcmp(name
, data
, namelen
) != 0)
4327 fatal(0, "name '%s' != val '%s' len %d",
4328 name
, data
, namelen
);
4330 ASSERT3U(error
, ==, ENOENT
);
4335 error
= zap_add(os
, object
, name
, wsize
, wc
, data
, tx
);
4336 ASSERT(error
== 0 || error
== EEXIST
);
4340 VERIFY(zap_update(os
, object
, name
, wsize
, wc
, data
, tx
) == 0);
4344 error
= zap_remove(os
, object
, name
, tx
);
4345 ASSERT(error
== 0 || error
== ENOENT
);
4352 umem_free(od
, sizeof(ztest_od_t
));
4356 * Commit callback data.
4358 typedef struct ztest_cb_data
{
4359 list_node_t zcd_node
;
4361 int zcd_expected_err
;
4362 boolean_t zcd_added
;
4363 boolean_t zcd_called
;
4367 /* This is the actual commit callback function */
4369 ztest_commit_callback(void *arg
, int error
)
4371 ztest_cb_data_t
*data
= arg
;
4372 uint64_t synced_txg
;
4374 VERIFY(data
!= NULL
);
4375 VERIFY3S(data
->zcd_expected_err
, ==, error
);
4376 VERIFY(!data
->zcd_called
);
4378 synced_txg
= spa_last_synced_txg(data
->zcd_spa
);
4379 if (data
->zcd_txg
> synced_txg
)
4380 fatal(0, "commit callback of txg %" PRIu64
" called prematurely"
4381 ", last synced txg = %" PRIu64
"\n", data
->zcd_txg
,
4384 data
->zcd_called
= B_TRUE
;
4386 if (error
== ECANCELED
) {
4387 ASSERT3U(data
->zcd_txg
, ==, 0);
4388 ASSERT(!data
->zcd_added
);
4391 * The private callback data should be destroyed here, but
4392 * since we are going to check the zcd_called field after
4393 * dmu_tx_abort(), we will destroy it there.
4398 ASSERT(data
->zcd_added
);
4399 ASSERT3U(data
->zcd_txg
, !=, 0);
4401 (void) mutex_enter(&zcl
.zcl_callbacks_lock
);
4403 /* See if this cb was called more quickly */
4404 if ((synced_txg
- data
->zcd_txg
) < zc_min_txg_delay
)
4405 zc_min_txg_delay
= synced_txg
- data
->zcd_txg
;
4407 /* Remove our callback from the list */
4408 list_remove(&zcl
.zcl_callbacks
, data
);
4410 (void) mutex_exit(&zcl
.zcl_callbacks_lock
);
4412 umem_free(data
, sizeof (ztest_cb_data_t
));
4415 /* Allocate and initialize callback data structure */
4416 static ztest_cb_data_t
*
4417 ztest_create_cb_data(objset_t
*os
, uint64_t txg
)
4419 ztest_cb_data_t
*cb_data
;
4421 cb_data
= umem_zalloc(sizeof (ztest_cb_data_t
), UMEM_NOFAIL
);
4423 cb_data
->zcd_txg
= txg
;
4424 cb_data
->zcd_spa
= dmu_objset_spa(os
);
4425 list_link_init(&cb_data
->zcd_node
);
4431 * Commit callback test.
4434 ztest_dmu_commit_callbacks(ztest_ds_t
*zd
, uint64_t id
)
4436 objset_t
*os
= zd
->zd_os
;
4439 ztest_cb_data_t
*cb_data
[3], *tmp_cb
;
4440 uint64_t old_txg
, txg
;
4443 od
= umem_alloc(sizeof(ztest_od_t
), UMEM_NOFAIL
);
4444 ztest_od_init(od
, id
, FTAG
, 0, DMU_OT_UINT64_OTHER
, 0, 0);
4446 if (ztest_object_init(zd
, od
, sizeof (ztest_od_t
), B_FALSE
) != 0) {
4447 umem_free(od
, sizeof(ztest_od_t
));
4451 tx
= dmu_tx_create(os
);
4453 cb_data
[0] = ztest_create_cb_data(os
, 0);
4454 dmu_tx_callback_register(tx
, ztest_commit_callback
, cb_data
[0]);
4456 dmu_tx_hold_write(tx
, od
->od_object
, 0, sizeof (uint64_t));
4458 /* Every once in a while, abort the transaction on purpose */
4459 if (ztest_random(100) == 0)
4463 error
= dmu_tx_assign(tx
, TXG_NOWAIT
);
4465 txg
= error
? 0 : dmu_tx_get_txg(tx
);
4467 cb_data
[0]->zcd_txg
= txg
;
4468 cb_data
[1] = ztest_create_cb_data(os
, txg
);
4469 dmu_tx_callback_register(tx
, ztest_commit_callback
, cb_data
[1]);
4473 * It's not a strict requirement to call the registered
4474 * callbacks from inside dmu_tx_abort(), but that's what
4475 * it's supposed to happen in the current implementation
4476 * so we will check for that.
4478 for (i
= 0; i
< 2; i
++) {
4479 cb_data
[i
]->zcd_expected_err
= ECANCELED
;
4480 VERIFY(!cb_data
[i
]->zcd_called
);
4485 for (i
= 0; i
< 2; i
++) {
4486 VERIFY(cb_data
[i
]->zcd_called
);
4487 umem_free(cb_data
[i
], sizeof (ztest_cb_data_t
));
4490 umem_free(od
, sizeof(ztest_od_t
));
4494 cb_data
[2] = ztest_create_cb_data(os
, txg
);
4495 dmu_tx_callback_register(tx
, ztest_commit_callback
, cb_data
[2]);
4498 * Read existing data to make sure there isn't a future leak.
4500 VERIFY(0 == dmu_read(os
, od
->od_object
, 0, sizeof (uint64_t),
4501 &old_txg
, DMU_READ_PREFETCH
));
4504 fatal(0, "future leak: got %" PRIu64
", open txg is %" PRIu64
,
4507 dmu_write(os
, od
->od_object
, 0, sizeof (uint64_t), &txg
, tx
);
4509 (void) mutex_enter(&zcl
.zcl_callbacks_lock
);
4512 * Since commit callbacks don't have any ordering requirement and since
4513 * it is theoretically possible for a commit callback to be called
4514 * after an arbitrary amount of time has elapsed since its txg has been
4515 * synced, it is difficult to reliably determine whether a commit
4516 * callback hasn't been called due to high load or due to a flawed
4519 * In practice, we will assume that if after a certain number of txgs a
4520 * commit callback hasn't been called, then most likely there's an
4521 * implementation bug..
4523 tmp_cb
= list_head(&zcl
.zcl_callbacks
);
4524 if (tmp_cb
!= NULL
&&
4525 tmp_cb
->zcd_txg
+ ZTEST_COMMIT_CB_THRESH
< txg
) {
4526 fatal(0, "Commit callback threshold exceeded, oldest txg: %"
4527 PRIu64
", open txg: %" PRIu64
"\n", tmp_cb
->zcd_txg
, txg
);
4531 * Let's find the place to insert our callbacks.
4533 * Even though the list is ordered by txg, it is possible for the
4534 * insertion point to not be the end because our txg may already be
4535 * quiescing at this point and other callbacks in the open txg
4536 * (from other objsets) may have sneaked in.
4538 tmp_cb
= list_tail(&zcl
.zcl_callbacks
);
4539 while (tmp_cb
!= NULL
&& tmp_cb
->zcd_txg
> txg
)
4540 tmp_cb
= list_prev(&zcl
.zcl_callbacks
, tmp_cb
);
4542 /* Add the 3 callbacks to the list */
4543 for (i
= 0; i
< 3; i
++) {
4545 list_insert_head(&zcl
.zcl_callbacks
, cb_data
[i
]);
4547 list_insert_after(&zcl
.zcl_callbacks
, tmp_cb
,
4550 cb_data
[i
]->zcd_added
= B_TRUE
;
4551 VERIFY(!cb_data
[i
]->zcd_called
);
4553 tmp_cb
= cb_data
[i
];
4558 (void) mutex_exit(&zcl
.zcl_callbacks_lock
);
4562 umem_free(od
, sizeof(ztest_od_t
));
4567 ztest_dsl_prop_get_set(ztest_ds_t
*zd
, uint64_t id
)
4569 zfs_prop_t proplist
[] = {
4571 ZFS_PROP_COMPRESSION
,
4577 (void) rw_enter(&ztest_name_lock
, RW_READER
);
4579 for (p
= 0; p
< sizeof (proplist
) / sizeof (proplist
[0]); p
++)
4580 (void) ztest_dsl_prop_set_uint64(zd
->zd_name
, proplist
[p
],
4581 ztest_random_dsl_prop(proplist
[p
]), (int)ztest_random(2));
4583 (void) rw_exit(&ztest_name_lock
);
4588 ztest_spa_prop_get_set(ztest_ds_t
*zd
, uint64_t id
)
4590 nvlist_t
*props
= NULL
;
4592 (void) rw_enter(&ztest_name_lock
, RW_READER
);
4594 (void) ztest_spa_prop_set_uint64(ZPOOL_PROP_DEDUPDITTO
,
4595 ZIO_DEDUPDITTO_MIN
+ ztest_random(ZIO_DEDUPDITTO_MIN
));
4597 VERIFY3U(spa_prop_get(ztest_spa
, &props
), ==, 0);
4599 if (ztest_opts
.zo_verbose
>= 6)
4600 dump_nvlist(props
, 4);
4604 (void) rw_exit(&ztest_name_lock
);
/*
 * Test snapshot hold/release and deferred destroy.
 */
void
ztest_dmu_snapshot_hold(ztest_ds_t *zd, uint64_t id)
{
	int error;
	objset_t *os = zd->zd_os;
	objset_t *origin;
	char snapname[100];
	char fullname[100];
	char clonename[100];
	char tag[100];
	char osname[MAXNAMELEN];

	(void) rw_enter(&ztest_name_lock, RW_READER);

	dmu_objset_name(os, osname);

	(void) snprintf(snapname, 100, "sh1_%llu", (u_longlong_t)id);
	(void) snprintf(fullname, 100, "%s@%s", osname, snapname);
	(void) snprintf(clonename, 100, "%s/ch1_%llu", osname, (u_longlong_t)id);
	(void) snprintf(tag, 100, "tag_%llu", (u_longlong_t)id);

	/*
	 * Clean up from any previous run.
	 */
	(void) dmu_objset_destroy(clonename, B_FALSE);
	(void) dsl_dataset_user_release(osname, snapname, tag, B_FALSE);
	(void) dmu_objset_destroy(fullname, B_FALSE);

	/*
	 * Create snapshot, clone it, mark snap for deferred destroy,
	 * destroy clone, verify snap was also destroyed.
	 */
	error = dmu_objset_snapshot(osname, snapname, NULL, NULL, FALSE,
	    B_FALSE, -1);
	if (error) {
		if (error == ENOSPC) {
			ztest_record_enospc("dmu_objset_snapshot");
			goto out;
		}
		fatal(0, "dmu_objset_snapshot(%s) = %d", fullname, error);
	}

	error = dmu_objset_hold(fullname, FTAG, &origin);
	if (error)
		fatal(0, "dmu_objset_hold(%s) = %d", fullname, error);

	error = dmu_objset_clone(clonename, dmu_objset_ds(origin), 0);
	dmu_objset_rele(origin, FTAG);
	if (error) {
		if (error == ENOSPC) {
			ztest_record_enospc("dmu_objset_clone");
			goto out;
		}
		fatal(0, "dmu_objset_clone(%s) = %d", clonename, error);
	}

	error = dmu_objset_destroy(fullname, B_TRUE);
	if (error) {
		fatal(0, "dmu_objset_destroy(%s, B_TRUE) = %d",
		    fullname, error);
	}

	error = dmu_objset_destroy(clonename, B_FALSE);
	if (error)
		fatal(0, "dmu_objset_destroy(%s) = %d", clonename, error);

	error = dmu_objset_hold(fullname, FTAG, &origin);
	if (error != ENOENT)
		fatal(0, "dmu_objset_hold(%s) = %d", fullname, error);

	/*
	 * Create snapshot, add temporary hold, verify that we can't
	 * destroy a held snapshot, mark for deferred destroy,
	 * release hold, verify snapshot was destroyed.
	 */
	error = dmu_objset_snapshot(osname, snapname, NULL, NULL, FALSE,
	    B_FALSE, -1);
	if (error) {
		if (error == ENOSPC) {
			ztest_record_enospc("dmu_objset_snapshot");
			goto out;
		}
		fatal(0, "dmu_objset_snapshot(%s) = %d", fullname, error);
	}

	error = dsl_dataset_user_hold(osname, snapname, tag, B_FALSE,
	    B_TRUE, -1);
	if (error)
		fatal(0, "dsl_dataset_user_hold(%s)", fullname, tag);

	error = dmu_objset_destroy(fullname, B_FALSE);
	if (error != EBUSY) {
		fatal(0, "dmu_objset_destroy(%s, B_FALSE) = %d",
		    fullname, error);
	}

	error = dmu_objset_destroy(fullname, B_TRUE);
	if (error) {
		fatal(0, "dmu_objset_destroy(%s, B_TRUE) = %d",
		    fullname, error);
	}

	error = dsl_dataset_user_release(osname, snapname, tag, B_FALSE);
	if (error)
		fatal(0, "dsl_dataset_user_release(%s)", fullname, tag);

	VERIFY(dmu_objset_hold(fullname, FTAG, &origin) == ENOENT);

out:
	(void) rw_exit(&ztest_name_lock);
}

/*
 * Inject random faults into the on-disk data.
 */
/* ARGSUSED */
void
ztest_fault_inject(ztest_ds_t *zd, uint64_t id)
{
	ztest_shared_t *zs = ztest_shared;
	spa_t *spa = ztest_spa;
	int fd;
	uint64_t offset;
	uint64_t leaves;
	uint64_t bad = 0x1990c0ffeedecadeull;
	uint64_t top, leaf;
	char *path0;
	char *pathrand;
	size_t fsize;
	int bshift = SPA_MAXBLOCKSHIFT + 2;	/* don't scrog all labels */
	int iters = 1000;
	int maxfaults;
	int mirror_save;
	vdev_t *vd0 = NULL;
	uint64_t guid0 = 0;
	boolean_t islog = B_FALSE;

	path0 = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
	pathrand = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);

	mutex_enter(&ztest_vdev_lock);
	maxfaults = MAXFAULTS();
	leaves = MAX(zs->zs_mirrors, 1) * ztest_opts.zo_raidz;
	mirror_save = zs->zs_mirrors;
	mutex_exit(&ztest_vdev_lock);

	ASSERT(leaves >= 1);

	/*
	 * We need SCL_STATE here because we're going to look at vd0->vdev_tsd.
	 */
	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);

	if (ztest_random(2) == 0) {
		/*
		 * Inject errors on a normal data device or slog device.
		 */
		top = ztest_random_vdev_top(spa, B_TRUE);
		leaf = ztest_random(leaves) + zs->zs_splits;

		/*
		 * Generate paths to the first leaf in this top-level vdev,
		 * and to the random leaf we selected.  We'll induce transient
		 * write failures and random online/offline activity on leaf 0,
		 * and we'll write random garbage to the randomly chosen leaf.
		 */
		(void) snprintf(path0, MAXPATHLEN, ztest_dev_template,
		    ztest_opts.zo_dir, ztest_opts.zo_pool,
		    top * leaves + zs->zs_splits);
		(void) snprintf(pathrand, MAXPATHLEN, ztest_dev_template,
		    ztest_opts.zo_dir, ztest_opts.zo_pool,
		    top * leaves + leaf);

		vd0 = vdev_lookup_by_path(spa->spa_root_vdev, path0);
		if (vd0 != NULL && vd0->vdev_top->vdev_islog)
			islog = B_TRUE;

		if (vd0 != NULL && maxfaults != 1) {
			/*
			 * Make vd0 explicitly claim to be unreadable,
			 * or unwriteable, or reach behind its back
			 * and close the underlying fd.  We can do this if
			 * maxfaults == 0 because we'll fail and reexecute,
			 * and we can do it if maxfaults >= 2 because we'll
			 * have enough redundancy.  If maxfaults == 1, the
			 * combination of this with injection of random data
			 * corruption below exceeds the pool's fault tolerance.
			 */
			vdev_file_t *vf = vd0->vdev_tsd;

			if (vf != NULL && ztest_random(3) == 0) {
				(void) close(vf->vf_vnode->v_fd);
				vf->vf_vnode->v_fd = -1;
			} else if (ztest_random(2) == 0) {
				vd0->vdev_cant_read = B_TRUE;
			} else {
				vd0->vdev_cant_write = B_TRUE;
			}
			guid0 = vd0->vdev_guid;
		}
	} else {
		/*
		 * Inject errors on an l2cache device.
		 */
		spa_aux_vdev_t *sav = &spa->spa_l2cache;

		if (sav->sav_count == 0) {
			spa_config_exit(spa, SCL_STATE, FTAG);
			goto out;
		}
		vd0 = sav->sav_vdevs[ztest_random(sav->sav_count)];
		guid0 = vd0->vdev_guid;
		(void) strcpy(path0, vd0->vdev_path);
		(void) strcpy(pathrand, vd0->vdev_path);

		leaf = 0;
		leaves = 1;
		maxfaults = INT_MAX;	/* no limit on cache devices */
	}

	spa_config_exit(spa, SCL_STATE, FTAG);

	/*
	 * If we can tolerate two or more faults, or we're dealing
	 * with a slog, randomly online/offline vd0.
	 */
	if ((maxfaults >= 2 || islog) && guid0 != 0) {
		if (ztest_random(10) < 6) {
			int flags = (ztest_random(2) == 0 ?
			    ZFS_OFFLINE_TEMPORARY : 0);

			/*
			 * We have to grab the zs_name_lock as writer to
			 * prevent a race between offlining a slog and
			 * destroying a dataset. Offlining the slog will
			 * grab a reference on the dataset which may cause
			 * dmu_objset_destroy() to fail with EBUSY thus
			 * leaving the dataset in an inconsistent state.
			 */
			if (islog)
				(void) rw_enter(&ztest_name_lock,
				    RW_WRITER);

			VERIFY(vdev_offline(spa, guid0, flags) != EBUSY);

			if (islog)
				(void) rw_exit(&ztest_name_lock);
		} else {
			(void) vdev_online(spa, guid0, 0, NULL);
		}
	}

	if (maxfaults == 0)
		goto out;

	/*
	 * We have at least single-fault tolerance, so inject data corruption.
	 */
	fd = open(pathrand, O_RDWR);

	if (fd == -1)	/* we hit a gap in the device namespace */
		goto out;

	fsize = lseek(fd, 0, SEEK_END);
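
	/*
	 * Each pass below writes the 64-bit "bad" word at a pseudo-random,
	 * 8-byte-aligned offset of the form
	 *
	 *	N * (leaves << bshift) + (leaf << bshift) + slop
	 *
	 * so that, across repeated calls, two different leaves are never
	 * damaged at the same device offset; presumably this keeps the
	 * accumulated corruption within the pool's single-fault tolerance,
	 * while the large bshift (see the "don't scrog all labels" note
	 * above) keeps the writes clear of the vdev labels.
	 */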
	while (--iters != 0) {
		offset = ztest_random(fsize / (leaves << bshift)) *
		    (leaves << bshift) + (leaf << bshift) +
		    (ztest_random(1ULL << (bshift - 1)) & -8ULL);

		if (offset >= fsize)
			continue;

		mutex_enter(&ztest_vdev_lock);
		if (mirror_save != zs->zs_mirrors) {
			mutex_exit(&ztest_vdev_lock);
			(void) close(fd);
			goto out;
		}

		if (pwrite(fd, &bad, sizeof (bad), offset) != sizeof (bad))
			fatal(1, "can't inject bad word at 0x%llx in %s",
			    offset, pathrand);

		mutex_exit(&ztest_vdev_lock);

		if (ztest_opts.zo_verbose >= 7)
			(void) printf("injected bad word into %s,"
			    " offset 0x%llx\n", pathrand, (u_longlong_t)offset);
	}

	(void) close(fd);
out:
	umem_free(path0, MAXPATHLEN);
	umem_free(pathrand, MAXPATHLEN);
}

/*
 * Verify that DDT repair works as expected.
 */
void
ztest_ddt_repair(ztest_ds_t *zd, uint64_t id)
{
	ztest_shared_t *zs = ztest_shared;
	spa_t *spa = ztest_spa;
	objset_t *os = zd->zd_os;
	ztest_od_t *od;
	uint64_t object, blocksize, txg, pattern, psize;
	enum zio_checksum checksum = spa_dedup_checksum(spa);
	dmu_buf_t *db;
	dmu_tx_t *tx;
	void *buf;
	blkptr_t blk;
	int copies = 2 * ZIO_DEDUPDITTO_MIN;
	int i;

	blocksize = ztest_random_blocksize();
	blocksize = MIN(blocksize, 2048);	/* because we write so many */

	od = umem_alloc(sizeof (ztest_od_t), UMEM_NOFAIL);
	ztest_od_init(od, id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0);

	if (ztest_object_init(zd, od, sizeof (ztest_od_t), B_FALSE) != 0) {
		umem_free(od, sizeof (ztest_od_t));
		return;
	}

	/*
	 * Take the name lock as writer to prevent anyone else from changing
	 * the pool and dataset properties we need to maintain during this test.
	 */
	(void) rw_enter(&ztest_name_lock, RW_WRITER);

	if (ztest_dsl_prop_set_uint64(zd->zd_name, ZFS_PROP_DEDUP, checksum,
	    B_FALSE) != 0 ||
	    ztest_dsl_prop_set_uint64(zd->zd_name, ZFS_PROP_COPIES, 1,
	    B_FALSE) != 0) {
		(void) rw_exit(&ztest_name_lock);
		umem_free(od, sizeof (ztest_od_t));
		return;
	}

	object = od[0].od_object;
	blocksize = od[0].od_blocksize;
	pattern = zs->zs_guid ^ dmu_objset_fsid_guid(os);

	ASSERT(object != 0);

	tx = dmu_tx_create(os);
	dmu_tx_hold_write(tx, object, 0, copies * blocksize);
	txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
	if (txg == 0) {
		(void) rw_exit(&ztest_name_lock);
		umem_free(od, sizeof (ztest_od_t));
		return;
	}

	/*
	 * Write all the copies of our block.
	 */
	for (i = 0; i < copies; i++) {
		uint64_t offset = i * blocksize;
		VERIFY(dmu_buf_hold(os, object, offset, FTAG, &db,
		    DMU_READ_NO_PREFETCH) == 0);
		ASSERT(db->db_offset == offset);
		ASSERT(db->db_size == blocksize);
		ASSERT(ztest_pattern_match(db->db_data, db->db_size, pattern) ||
		    ztest_pattern_match(db->db_data, db->db_size, 0ULL));
		dmu_buf_will_fill(db, tx);
		ztest_pattern_set(db->db_data, db->db_size, pattern);
		dmu_buf_rele(db, FTAG);
	}

	dmu_tx_commit(tx);
	txg_wait_synced(spa_get_dsl(spa), txg);

	/*
	 * Find out what block we got.
	 */
	VERIFY(dmu_buf_hold(os, object, 0, FTAG, &db,
	    DMU_READ_NO_PREFETCH) == 0);
	blk = *((dmu_buf_impl_t *)db)->db_blkptr;
	dmu_buf_rele(db, FTAG);
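
	/*
	 * The 'copies' identical writes above should all share the same
	 * on-disk block (dedup is enabled on the dataset), and because the
	 * refcount exceeds the dedupditto threshold the DDT should also have
	 * stored extra copies of it.  Damaging that one block below therefore
	 * exercises self-healing from the ditto copies on a later read.
	 */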
	/*
	 * Damage the block.  Dedup-ditto will save us when we read it later.
	 */
	psize = BP_GET_PSIZE(&blk);
	buf = zio_buf_alloc(psize);
	ztest_pattern_set(buf, psize, ~pattern);

	(void) zio_wait(zio_rewrite(NULL, spa, 0, &blk,
	    buf, psize, NULL, NULL, ZIO_PRIORITY_SYNC_WRITE,
	    ZIO_FLAG_CANFAIL | ZIO_FLAG_INDUCE_DAMAGE, NULL));

	zio_buf_free(buf, psize);

	(void) rw_exit(&ztest_name_lock);
	umem_free(od, sizeof (ztest_od_t));
}

/* ARGSUSED */
void
ztest_scrub(ztest_ds_t *zd, uint64_t id)
{
	spa_t *spa = ztest_spa;

	(void) spa_scan(spa, POOL_SCAN_SCRUB);
	(void) poll(NULL, 0, 100); /* wait a moment, then force a restart */
	(void) spa_scan(spa, POOL_SCAN_SCRUB);
}

/*
 * Change the guid for the pool.
 */
/* ARGSUSED */
void
ztest_reguid(ztest_ds_t *zd, uint64_t id)
{
	spa_t *spa = ztest_spa;
	uint64_t orig, load;

	orig = spa_guid(spa);
	load = spa_load_guid(spa);
	if (spa_change_guid(spa) != 0)
		return;

	if (ztest_opts.zo_verbose >= 3) {
		(void) printf("Changed guid old %llu -> %llu\n",
		    (u_longlong_t)orig, (u_longlong_t)spa_guid(spa));
	}
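
	/*
	 * The pool guid should have changed, while the load guid (which only
	 * identifies this particular in-core import of the pool) should be
	 * unaffected by a reguid; the checks below rely on that reading of
	 * the two guids.
	 */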
	VERIFY3U(orig, !=, spa_guid(spa));
	VERIFY3U(load, ==, spa_load_guid(spa));
}

/*
 * Rename the pool to a different name and then rename it back.
 */
/* ARGSUSED */
void
ztest_spa_rename(ztest_ds_t *zd, uint64_t id)
{
	char *oldname, *newname;
	spa_t *spa;

	(void) rw_enter(&ztest_name_lock, RW_WRITER);

	oldname = ztest_opts.zo_pool;
	newname = umem_alloc(strlen(oldname) + 5, UMEM_NOFAIL);
	(void) strcpy(newname, oldname);
	(void) strcat(newname, "_tmp");

	VERIFY3U(0, ==, spa_rename(oldname, newname));

	/*
	 * Try to open it under the old name, which shouldn't exist
	 */
	VERIFY3U(ENOENT, ==, spa_open(oldname, &spa, FTAG));

	/*
	 * Open it under the new name and make sure it's still the same spa_t.
	 */
	VERIFY3U(0, ==, spa_open(newname, &spa, FTAG));

	ASSERT(spa == ztest_spa);
	spa_close(spa, FTAG);

	/*
	 * Rename it back to the original
	 */
	VERIFY3U(0, ==, spa_rename(newname, oldname));

	/*
	 * Make sure it can still be opened
	 */
	VERIFY3U(0, ==, spa_open(oldname, &spa, FTAG));

	ASSERT(spa == ztest_spa);
	spa_close(spa, FTAG);

	umem_free(newname, strlen(newname) + 1);

	(void) rw_exit(&ztest_name_lock);
}

/*
 * Verify pool integrity by running zdb.
 */
static void
ztest_run_zdb(char *pool)
{
	int status;
	char *bin;
	char *zdb;
	char *zbuf;
	FILE *fp;

	bin = umem_alloc(MAXPATHLEN + MAXNAMELEN + 20, UMEM_NOFAIL);
	zdb = umem_alloc(MAXPATHLEN + MAXNAMELEN + 20, UMEM_NOFAIL);
	zbuf = umem_alloc(1024, UMEM_NOFAIL);

	VERIFY(realpath(getexecname(), bin) != NULL);
	if (strncmp(bin, "/usr/sbin/ztest", 15) == 0) {
		strcpy(bin, "/usr/sbin/zdb"); /* Installed */
	} else if (strncmp(bin, "/sbin/ztest", 11) == 0) {
		strcpy(bin, "/sbin/zdb"); /* Installed */
	} else {
		strstr(bin, "/ztest/")[0] = '\0'; /* In-tree */
		strcat(bin, "/zdb/zdb");
	}
5129 "%s -bcc%s%s -U %s %s",
5131 ztest_opts
.zo_verbose
>= 3 ? "s" : "",
5132 ztest_opts
.zo_verbose
>= 4 ? "v" : "",
5136 if (ztest_opts
.zo_verbose
>= 5)
5137 (void) printf("Executing %s\n", strstr(zdb
, "zdb "));
5139 fp
= popen(zdb
, "r");
5141 while (fgets(zbuf
, 1024, fp
) != NULL
)
5142 if (ztest_opts
.zo_verbose
>= 3)
5143 (void) printf("%s", zbuf
);
5145 status
= pclose(fp
);
5150 ztest_dump_core
= 0;
5151 if (WIFEXITED(status
))
5152 fatal(0, "'%s' exit code %d", zdb
, WEXITSTATUS(status
));
5154 fatal(0, "'%s' died with signal %d", zdb
, WTERMSIG(status
));
5156 umem_free(bin
, MAXPATHLEN
+ MAXNAMELEN
+ 20);
5157 umem_free(zdb
, MAXPATHLEN
+ MAXNAMELEN
+ 20);
5158 umem_free(zbuf
, 1024);
static void
ztest_walk_pool_directory(char *header)
{
	spa_t *spa = NULL;

	if (ztest_opts.zo_verbose >= 6)
		(void) printf("%s\n", header);

	mutex_enter(&spa_namespace_lock);
	while ((spa = spa_next(spa)) != NULL)
		if (ztest_opts.zo_verbose >= 6)
			(void) printf("\t%s\n", spa_name(spa));
	mutex_exit(&spa_namespace_lock);
}

static void
ztest_spa_import_export(char *oldname, char *newname)
{
	nvlist_t *config, *newconfig;
	uint64_t pool_guid;
	spa_t *spa;

	if (ztest_opts.zo_verbose >= 4) {
		(void) printf("import/export: old = %s, new = %s\n",
		    oldname, newname);
	}

	/*
	 * Clean up from previous runs.
	 */
	(void) spa_destroy(newname);

	/*
	 * Get the pool's configuration and guid.
	 */
	VERIFY3U(0, ==, spa_open(oldname, &spa, FTAG));

	/*
	 * Kick off a scrub to tickle scrub/export races.
	 */
	if (ztest_random(2) == 0)
		(void) spa_scan(spa, POOL_SCAN_SCRUB);

	pool_guid = spa_guid(spa);
	spa_close(spa, FTAG);

	ztest_walk_pool_directory("pools before export");

	VERIFY3U(0, ==, spa_export(oldname, &config, B_FALSE, B_FALSE));

	ztest_walk_pool_directory("pools after export");

	newconfig = spa_tryimport(config);
	ASSERT(newconfig != NULL);
	nvlist_free(newconfig);

	/*
	 * Import it under the new name.
	 */
	VERIFY3U(0, ==, spa_import(newname, config, NULL, 0));

	ztest_walk_pool_directory("pools after import");

	/*
	 * Try to import it again -- should fail with EEXIST.
	 */
	VERIFY3U(EEXIST, ==, spa_import(newname, config, NULL, 0));

	/*
	 * Try to import it under a different name -- should fail with EEXIST.
	 */
	VERIFY3U(EEXIST, ==, spa_import(oldname, config, NULL, 0));

	/*
	 * Verify that the pool is no longer visible under the old name.
	 */
	VERIFY3U(ENOENT, ==, spa_open(oldname, &spa, FTAG));

	/*
	 * Verify that we can open and close the pool using the new name.
	 */
	VERIFY3U(0, ==, spa_open(newname, &spa, FTAG));
	ASSERT(pool_guid == spa_guid(spa));
	spa_close(spa, FTAG);

	nvlist_free(config);
}

static void
ztest_resume(spa_t *spa)
{
	if (spa_suspended(spa) && ztest_opts.zo_verbose >= 6)
		(void) printf("resuming from suspended state\n");
	spa_vdev_state_enter(spa, SCL_NONE);
	vdev_clear(spa, NULL);
	(void) spa_vdev_state_exit(spa, NULL, 0);
	(void) zio_resume(spa);
}

static void
ztest_resume_thread(void *arg)
{
	spa_t *spa = arg;

	while (!ztest_exiting) {
		if (spa_suspended(spa))
			ztest_resume(spa);
		(void) poll(NULL, 0, 100);
	}

	thread_exit();
}

/* ARGSUSED */
static void
ztest_deadman_alarm(int sig)
{
	fatal(0, "failed to complete within %d seconds of deadline", GRACE);
}

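/*
 * The SIGALRM handler above is armed in ztest_run(): the alarm is set to the
 * length of one pass plus GRACE seconds, so if a pass wedges (deadlock, lost
 * I/O, etc.) the process dies loudly via fatal() instead of hanging forever.
 */
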
static void
ztest_execute(int test, ztest_info_t *zi, uint64_t id)
{
	ztest_ds_t *zd = &ztest_ds[id % ztest_opts.zo_datasets];
	ztest_shared_callstate_t *zc = ZTEST_GET_SHARED_CALLSTATE(test);
	hrtime_t functime = gethrtime();
	int i;

	for (i = 0; i < zi->zi_iters; i++)
		zi->zi_func(zd, id);

	functime = gethrtime() - functime;

	atomic_add_64(&zc->zc_count, 1);
	atomic_add_64(&zc->zc_time, functime);

	if (ztest_opts.zo_verbose >= 4) {
		Dl_info dli;
		(void) dladdr((void *)zi->zi_func, &dli);
		(void) printf("%6.2f sec in %s\n",
		    (double)functime / NANOSEC, dli.dli_sname);
	}
}

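/*
 * Note that the call counts and times are accumulated with atomic adds into
 * the shared (mmap'd) callstate, so they survive the child being SIGKILLed
 * and are what the parent later prints in the per-function workload summary.
 */
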
static void
ztest_thread(void *arg)
{
	int rand;
	uint64_t id = (uintptr_t)arg;
	ztest_shared_t *zs = ztest_shared;
	uint64_t call_next;
	hrtime_t now;
	ztest_info_t *zi;
	ztest_shared_callstate_t *zc;

	while ((now = gethrtime()) < zs->zs_thread_stop) {
		/*
		 * See if it's time to force a crash.
		 */
		if (now > zs->zs_thread_kill)
			ztest_kill(zs);

		/*
		 * If we're getting ENOSPC with some regularity, stop.
		 */
		if (zs->zs_enospc_count > 10)
			break;

		/*
		 * Pick a random function to execute.
		 */
		rand = ztest_random(ZTEST_FUNCS);
		zi = &ztest_info[rand];
		zc = ZTEST_GET_SHARED_CALLSTATE(rand);
		call_next = zc->zc_next;
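
		/*
		 * zc_next is the next scheduled time for this function and is
		 * shared by all threads.  The compare-and-swap below lets
		 * exactly one of the racing threads claim this slot and push
		 * zc_next forward by a random amount (averaging
		 * zi_interval[0]); the losers simply pick another function on
		 * their next trip around the loop.
		 */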
		if (now >= call_next &&
		    atomic_cas_64(&zc->zc_next, call_next, call_next +
		    ztest_random(2 * zi->zi_interval[0] + 1)) == call_next) {
			ztest_execute(rand, zi, id);
		}
	}

	thread_exit();
}

static void
ztest_dataset_name(char *dsname, char *pool, int d)
{
	(void) snprintf(dsname, MAXNAMELEN, "%s/ds_%d", pool, d);
}

static void
ztest_dataset_destroy(int d)
{
	char name[MAXNAMELEN];
	int t;

	ztest_dataset_name(name, ztest_opts.zo_pool, d);

	if (ztest_opts.zo_verbose >= 3)
		(void) printf("Destroying %s to free up space\n", name);

	/*
	 * Cleanup any non-standard clones and snapshots.  In general,
	 * ztest thread t operates on dataset (t % zopt_datasets),
	 * so there may be more than one thing to clean up.
	 */
	for (t = d; t < ztest_opts.zo_threads;
	    t += ztest_opts.zo_datasets)
		ztest_dsl_dataset_cleanup(name, t);

	(void) dmu_objset_find(name, ztest_objset_destroy_cb, NULL,
	    DS_FIND_SNAPSHOTS | DS_FIND_CHILDREN);
}

static void
ztest_dataset_dirobj_verify(ztest_ds_t *zd)
{
	uint64_t usedobjs, dirobjs, scratch;

	/*
	 * ZTEST_DIROBJ is the object directory for the entire dataset.
	 * Therefore, the number of objects in use should equal the
	 * number of ZTEST_DIROBJ entries, +1 for ZTEST_DIROBJ itself.
	 * If not, we have an object leak.
	 *
	 * Note that we can only check this in ztest_dataset_open(),
	 * when the open-context and syncing-context values agree.
	 * That's because zap_count() returns the open-context value,
	 * while dmu_objset_space() returns the rootbp fill count.
	 */
	VERIFY3U(0, ==, zap_count(zd->zd_os, ZTEST_DIROBJ, &dirobjs));
	dmu_objset_space(zd->zd_os, &scratch, &scratch, &usedobjs, &scratch);
	ASSERT3U(dirobjs + 1, ==, usedobjs);
}

static int
ztest_dataset_open(int d)
{
	ztest_ds_t *zd = &ztest_ds[d];
	uint64_t committed_seq = ZTEST_GET_SHARED_DS(d)->zd_seq;
	objset_t *os;
	zilog_t *zilog;
	char name[MAXNAMELEN];
	int error;

	ztest_dataset_name(name, ztest_opts.zo_pool, d);

	(void) rw_enter(&ztest_name_lock, RW_READER);

	error = ztest_dataset_create(name);
	if (error == ENOSPC) {
		(void) rw_exit(&ztest_name_lock);
		ztest_record_enospc(FTAG);
		return (error);
	}
	ASSERT(error == 0 || error == EEXIST);

	VERIFY3U(dmu_objset_hold(name, zd, &os), ==, 0);
	(void) rw_exit(&ztest_name_lock);

	ztest_zd_init(zd, ZTEST_GET_SHARED_DS(d), os);

	zilog = zd->zd_zilog;

	if (zilog->zl_header->zh_claim_lr_seq != 0 &&
	    zilog->zl_header->zh_claim_lr_seq < committed_seq)
		fatal(0, "missing log records: claimed %llu < committed %llu",
		    zilog->zl_header->zh_claim_lr_seq, committed_seq);

	ztest_dataset_dirobj_verify(zd);

	zil_replay(os, zd, ztest_replay_vector);

	ztest_dataset_dirobj_verify(zd);

	if (ztest_opts.zo_verbose >= 6)
		(void) printf("%s replay %llu blocks, %llu records, seq %llu\n",
		    zd->zd_name,
		    (u_longlong_t)zilog->zl_parse_blk_count,
		    (u_longlong_t)zilog->zl_parse_lr_count,
		    (u_longlong_t)zilog->zl_replaying_seq);

	zilog = zil_open(os, ztest_get_data);

	if (zilog->zl_replaying_seq != 0 &&
	    zilog->zl_replaying_seq < committed_seq)
		fatal(0, "missing log records: replayed %llu < committed %llu",
		    zilog->zl_replaying_seq, committed_seq);

	return (0);
}

static void
ztest_dataset_close(int d)
{
	ztest_ds_t *zd = &ztest_ds[d];

	zil_close(zd->zd_zilog);
	dmu_objset_rele(zd->zd_os, zd);
}

/*
 * Kick off threads to run tests on all datasets in parallel.
 */
static void
ztest_run(ztest_shared_t *zs)
{
	kt_did_t *tid;
	spa_t *spa;
	objset_t *os;
	kthread_t *resume_thread;
	uint64_t object;
	int error;
	int t, d;

	ztest_exiting = B_FALSE;

	/*
	 * Initialize parent/child shared state.
	 */
	mutex_init(&ztest_vdev_lock, NULL, MUTEX_DEFAULT, NULL);
	rw_init(&ztest_name_lock, NULL, RW_DEFAULT, NULL);

	zs->zs_thread_start = gethrtime();
	zs->zs_thread_stop =
	    zs->zs_thread_start + ztest_opts.zo_passtime * NANOSEC;
	zs->zs_thread_stop = MIN(zs->zs_thread_stop, zs->zs_proc_stop);
	zs->zs_thread_kill = zs->zs_thread_stop;
	if (ztest_random(100) < ztest_opts.zo_killrate) {
		zs->zs_thread_kill -=
		    ztest_random(ztest_opts.zo_passtime * NANOSEC);
	}
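
	/*
	 * The randomization above means that, with probability zo_killrate
	 * (in percent), zs_thread_kill lands at a random point before
	 * zs_thread_stop; ztest_thread() compares the current time against
	 * it and calls ztest_kill() to kill the child mid-pass.
	 */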

	mutex_init(&zcl.zcl_callbacks_lock, NULL, MUTEX_DEFAULT, NULL);

	list_create(&zcl.zcl_callbacks, sizeof (ztest_cb_data_t),
	    offsetof(ztest_cb_data_t, zcd_node));

	/*
	 * Open our pool.
	 */
	kernel_init(FREAD | FWRITE);
	VERIFY(spa_open(ztest_opts.zo_pool, &spa, FTAG) == 0);
	spa->spa_debug = B_TRUE;
	ztest_spa = spa;

	VERIFY3U(0, ==, dmu_objset_hold(ztest_opts.zo_pool, FTAG, &os));
	zs->zs_guid = dmu_objset_fsid_guid(os);
	dmu_objset_rele(os, FTAG);

	spa->spa_dedup_ditto = 2 * ZIO_DEDUPDITTO_MIN;

	/*
	 * We don't expect the pool to suspend unless maxfaults == 0,
	 * in which case ztest_fault_inject() temporarily takes away
	 * the only valid replica.
	 */
	if (MAXFAULTS() == 0)
		spa->spa_failmode = ZIO_FAILURE_MODE_WAIT;
	else
		spa->spa_failmode = ZIO_FAILURE_MODE_PANIC;

	/*
	 * Create a thread to periodically resume suspended I/O.
	 */
	VERIFY3P((resume_thread = zk_thread_create(NULL, 0,
	    (thread_func_t)ztest_resume_thread, spa, TS_RUN, NULL, 0, 0,
	    PTHREAD_CREATE_JOINABLE)), !=, NULL);

	/*
	 * Set a deadman alarm to abort() if we hang.
	 */
	signal(SIGALRM, ztest_deadman_alarm);
	alarm((zs->zs_thread_stop - zs->zs_thread_start) / NANOSEC + GRACE);

	/*
	 * Verify that we can safely inquire about any object,
	 * whether it's allocated or not.  To make it interesting,
	 * we probe a 5-wide window around each power of two.
	 * This hits all edge cases, including zero and the max.
	 */
	for (t = 0; t < 64; t++) {
		for (d = -5; d <= 5; d++) {
			error = dmu_object_info(spa->spa_meta_objset,
			    (1ULL << t) + d, NULL);
			ASSERT(error == 0 || error == ENOENT ||
			    error == EINVAL);
		}
	}

	/*
	 * If we got any ENOSPC errors on the previous run, destroy something.
	 */
	if (zs->zs_enospc_count != 0) {
		int d = ztest_random(ztest_opts.zo_datasets);
		ztest_dataset_destroy(d);
	}
	zs->zs_enospc_count = 0;

	tid = umem_zalloc(ztest_opts.zo_threads * sizeof (kt_did_t),
	    UMEM_NOFAIL);

	if (ztest_opts.zo_verbose >= 4)
		(void) printf("starting main threads...\n");

	/*
	 * Kick off all the tests that run in parallel.
	 */
	for (t = 0; t < ztest_opts.zo_threads; t++) {
		kthread_t *thread;

		if (t < ztest_opts.zo_datasets &&
		    ztest_dataset_open(t) != 0)
			return;

		VERIFY3P(thread = zk_thread_create(NULL, 0,
		    (thread_func_t)ztest_thread,
		    (void *)(uintptr_t)t, TS_RUN, NULL, 0, 0,
		    PTHREAD_CREATE_JOINABLE), !=, NULL);
		tid[t] = thread->t_tid;
	}

	/*
	 * Wait for all of the tests to complete.  We go in reverse order
	 * so we don't close datasets while threads are still using them.
	 */
	for (t = ztest_opts.zo_threads - 1; t >= 0; t--) {
		thread_join(tid[t]);
		if (t < ztest_opts.zo_datasets)
			ztest_dataset_close(t);
	}

	txg_wait_synced(spa_get_dsl(spa), 0);

	zs->zs_alloc = metaslab_class_get_alloc(spa_normal_class(spa));
	zs->zs_space = metaslab_class_get_space(spa_normal_class(spa));

	umem_free(tid, ztest_opts.zo_threads * sizeof (kt_did_t));

	/* Kill the resume thread */
	ztest_exiting = B_TRUE;
	thread_join(resume_thread->t_tid);
	ztest_resume(spa);

	/*
	 * Right before closing the pool, kick off a bunch of async I/O;
	 * spa_close() should wait for it to complete.
	 */
	for (object = 1; object < 50; object++)
		dmu_prefetch(spa->spa_meta_objset, object, 0, 1ULL << 20);

	/* Verify that at least one commit cb was called in a timely fashion */
	if (zc_cb_counter >= ZTEST_COMMIT_CB_MIN_REG)
		VERIFY3U(zc_min_txg_delay, ==, 0);

	spa_close(spa, FTAG);

	/*
	 * Verify that we can loop over all pools.
	 */
	mutex_enter(&spa_namespace_lock);
	for (spa = spa_next(NULL); spa != NULL; spa = spa_next(spa))
		if (ztest_opts.zo_verbose > 3)
			(void) printf("spa_next: found %s\n", spa_name(spa));
	mutex_exit(&spa_namespace_lock);

	/*
	 * Verify that we can export the pool and reimport it under a
	 * different name.
	 */
	if (ztest_random(2) == 0) {
		char name[MAXNAMELEN];
		(void) snprintf(name, MAXNAMELEN, "%s_import",
		    ztest_opts.zo_pool);
		ztest_spa_import_export(ztest_opts.zo_pool, name);
		ztest_spa_import_export(name, ztest_opts.zo_pool);
	}

	kernel_fini();

	list_destroy(&zcl.zcl_callbacks);
	mutex_destroy(&zcl.zcl_callbacks_lock);
	rw_destroy(&ztest_name_lock);
	mutex_destroy(&ztest_vdev_lock);
}

static void
ztest_freeze(void)
{
	ztest_ds_t *zd = &ztest_ds[0];
	spa_t *spa;
	int numloops = 0;

	if (ztest_opts.zo_verbose >= 3)
		(void) printf("testing spa_freeze()...\n");

	kernel_init(FREAD | FWRITE);
	VERIFY3U(0, ==, spa_open(ztest_opts.zo_pool, &spa, FTAG));
	VERIFY3U(0, ==, ztest_dataset_open(0));

	/*
	 * Force the first log block to be transactionally allocated.
	 * We have to do this before we freeze the pool -- otherwise
	 * the log chain won't be anchored.
	 */
	while (BP_IS_HOLE(&zd->zd_zilog->zl_header->zh_log)) {
		ztest_dmu_object_alloc_free(zd, 0);
		zil_commit(zd->zd_zilog, 0);
	}

	txg_wait_synced(spa_get_dsl(spa), 0);

	/*
	 * Freeze the pool.  This stops spa_sync() from doing anything,
	 * so that the only way to record changes from now on is the ZIL.
	 */
	spa_freeze(spa);

	/*
	 * Run tests that generate log records but don't alter the pool config
	 * or depend on DSL sync tasks (snapshots, objset create/destroy, etc).
	 * We do a txg_wait_synced() after each iteration to force the txg
	 * to increase well beyond the last synced value in the uberblock.
	 * The ZIL should be OK with that.
	 */
	while (ztest_random(10) != 0 &&
	    numloops++ < ztest_opts.zo_maxloops) {
		ztest_dmu_write_parallel(zd, 0);
		ztest_dmu_object_alloc_free(zd, 0);
		txg_wait_synced(spa_get_dsl(spa), 0);
	}

	/*
	 * Commit all of the changes we just generated.
	 */
	zil_commit(zd->zd_zilog, 0);
	txg_wait_synced(spa_get_dsl(spa), 0);

	/*
	 * Close our dataset and close the pool.
	 */
	ztest_dataset_close(0);
	spa_close(spa, FTAG);
	kernel_fini();

	/*
	 * Open and close the pool and dataset to induce log replay.
	 */
	kernel_init(FREAD | FWRITE);
	VERIFY3U(0, ==, spa_open(ztest_opts.zo_pool, &spa, FTAG));
	VERIFY3U(0, ==, ztest_dataset_open(0));
	ztest_dataset_close(0);
	spa_close(spa, FTAG);
	kernel_fini();
}

void
print_time(hrtime_t t, char *timebuf)
{
	hrtime_t s = t / NANOSEC;
	hrtime_t m = s / 60;
	hrtime_t h = m / 60;
	hrtime_t d = h / 24;

	s -= m * 60;
	m -= h * 60;
	h -= d * 24;

	timebuf[0] = '\0';

	if (d)
		(void) sprintf(timebuf,
		    "%llud%02lluh%02llum%02llus", d, h, m, s);
	else if (h)
		(void) sprintf(timebuf, "%lluh%02llum%02llus", h, m, s);
	else if (m)
		(void) sprintf(timebuf, "%llum%02llus", m, s);
	else
		(void) sprintf(timebuf, "%llus", s);
}

static nvlist_t *
make_random_props(void)
{
	nvlist_t *props;

	if (ztest_random(2) == 0)
		return (NULL);

	VERIFY(nvlist_alloc(&props, NV_UNIQUE_NAME, 0) == 0);
	VERIFY(nvlist_add_uint64(props, "autoreplace", 1) == 0);

	return (props);
}

/*
 * Create a storage pool with the given name and initial vdev size.
 * Then test spa_freeze() functionality.
 */
static void
ztest_init(ztest_shared_t *zs)
{
	spa_t *spa;
	nvlist_t *nvroot, *props;

	mutex_init(&ztest_vdev_lock, NULL, MUTEX_DEFAULT, NULL);
	rw_init(&ztest_name_lock, NULL, RW_DEFAULT, NULL);

	kernel_init(FREAD | FWRITE);

	/*
	 * Create the storage pool.
	 */
	(void) spa_destroy(ztest_opts.zo_pool);
	ztest_shared->zs_vdev_next_leaf = 0;
	zs->zs_splits = 0;
	zs->zs_mirrors = ztest_opts.zo_mirrors;
	nvroot = make_vdev_root(NULL, NULL, ztest_opts.zo_vdev_size, 0,
	    0, ztest_opts.zo_raidz, zs->zs_mirrors, 1);
	props = make_random_props();
	VERIFY3U(0, ==, spa_create(ztest_opts.zo_pool, nvroot, props,
	    NULL, NULL));
	nvlist_free(nvroot);

	VERIFY3U(0, ==, spa_open(ztest_opts.zo_pool, &spa, FTAG));
	zs->zs_metaslab_sz =
	    1ULL << spa->spa_root_vdev->vdev_child[0]->vdev_ms_shift;
	spa_close(spa, FTAG);

	kernel_fini();

	ztest_run_zdb(ztest_opts.zo_pool);

	ztest_freeze();

	ztest_run_zdb(ztest_opts.zo_pool);

	rw_destroy(&ztest_name_lock);
	mutex_destroy(&ztest_vdev_lock);
}

static void
setup_fds(void)
{
	int fd;

	char *tmp = tempnam(NULL, NULL);
	fd = open(tmp, O_RDWR | O_CREAT, 0700);
	ASSERT3S(fd, >=, 0);
	if (fd != ZTEST_FD_DATA) {
		VERIFY3S(dup2(fd, ZTEST_FD_DATA), ==, ZTEST_FD_DATA);
		(void) close(fd);
	}

	fd = open("/dev/urandom", O_RDONLY);
	ASSERT3S(fd, >=, 0);
	if (fd != ZTEST_FD_RAND) {
		VERIFY3S(dup2(fd, ZTEST_FD_RAND), ==, ZTEST_FD_RAND);
		(void) close(fd);
	}
}

static int
shared_data_size(ztest_shared_hdr_t *hdr)
{
	int size;

	size = hdr->zh_hdr_size;
	size += hdr->zh_opts_size;
	size += hdr->zh_size;
	size += hdr->zh_stats_size * hdr->zh_stats_count;
	size += hdr->zh_ds_size * hdr->zh_ds_count;

	return (size);
}

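/*
 * The shared mapping backed by ZTEST_FD_DATA is laid out as consecutive
 * regions, in the order shared_data_size() sums them up:
 *
 *	ztest_shared_hdr_t          (the header itself)
 *	ztest_shared_opts_t         (options handed from parent to child)
 *	ztest_shared_t              (global shared state)
 *	ztest_shared_callstate_t[]  (one entry per test function)
 *	ztest_shared_ds_t[]         (one entry per dataset)
 *
 * setup_hdr() sizes the file and fills in the header; setup_data() maps the
 * whole thing and points the ztest_shared_* globals at the matching offsets.
 */
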
static void
setup_hdr(void)
{
	int size;
	ztest_shared_hdr_t *hdr;

	hdr = (void *)mmap(0, P2ROUNDUP(sizeof (*hdr), getpagesize()),
	    PROT_READ | PROT_WRITE, MAP_SHARED, ZTEST_FD_DATA, 0);
	ASSERT(hdr != MAP_FAILED);

	VERIFY3U(0, ==, ftruncate(ZTEST_FD_DATA, sizeof (ztest_shared_hdr_t)));

	hdr->zh_hdr_size = sizeof (ztest_shared_hdr_t);
	hdr->zh_opts_size = sizeof (ztest_shared_opts_t);
	hdr->zh_size = sizeof (ztest_shared_t);
	hdr->zh_stats_size = sizeof (ztest_shared_callstate_t);
	hdr->zh_stats_count = ZTEST_FUNCS;
	hdr->zh_ds_size = sizeof (ztest_shared_ds_t);
	hdr->zh_ds_count = ztest_opts.zo_datasets;

	size = shared_data_size(hdr);
	VERIFY3U(0, ==, ftruncate(ZTEST_FD_DATA, size));

	(void) munmap((caddr_t)hdr, P2ROUNDUP(sizeof (*hdr), getpagesize()));
}

static void
setup_data(void)
{
	int size, offset;
	ztest_shared_hdr_t *hdr;
	uint8_t *buf;

	hdr = (void *)mmap(0, P2ROUNDUP(sizeof (*hdr), getpagesize()),
	    PROT_READ, MAP_SHARED, ZTEST_FD_DATA, 0);
	ASSERT(hdr != MAP_FAILED);

	size = shared_data_size(hdr);

	(void) munmap((caddr_t)hdr, P2ROUNDUP(sizeof (*hdr), getpagesize()));
	hdr = ztest_shared_hdr = (void *)mmap(0, P2ROUNDUP(size, getpagesize()),
	    PROT_READ | PROT_WRITE, MAP_SHARED, ZTEST_FD_DATA, 0);
	ASSERT(hdr != MAP_FAILED);
	buf = (uint8_t *)hdr;

	offset = hdr->zh_hdr_size;
	ztest_shared_opts = (void *)&buf[offset];
	offset += hdr->zh_opts_size;
	ztest_shared = (void *)&buf[offset];
	offset += hdr->zh_size;
	ztest_shared_callstate = (void *)&buf[offset];
	offset += hdr->zh_stats_size * hdr->zh_stats_count;
	ztest_shared_ds = (void *)&buf[offset];
}

static boolean_t
exec_child(char *cmd, char *libpath, boolean_t ignorekill, int *statusp)
{
	pid_t pid;
	int status;
	char cmdbuf[MAXPATHLEN];

	pid = fork();

	if (cmd == NULL) {
		(void) strlcpy(cmdbuf, getexecname(), sizeof (cmdbuf));
		cmd = cmdbuf;
	}

	if (pid == -1)
		fatal(1, "fork failed");

	if (pid == 0) {	/* child */
		char *emptyargv[2] = { cmd, NULL };

		struct rlimit rl = { 1024, 1024 };
		(void) setrlimit(RLIMIT_NOFILE, &rl);
		(void) enable_extended_FILE_stdio(-1, -1);
		if (libpath != NULL)
			VERIFY(0 == setenv("LD_LIBRARY_PATH", libpath, 1));
		(void) execv(cmd, emptyargv);
		ztest_dump_core = B_FALSE;
		fatal(B_TRUE, "exec failed: %s", cmd);
	}

	while (waitpid(pid, &status, 0) != pid)
		continue;
	if (statusp != NULL)
		*statusp = status;

	if (WIFEXITED(status)) {
		if (WEXITSTATUS(status) != 0) {
			(void) fprintf(stderr, "child exited with code %d\n",
			    WEXITSTATUS(status));
			exit(2);
		}
		return (B_FALSE);
	} else if (WIFSIGNALED(status)) {
		if (!ignorekill || WTERMSIG(status) != SIGKILL) {
			(void) fprintf(stderr, "child died with signal %d\n",
			    WTERMSIG(status));
			exit(3);
		}
		return (B_TRUE);
	} else {
		(void) fprintf(stderr, "something strange happened to child\n");
		exit(4);
	}
}

static void
ztest_run_init(void)
{
	int i;

	ztest_shared_t *zs = ztest_shared;

	ASSERT(ztest_opts.zo_init != 0);

	/*
	 * Blow away any existing copy of zpool.cache
	 */
	(void) remove(spa_config_path);

	/*
	 * Create and initialize our storage pool.
	 */
	for (i = 1; i <= ztest_opts.zo_init; i++) {
		bzero(zs, sizeof (ztest_shared_t));
		if (ztest_opts.zo_verbose >= 3 &&
		    ztest_opts.zo_init != 1) {
			(void) printf("ztest_init(), pass %d\n", i);
		}
		ztest_init(zs);
	}
}

int
main(int argc, char **argv)
{
	int kills = 0;
	int iters = 0;
	int older = 0;
	int newer = 0;
	ztest_shared_t *zs;
	ztest_info_t *zi;
	ztest_shared_callstate_t *zc;
	char timebuf[100];
	char numbuf[6];
	spa_t *spa;
	char cmd[MAXNAMELEN];
	boolean_t hasalt;
	int f;
	boolean_t ischild = (0 == lseek(ZTEST_FD_DATA, 0, SEEK_CUR));

	ASSERT(ischild || errno == EBADF || errno == ESPIPE);
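
	/*
	 * ztest re-executes itself for every pass, and the child inherits
	 * ZTEST_FD_DATA as an open, seekable descriptor backed by the shared
	 * state file.  A fresh top-level invocation has no such descriptor,
	 * so the lseek() above either succeeds (we are a child) or fails with
	 * EBADF/ESPIPE (we are the parent), which is what the ASSERT checks.
	 */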

	(void) setvbuf(stdout, NULL, _IOLBF, 0);

	if (!ischild) {
		dprintf_setup(&argc, argv);
		process_options(argc, argv);

		setup_fds();
		setup_hdr();
		setup_data();
		bcopy(&ztest_opts, ztest_shared_opts,
		    sizeof (*ztest_shared_opts));
	} else {
		setup_data();
		bcopy(ztest_shared_opts, &ztest_opts, sizeof (ztest_opts));
	}
	ASSERT3U(ztest_opts.zo_datasets, ==, ztest_shared_hdr->zh_ds_count);

	/* Override location of zpool.cache */
	(void) asprintf((char **)&spa_config_path, "%s/zpool.cache",
	    ztest_opts.zo_dir);

	ztest_ds = umem_alloc(ztest_opts.zo_datasets * sizeof (ztest_ds_t),
	    UMEM_NOFAIL);
	zs = ztest_shared;

	if (ischild) {
		metaslab_gang_bang = ztest_opts.zo_metaslab_gang_bang;
		metaslab_df_alloc_threshold =
		    zs->zs_metaslab_df_alloc_threshold;
		if (zs->zs_do_init)
			ztest_run_init();
		else
			ztest_run(zs);
		exit(0);
	}

	hasalt = (strlen(ztest_opts.zo_alt_ztest) != 0);

	if (ztest_opts.zo_verbose >= 1) {
		(void) printf("%llu vdevs, %d datasets, %d threads,"
		    " %llu seconds...\n",
		    (u_longlong_t)ztest_opts.zo_vdevs,
		    ztest_opts.zo_datasets,
		    ztest_opts.zo_threads,
		    (u_longlong_t)ztest_opts.zo_time);
	}

	(void) strlcpy(cmd, getexecname(), sizeof (cmd));

	zs->zs_do_init = B_TRUE;
	if (strlen(ztest_opts.zo_alt_ztest) != 0) {
		if (ztest_opts.zo_verbose >= 1) {
			(void) printf("Executing older ztest for "
			    "initialization: %s\n", ztest_opts.zo_alt_ztest);
		}
		VERIFY(!exec_child(ztest_opts.zo_alt_ztest,
		    ztest_opts.zo_alt_libpath, B_FALSE, NULL));
	} else {
		VERIFY(!exec_child(NULL, NULL, B_FALSE, NULL));
	}
	zs->zs_do_init = B_FALSE;

	zs->zs_proc_start = gethrtime();
	zs->zs_proc_stop = zs->zs_proc_start + ztest_opts.zo_time * NANOSEC;

	for (f = 0; f < ZTEST_FUNCS; f++) {
		zi = &ztest_info[f];
		zc = ZTEST_GET_SHARED_CALLSTATE(f);
		if (zs->zs_proc_start + zi->zi_interval[0] > zs->zs_proc_stop)
			zc->zc_next = UINT64_MAX;
		else
			zc->zc_next = zs->zs_proc_start +
			    ztest_random(2 * zi->zi_interval[0] + 1);
	}

	/*
	 * Run the tests in a loop.  These tests include fault injection
	 * to verify that self-healing data works, and forced crashes
	 * to verify that we never lose on-disk consistency.
	 */
	while (gethrtime() < zs->zs_proc_stop) {
		int status;
		boolean_t killed;

		/*
		 * Initialize the workload counters for each function.
		 */
		for (f = 0; f < ZTEST_FUNCS; f++) {
			zc = ZTEST_GET_SHARED_CALLSTATE(f);
			zc->zc_count = 0;
			zc->zc_time = 0;
		}

		/* Set the allocation switch size */
		zs->zs_metaslab_df_alloc_threshold =
		    ztest_random(zs->zs_metaslab_sz / 4) + 1;

		if (!hasalt || ztest_random(2) == 0) {
			if (hasalt && ztest_opts.zo_verbose >= 1) {
				(void) printf("Executing newer ztest: %s\n",
				    cmd);
			}
			newer++;
			killed = exec_child(cmd, NULL, B_TRUE, &status);
		} else {
			if (hasalt && ztest_opts.zo_verbose >= 1) {
				(void) printf("Executing older ztest: %s\n",
				    ztest_opts.zo_alt_ztest);
			}
			older++;
			killed = exec_child(ztest_opts.zo_alt_ztest,
			    ztest_opts.zo_alt_libpath, B_TRUE, &status);
		}

		if (killed)
			kills++;
		iters++;

		if (ztest_opts.zo_verbose >= 1) {
			hrtime_t now = gethrtime();

			now = MIN(now, zs->zs_proc_stop);
			print_time(zs->zs_proc_stop - now, timebuf);
			nicenum(zs->zs_space, numbuf);

			(void) printf("Pass %3d, %8s, %3llu ENOSPC, "
			    "%4.1f%% of %5s used, %3.0f%% done, %8s to go\n",
			    iters,
			    WIFEXITED(status) ? "Complete" : "SIGKILL",
			    (u_longlong_t)zs->zs_enospc_count,
			    100.0 * zs->zs_alloc / zs->zs_space,
			    numbuf,
			    100.0 * (now - zs->zs_proc_start) /
			    (ztest_opts.zo_time * NANOSEC), timebuf);
		}

		if (ztest_opts.zo_verbose >= 2) {
			(void) printf("\nWorkload summary:\n\n");
			(void) printf("%7s %9s %s\n",
			    "Calls", "Time", "Function");
			(void) printf("%7s %9s %s\n",
			    "-----", "----", "--------");
			for (f = 0; f < ZTEST_FUNCS; f++) {
				Dl_info dli;

				zi = &ztest_info[f];
				zc = ZTEST_GET_SHARED_CALLSTATE(f);
				print_time(zc->zc_time, timebuf);
				(void) dladdr((void *)zi->zi_func, &dli);
				(void) printf("%7llu %9s %s\n",
				    (u_longlong_t)zc->zc_count, timebuf,
				    dli.dli_sname);
			}
			(void) printf("\n");
		}

		/*
		 * It's possible that we killed a child during a rename test,
		 * in which case we'll have a 'ztest_tmp' pool lying around
		 * instead of 'ztest'.  Do a blind rename in case this happened.
		 */
		kernel_init(FREAD);
		if (spa_open(ztest_opts.zo_pool, &spa, FTAG) == 0) {
			spa_close(spa, FTAG);
		} else {
			char tmpname[MAXNAMELEN];
			kernel_fini();
			kernel_init(FREAD | FWRITE);
			(void) snprintf(tmpname, sizeof (tmpname), "%s_tmp",
			    ztest_opts.zo_pool);
			(void) spa_rename(tmpname, ztest_opts.zo_pool);
		}
		kernel_fini();

		ztest_run_zdb(ztest_opts.zo_pool);
	}

	if (ztest_opts.zo_verbose >= 1) {
		if (hasalt) {
			(void) printf("%d runs of older ztest: %s\n", older,
			    ztest_opts.zo_alt_ztest);
			(void) printf("%d runs of newer ztest: %s\n", newer,
			    cmd);
		}
		(void) printf("%d killed, %d completed, %.0f%% kill rate\n",
		    kills, iters - kills, (100.0 * kills) / MAX(1, iters));