mirror_zfs.git: cmd/ztest/ztest.c (Illumos #3955)
1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright (c) 2012 by Delphix. All rights reserved.
24 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
25 * Copyright (c) 2013 Steven Hartland. All rights reserved.
26 */
27
28 /*
29 * The objective of this program is to provide a DMU/ZAP/SPA stress test
30 * that runs entirely in userland, is easy to use, and easy to extend.
31 *
32 * The overall design of the ztest program is as follows:
33 *
34 * (1) For each major functional area (e.g. adding vdevs to a pool,
35 * creating and destroying datasets, reading and writing objects, etc)
36 * we have a simple routine to test that functionality. These
37 * individual routines do not have to do anything "stressful".
38 *
39 * (2) We turn these simple functionality tests into a stress test by
40 * running them all in parallel, with as many threads as desired,
41 * and spread across as many datasets, objects, and vdevs as desired.
42 *
43 * (3) While all this is happening, we inject faults into the pool to
44 * verify that self-healing data really works.
45 *
46 * (4) Every time we open a dataset, we change its checksum and compression
47 * functions. Thus even individual objects vary from block to block
48 * in which checksum they use and whether they're compressed.
49 *
50 * (5) To verify that we never lose on-disk consistency after a crash,
51 * we run the entire test in a child of the main process.
52 * At random times, the child self-immolates with a SIGKILL.
53 * This is the software equivalent of pulling the power cord.
54 * The parent then runs the test again, using the existing
55 * storage pool, as many times as desired. If backwards compatibility
56 * testing is enabled ztest will sometimes run the "older" version
57 * of ztest after a SIGKILL.
58 *
59 * (6) To verify that we don't have future leaks or temporal incursions,
60 * many of the functional tests record the transaction group number
61 * as part of their data. When reading old data, they verify that
62 * the transaction group number is less than the current, open txg.
63 * If you add a new test, please do this if applicable.
64 *
65 * (7) Threads are created with a reduced stack size, for sanity checking.
66 * Therefore, it's important not to allocate huge buffers on the stack.
67 *
68 * When run with no arguments, ztest runs for about five minutes and
69 * produces no output if successful. To get a little bit of information,
70 * specify -V. To get more information, specify -VV, and so on.
71 *
72 * To turn this into an overnight stress test, use -T to specify run time.
73 *
74 * You can ask for more vdevs [-v], datasets [-d], or threads [-t]
75 * to increase the pool capacity, fanout, and overall stress level.
76 *
77 * Use the -k option to set the desired frequency of kills.
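 *
 * For example, an overnight run with extra verbosity and a 90% kill rate
 * (the values here are purely illustrative) might look like:
 *
 *	ztest -VV -T 28800 -k 90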
78 *
79 * When ztest invokes itself it passes all relevant information through a
80 * temporary file which is mmap-ed in the child process. This allows shared
81 * memory to survive the exec syscall. The ztest_shared_hdr_t struct is always
82 * stored at offset 0 of this file and contains information on the size and
83 * number of shared structures in the file. The information stored in this file
84 * must remain backwards compatible with older versions of ztest so that
85 * ztest can invoke them during backwards compatibility testing (-B).
86 */
87
88 #include <sys/zfs_context.h>
89 #include <sys/spa.h>
90 #include <sys/dmu.h>
91 #include <sys/txg.h>
92 #include <sys/dbuf.h>
93 #include <sys/zap.h>
94 #include <sys/dmu_objset.h>
95 #include <sys/poll.h>
96 #include <sys/stat.h>
97 #include <sys/time.h>
98 #include <sys/wait.h>
99 #include <sys/mman.h>
100 #include <sys/resource.h>
101 #include <sys/zio.h>
102 #include <sys/zil.h>
103 #include <sys/zil_impl.h>
104 #include <sys/vdev_impl.h>
105 #include <sys/vdev_file.h>
106 #include <sys/spa_impl.h>
107 #include <sys/metaslab_impl.h>
108 #include <sys/dsl_prop.h>
109 #include <sys/dsl_dataset.h>
110 #include <sys/dsl_destroy.h>
111 #include <sys/dsl_scan.h>
112 #include <sys/zio_checksum.h>
113 #include <sys/refcount.h>
114 #include <sys/zfeature.h>
115 #include <sys/dsl_userhold.h>
116 #include <stdio.h>
117 #include <stdio_ext.h>
118 #include <stdlib.h>
119 #include <unistd.h>
120 #include <signal.h>
121 #include <umem.h>
122 #include <dlfcn.h>
123 #include <ctype.h>
124 #include <math.h>
125 #include <sys/fs/zfs.h>
126 #include <libnvpair.h>
127
128 static int ztest_fd_data = -1;
129 static int ztest_fd_rand = -1;
130
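/*
 * Header of the mmap()'d temporary file shared across exec; it lives at
 * offset 0 and records the size and count of each shared section below.
 */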
131 typedef struct ztest_shared_hdr {
132 uint64_t zh_hdr_size;
133 uint64_t zh_opts_size;
134 uint64_t zh_size;
135 uint64_t zh_stats_size;
136 uint64_t zh_stats_count;
137 uint64_t zh_ds_size;
138 uint64_t zh_ds_count;
139 } ztest_shared_hdr_t;
140
141 static ztest_shared_hdr_t *ztest_shared_hdr;
142
143 typedef struct ztest_shared_opts {
144 char zo_pool[MAXNAMELEN];
145 char zo_dir[MAXNAMELEN];
146 char zo_alt_ztest[MAXNAMELEN];
147 char zo_alt_libpath[MAXNAMELEN];
148 uint64_t zo_vdevs;
149 uint64_t zo_vdevtime;
150 size_t zo_vdev_size;
151 int zo_ashift;
152 int zo_mirrors;
153 int zo_raidz;
154 int zo_raidz_parity;
155 int zo_datasets;
156 int zo_threads;
157 uint64_t zo_passtime;
158 uint64_t zo_killrate;
159 int zo_verbose;
160 int zo_init;
161 uint64_t zo_time;
162 uint64_t zo_maxloops;
163 uint64_t zo_metaslab_gang_bang;
164 } ztest_shared_opts_t;
165
166 static const ztest_shared_opts_t ztest_opts_defaults = {
167 .zo_pool = { 'z', 't', 'e', 's', 't', '\0' },
168 .zo_dir = { '/', 't', 'm', 'p', '\0' },
169 .zo_alt_ztest = { '\0' },
170 .zo_alt_libpath = { '\0' },
171 .zo_vdevs = 5,
172 .zo_ashift = SPA_MINBLOCKSHIFT,
173 .zo_mirrors = 2,
174 .zo_raidz = 4,
175 .zo_raidz_parity = 1,
176 .zo_vdev_size = SPA_MINDEVSIZE,
177 .zo_datasets = 7,
178 .zo_threads = 23,
179 .zo_passtime = 60, /* 60 seconds */
180 .zo_killrate = 70, /* 70% kill rate */
181 .zo_verbose = 0,
182 .zo_init = 1,
183 .zo_time = 300, /* 5 minutes */
184 .zo_maxloops = 50, /* max loops during spa_freeze() */
185 .zo_metaslab_gang_bang = 32 << 10
186 };
187
188 extern uint64_t metaslab_gang_bang;
189 extern uint64_t metaslab_df_alloc_threshold;
190
191 static ztest_shared_opts_t *ztest_shared_opts;
192 static ztest_shared_opts_t ztest_opts;
193
194 typedef struct ztest_shared_ds {
195 uint64_t zd_seq;
196 } ztest_shared_ds_t;
197
198 static ztest_shared_ds_t *ztest_shared_ds;
199 #define ZTEST_GET_SHARED_DS(d) (&ztest_shared_ds[d])
200
201 #define BT_MAGIC 0x123456789abcdefULL
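/*
 * Upper bound on the number of faults ztest may inject while still
 * expecting the pool to survive, derived from the mirror and raidz-parity
 * redundancy configured for this run.
 */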
202 #define MAXFAULTS() \
203 (MAX(zs->zs_mirrors, 1) * (ztest_opts.zo_raidz_parity + 1) - 1)
204
205 enum ztest_io_type {
206 ZTEST_IO_WRITE_TAG,
207 ZTEST_IO_WRITE_PATTERN,
208 ZTEST_IO_WRITE_ZEROES,
209 ZTEST_IO_TRUNCATE,
210 ZTEST_IO_SETATTR,
211 ZTEST_IO_REWRITE,
212 ZTEST_IO_TYPES
213 };
214
215 typedef struct ztest_block_tag {
216 uint64_t bt_magic;
217 uint64_t bt_objset;
218 uint64_t bt_object;
219 uint64_t bt_offset;
220 uint64_t bt_gen;
221 uint64_t bt_txg;
222 uint64_t bt_crtxg;
223 } ztest_block_tag_t;
224
225 typedef struct bufwad {
226 uint64_t bw_index;
227 uint64_t bw_txg;
228 uint64_t bw_data;
229 } bufwad_t;
230
231 /*
232 * XXX -- fix zfs range locks to be generic so we can use them here.
233 */
234 typedef enum {
235 RL_READER,
236 RL_WRITER,
237 RL_APPEND
238 } rl_type_t;
239
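/*
 * Minimal reader/writer lock used to emulate object and range locking:
 * rll_writer holds the owning thread, rll_readers counts active readers,
 * and rll_cv wakes waiters when the lock drains (see ztest_rll_lock()).
 */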
240 typedef struct rll {
241 void *rll_writer;
242 int rll_readers;
243 kmutex_t rll_lock;
244 kcondvar_t rll_cv;
245 } rll_t;
246
247 typedef struct rl {
248 uint64_t rl_object;
249 uint64_t rl_offset;
250 uint64_t rl_size;
251 rll_t *rl_lock;
252 } rl_t;
253
254 #define ZTEST_RANGE_LOCKS 64
255 #define ZTEST_OBJECT_LOCKS 64
256
257 /*
258 * Object descriptor. Used as a template for object lookup/create/remove.
259 */
260 typedef struct ztest_od {
261 uint64_t od_dir;
262 uint64_t od_object;
263 dmu_object_type_t od_type;
264 dmu_object_type_t od_crtype;
265 uint64_t od_blocksize;
266 uint64_t od_crblocksize;
267 uint64_t od_gen;
268 uint64_t od_crgen;
269 char od_name[MAXNAMELEN];
270 } ztest_od_t;
271
272 /*
273 * Per-dataset state.
274 */
275 typedef struct ztest_ds {
276 ztest_shared_ds_t *zd_shared;
277 objset_t *zd_os;
278 krwlock_t zd_zilog_lock;
279 zilog_t *zd_zilog;
280 ztest_od_t *zd_od; /* debugging aid */
281 char zd_name[MAXNAMELEN];
282 kmutex_t zd_dirobj_lock;
283 rll_t zd_object_lock[ZTEST_OBJECT_LOCKS];
284 rll_t zd_range_lock[ZTEST_RANGE_LOCKS];
285 } ztest_ds_t;
286
287 /*
288 * Per-iteration state.
289 */
290 typedef void ztest_func_t(ztest_ds_t *zd, uint64_t id);
291
292 typedef struct ztest_info {
293 ztest_func_t *zi_func; /* test function */
294 uint64_t zi_iters; /* iterations per execution */
295 uint64_t *zi_interval; /* execute every <interval> seconds */
296 } ztest_info_t;
297
298 typedef struct ztest_shared_callstate {
299 uint64_t zc_count; /* per-pass count */
300 uint64_t zc_time; /* per-pass time */
301 uint64_t zc_next; /* next time to call this function */
302 } ztest_shared_callstate_t;
303
304 static ztest_shared_callstate_t *ztest_shared_callstate;
305 #define ZTEST_GET_SHARED_CALLSTATE(c) (&ztest_shared_callstate[c])
306
307 /*
308 * Note: these aren't static because we want dladdr() to work.
309 */
310 ztest_func_t ztest_dmu_read_write;
311 ztest_func_t ztest_dmu_write_parallel;
312 ztest_func_t ztest_dmu_object_alloc_free;
313 ztest_func_t ztest_dmu_commit_callbacks;
314 ztest_func_t ztest_zap;
315 ztest_func_t ztest_zap_parallel;
316 ztest_func_t ztest_zil_commit;
317 ztest_func_t ztest_zil_remount;
318 ztest_func_t ztest_dmu_read_write_zcopy;
319 ztest_func_t ztest_dmu_objset_create_destroy;
320 ztest_func_t ztest_dmu_prealloc;
321 ztest_func_t ztest_fzap;
322 ztest_func_t ztest_dmu_snapshot_create_destroy;
323 ztest_func_t ztest_dsl_prop_get_set;
324 ztest_func_t ztest_spa_prop_get_set;
325 ztest_func_t ztest_spa_create_destroy;
326 ztest_func_t ztest_fault_inject;
327 ztest_func_t ztest_ddt_repair;
328 ztest_func_t ztest_dmu_snapshot_hold;
329 ztest_func_t ztest_spa_rename;
330 ztest_func_t ztest_scrub;
331 ztest_func_t ztest_dsl_dataset_promote_busy;
332 ztest_func_t ztest_vdev_attach_detach;
333 ztest_func_t ztest_vdev_LUN_growth;
334 ztest_func_t ztest_vdev_add_remove;
335 ztest_func_t ztest_vdev_aux_add_remove;
336 ztest_func_t ztest_split_pool;
337 ztest_func_t ztest_reguid;
338 ztest_func_t ztest_spa_upgrade;
339
340 uint64_t zopt_always = 0ULL * NANOSEC; /* all the time */
341 uint64_t zopt_incessant = 1ULL * NANOSEC / 10; /* every 1/10 second */
342 uint64_t zopt_often = 1ULL * NANOSEC; /* every second */
343 uint64_t zopt_sometimes = 10ULL * NANOSEC; /* every 10 seconds */
344 uint64_t zopt_rarely = 60ULL * NANOSEC; /* every 60 seconds */
345
346 ztest_info_t ztest_info[] = {
347 { ztest_dmu_read_write, 1, &zopt_always },
348 { ztest_dmu_write_parallel, 10, &zopt_always },
349 { ztest_dmu_object_alloc_free, 1, &zopt_always },
350 { ztest_dmu_commit_callbacks, 1, &zopt_always },
351 { ztest_zap, 30, &zopt_always },
352 { ztest_zap_parallel, 100, &zopt_always },
353 { ztest_split_pool, 1, &zopt_always },
354 { ztest_zil_commit, 1, &zopt_incessant },
355 { ztest_zil_remount, 1, &zopt_sometimes },
356 { ztest_dmu_read_write_zcopy, 1, &zopt_often },
357 { ztest_dmu_objset_create_destroy, 1, &zopt_often },
358 { ztest_dsl_prop_get_set, 1, &zopt_often },
359 { ztest_spa_prop_get_set, 1, &zopt_sometimes },
360 #if 0
361 { ztest_dmu_prealloc, 1, &zopt_sometimes },
362 #endif
363 { ztest_fzap, 1, &zopt_sometimes },
364 { ztest_dmu_snapshot_create_destroy, 1, &zopt_sometimes },
365 { ztest_spa_create_destroy, 1, &zopt_sometimes },
366 { ztest_fault_inject, 1, &zopt_sometimes },
367 { ztest_ddt_repair, 1, &zopt_sometimes },
368 { ztest_dmu_snapshot_hold, 1, &zopt_sometimes },
369 { ztest_reguid, 1, &zopt_sometimes },
370 { ztest_spa_rename, 1, &zopt_rarely },
371 { ztest_scrub, 1, &zopt_rarely },
372 { ztest_spa_upgrade, 1, &zopt_rarely },
373 { ztest_dsl_dataset_promote_busy, 1, &zopt_rarely },
374 { ztest_vdev_attach_detach, 1, &zopt_sometimes },
375 { ztest_vdev_LUN_growth, 1, &zopt_rarely },
376 { ztest_vdev_add_remove, 1,
377 &ztest_opts.zo_vdevtime },
378 { ztest_vdev_aux_add_remove, 1,
379 &ztest_opts.zo_vdevtime },
380 };
381
382 #define ZTEST_FUNCS (sizeof (ztest_info) / sizeof (ztest_info_t))
383
384 /*
385 * The following struct is used to hold a list of uncalled commit callbacks.
386 * The callbacks are ordered by txg number.
387 */
388 typedef struct ztest_cb_list {
389 kmutex_t zcl_callbacks_lock;
390 list_t zcl_callbacks;
391 } ztest_cb_list_t;
392
393 /*
394 * Stuff we need to share writably between parent and child.
395 */
396 typedef struct ztest_shared {
397 boolean_t zs_do_init;
398 hrtime_t zs_proc_start;
399 hrtime_t zs_proc_stop;
400 hrtime_t zs_thread_start;
401 hrtime_t zs_thread_stop;
402 hrtime_t zs_thread_kill;
403 uint64_t zs_enospc_count;
404 uint64_t zs_vdev_next_leaf;
405 uint64_t zs_vdev_aux;
406 uint64_t zs_alloc;
407 uint64_t zs_space;
408 uint64_t zs_splits;
409 uint64_t zs_mirrors;
410 uint64_t zs_metaslab_sz;
411 uint64_t zs_metaslab_df_alloc_threshold;
412 uint64_t zs_guid;
413 } ztest_shared_t;
414
415 #define ID_PARALLEL -1ULL
416
417 static char ztest_dev_template[] = "%s/%s.%llua";
418 static char ztest_aux_template[] = "%s/%s.%s.%llu";
419 ztest_shared_t *ztest_shared;
420
421 static spa_t *ztest_spa = NULL;
422 static ztest_ds_t *ztest_ds;
423
424 static kmutex_t ztest_vdev_lock;
425
426 /*
427 * The ztest_name_lock protects the pool and dataset namespace used by
428 * the individual tests. To modify the namespace, consumers must grab
429 * this lock as writer. Grabbing the lock as reader will ensure that the
430 * namespace does not change while the lock is held.
431 */
432 static krwlock_t ztest_name_lock;
433
434 static boolean_t ztest_dump_core = B_TRUE;
435 static boolean_t ztest_exiting;
436
437 /* Global commit callback list */
438 static ztest_cb_list_t zcl;
439 /* Commit cb delay */
440 static uint64_t zc_min_txg_delay = UINT64_MAX;
441 static int zc_cb_counter = 0;
442
443 /*
444 * Minimum number of commit callbacks that need to be registered for us to check
445 * whether the minimum txg delay is acceptable.
446 */
447 #define ZTEST_COMMIT_CB_MIN_REG 100
448
449 /*
450 * If a number of txgs equal to this threshold have been created after a commit
451 * callback has been registered but not called, then we assume there is an
452 * implementation bug.
453 */
454 #define ZTEST_COMMIT_CB_THRESH (TXG_CONCURRENT_STATES + 1000)
455
456 extern uint64_t metaslab_gang_bang;
457 extern uint64_t metaslab_df_alloc_threshold;
458
459 enum ztest_object {
460 ZTEST_META_DNODE = 0,
461 ZTEST_DIROBJ,
462 ZTEST_OBJECTS
463 };
464
465 static void usage(boolean_t) __NORETURN;
466
467 /*
468 * These libumem hooks provide a reasonable set of defaults for the allocator's
469 * debugging facilities.
470 */
471 const char *
472 _umem_debug_init(void)
473 {
474 return ("default,verbose"); /* $UMEM_DEBUG setting */
475 }
476
477 const char *
478 _umem_logging_init(void)
479 {
480 return ("fail,contents"); /* $UMEM_LOGGING setting */
481 }
482
483 #define FATAL_MSG_SZ 1024
484
485 char *fatal_msg;
486
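/*
 * Print a formatted error message (optionally appending strerror(errno)),
 * save it in fatal_msg for post-mortem debugging, then abort() or exit(3).
 */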
487 static void
488 fatal(int do_perror, char *message, ...)
489 {
490 va_list args;
491 int save_errno = errno;
492 char *buf;
493
494 (void) fflush(stdout);
495 buf = umem_alloc(FATAL_MSG_SZ, UMEM_NOFAIL);
496
497 va_start(args, message);
498 (void) sprintf(buf, "ztest: ");
499 /* LINTED */
500 (void) vsprintf(buf + strlen(buf), message, args);
501 va_end(args);
502 if (do_perror) {
503 (void) snprintf(buf + strlen(buf), FATAL_MSG_SZ - strlen(buf),
504 ": %s", strerror(save_errno));
505 }
506 (void) fprintf(stderr, "%s\n", buf);
507 fatal_msg = buf; /* to ease debugging */
508 if (ztest_dump_core)
509 abort();
510 exit(3);
511 }
512
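/*
 * Convert a size suffix into a power-of-two shift:
 * "" or "B" -> 0, "K"/"KB" -> 10, "M" -> 20, and so on.
 * Anything else is reported as a usage error.
 */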
513 static int
514 str2shift(const char *buf)
515 {
516 const char *ends = "BKMGTPEZ";
517 int i;
518
519 if (buf[0] == '\0')
520 return (0);
521 for (i = 0; i < strlen(ends); i++) {
522 if (toupper(buf[0]) == ends[i])
523 break;
524 }
525 if (i == strlen(ends)) {
526 (void) fprintf(stderr, "ztest: invalid bytes suffix: %s\n",
527 buf);
528 usage(B_FALSE);
529 }
530 if (buf[1] == '\0' || (toupper(buf[1]) == 'B' && buf[2] == '\0')) {
531 return (10*i);
532 }
533 (void) fprintf(stderr, "ztest: invalid bytes suffix: %s\n", buf);
534 usage(B_FALSE);
535 /* NOTREACHED */
536 }
537
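/*
 * Parse a human-readable number such as "64M" or "2.5G" into a uint64_t,
 * rejecting values that would overflow.
 */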
538 static uint64_t
539 nicenumtoull(const char *buf)
540 {
541 char *end;
542 uint64_t val;
543
544 val = strtoull(buf, &end, 0);
545 if (end == buf) {
546 (void) fprintf(stderr, "ztest: bad numeric value: %s\n", buf);
547 usage(B_FALSE);
548 } else if (end[0] == '.') {
549 double fval = strtod(buf, &end);
550 fval *= pow(2, str2shift(end));
551 if (fval > UINT64_MAX) {
552 (void) fprintf(stderr, "ztest: value too large: %s\n",
553 buf);
554 usage(B_FALSE);
555 }
556 val = (uint64_t)fval;
557 } else {
558 int shift = str2shift(end);
559 if (shift >= 64 || (val << shift) >> shift != val) {
560 (void) fprintf(stderr, "ztest: value too large: %s\n",
561 buf);
562 usage(B_FALSE);
563 }
564 val <<= shift;
565 }
566 return (val);
567 }
568
569 static void
570 usage(boolean_t requested)
571 {
572 const ztest_shared_opts_t *zo = &ztest_opts_defaults;
573
574 char nice_vdev_size[10];
575 char nice_gang_bang[10];
576 FILE *fp = requested ? stdout : stderr;
577
578 nicenum(zo->zo_vdev_size, nice_vdev_size);
579 nicenum(zo->zo_metaslab_gang_bang, nice_gang_bang);
580
581 (void) fprintf(fp, "Usage: %s\n"
582 "\t[-v vdevs (default: %llu)]\n"
583 "\t[-s size_of_each_vdev (default: %s)]\n"
584 "\t[-a alignment_shift (default: %d)] use 0 for random\n"
585 "\t[-m mirror_copies (default: %d)]\n"
586 "\t[-r raidz_disks (default: %d)]\n"
587 "\t[-R raidz_parity (default: %d)]\n"
588 "\t[-d datasets (default: %d)]\n"
589 "\t[-t threads (default: %d)]\n"
590 "\t[-g gang_block_threshold (default: %s)]\n"
591 "\t[-i init_count (default: %d)] initialize pool i times\n"
592 "\t[-k kill_percentage (default: %llu%%)]\n"
593 "\t[-p pool_name (default: %s)]\n"
594 "\t[-f dir (default: %s)] file directory for vdev files\n"
595 "\t[-V] verbose (use multiple times for ever more blather)\n"
596 "\t[-E] use existing pool instead of creating new one\n"
597 "\t[-T time (default: %llu sec)] total run time\n"
598 "\t[-F freezeloops (default: %llu)] max loops in spa_freeze()\n"
599 "\t[-P passtime (default: %llu sec)] time per pass\n"
600 "\t[-B alt_ztest (default: <none>)] alternate ztest path\n"
601 "\t[-h] (print help)\n"
602 "",
603 zo->zo_pool,
604 (u_longlong_t)zo->zo_vdevs, /* -v */
605 nice_vdev_size, /* -s */
606 zo->zo_ashift, /* -a */
607 zo->zo_mirrors, /* -m */
608 zo->zo_raidz, /* -r */
609 zo->zo_raidz_parity, /* -R */
610 zo->zo_datasets, /* -d */
611 zo->zo_threads, /* -t */
612 nice_gang_bang, /* -g */
613 zo->zo_init, /* -i */
614 (u_longlong_t)zo->zo_killrate, /* -k */
615 zo->zo_pool, /* -p */
616 zo->zo_dir, /* -f */
617 (u_longlong_t)zo->zo_time, /* -T */
618 (u_longlong_t)zo->zo_maxloops, /* -F */
619 (u_longlong_t)zo->zo_passtime);
620 exit(requested ? 0 : 1);
621 }
622
623 static void
624 process_options(int argc, char **argv)
625 {
626 char *path;
627 ztest_shared_opts_t *zo = &ztest_opts;
628
629 int opt;
630 uint64_t value;
631 char altdir[MAXNAMELEN] = { 0 };
632
633 bcopy(&ztest_opts_defaults, zo, sizeof (*zo));
634
635 while ((opt = getopt(argc, argv,
636 "v:s:a:m:r:R:d:t:g:i:k:p:f:VET:P:hF:B:")) != EOF) {
637 value = 0;
638 switch (opt) {
639 case 'v':
640 case 's':
641 case 'a':
642 case 'm':
643 case 'r':
644 case 'R':
645 case 'd':
646 case 't':
647 case 'g':
648 case 'i':
649 case 'k':
650 case 'T':
651 case 'P':
652 case 'F':
653 value = nicenumtoull(optarg);
654 }
655 switch (opt) {
656 case 'v':
657 zo->zo_vdevs = value;
658 break;
659 case 's':
660 zo->zo_vdev_size = MAX(SPA_MINDEVSIZE, value);
661 break;
662 case 'a':
663 zo->zo_ashift = value;
664 break;
665 case 'm':
666 zo->zo_mirrors = value;
667 break;
668 case 'r':
669 zo->zo_raidz = MAX(1, value);
670 break;
671 case 'R':
672 zo->zo_raidz_parity = MIN(MAX(value, 1), 3);
673 break;
674 case 'd':
675 zo->zo_datasets = MAX(1, value);
676 break;
677 case 't':
678 zo->zo_threads = MAX(1, value);
679 break;
680 case 'g':
681 zo->zo_metaslab_gang_bang = MAX(SPA_MINBLOCKSIZE << 1,
682 value);
683 break;
684 case 'i':
685 zo->zo_init = value;
686 break;
687 case 'k':
688 zo->zo_killrate = value;
689 break;
690 case 'p':
691 (void) strlcpy(zo->zo_pool, optarg,
692 sizeof (zo->zo_pool));
693 break;
694 case 'f':
695 path = realpath(optarg, NULL);
696 if (path == NULL) {
697 (void) fprintf(stderr, "error: %s: %s\n",
698 optarg, strerror(errno));
699 usage(B_FALSE);
700 } else {
701 (void) strlcpy(zo->zo_dir, path,
702 sizeof (zo->zo_dir));
703 }
704 break;
705 case 'V':
706 zo->zo_verbose++;
707 break;
708 case 'E':
709 zo->zo_init = 0;
710 break;
711 case 'T':
712 zo->zo_time = value;
713 break;
714 case 'P':
715 zo->zo_passtime = MAX(1, value);
716 break;
717 case 'F':
718 zo->zo_maxloops = MAX(1, value);
719 break;
720 case 'B':
721 (void) strlcpy(altdir, optarg, sizeof (altdir));
722 break;
723 case 'h':
724 usage(B_TRUE);
725 break;
726 case '?':
727 default:
728 usage(B_FALSE);
729 break;
730 }
731 }
732
733 zo->zo_raidz_parity = MIN(zo->zo_raidz_parity, zo->zo_raidz - 1);
734
735 zo->zo_vdevtime =
736 (zo->zo_vdevs > 0 ? zo->zo_time * NANOSEC / zo->zo_vdevs :
737 UINT64_MAX >> 2);
738
739 if (strlen(altdir) > 0) {
740 char *cmd;
741 char *realaltdir;
742 char *bin;
743 char *ztest;
744 char *isa;
745 int isalen;
746
747 cmd = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
748 realaltdir = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
749
750 VERIFY(NULL != realpath(getexecname(), cmd));
751 if (0 != access(altdir, F_OK)) {
752 ztest_dump_core = B_FALSE;
753 fatal(B_TRUE, "invalid alternate ztest path: %s",
754 altdir);
755 }
756 VERIFY(NULL != realpath(altdir, realaltdir));
757
758 /*
759 * 'cmd' should be of the form "<anything>/usr/bin/<isa>/ztest".
760 * We want to extract <isa> to determine if we should use
761 * 32 or 64 bit binaries.
762 */
763 bin = strstr(cmd, "/usr/bin/");
764 ztest = strstr(bin, "/ztest");
765 isa = bin + 9;
766 isalen = ztest - isa;
767 (void) snprintf(zo->zo_alt_ztest, sizeof (zo->zo_alt_ztest),
768 "%s/usr/bin/%.*s/ztest", realaltdir, isalen, isa);
769 (void) snprintf(zo->zo_alt_libpath, sizeof (zo->zo_alt_libpath),
770 "%s/usr/lib/%.*s", realaltdir, isalen, isa);
771
772 if (0 != access(zo->zo_alt_ztest, X_OK)) {
773 ztest_dump_core = B_FALSE;
774 fatal(B_TRUE, "invalid alternate ztest: %s",
775 zo->zo_alt_ztest);
776 } else if (0 != access(zo->zo_alt_libpath, X_OK)) {
777 ztest_dump_core = B_FALSE;
778 fatal(B_TRUE, "invalid alternate lib directory %s",
779 zo->zo_alt_libpath);
780 }
781
782 umem_free(cmd, MAXPATHLEN);
783 umem_free(realaltdir, MAXPATHLEN);
784 }
785 }
786
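/*
 * Simulate a power failure: record the current allocation statistics in
 * the shared state, then SIGKILL this process (see item (5) above).
 */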
787 static void
788 ztest_kill(ztest_shared_t *zs)
789 {
790 zs->zs_alloc = metaslab_class_get_alloc(spa_normal_class(ztest_spa));
791 zs->zs_space = metaslab_class_get_space(spa_normal_class(ztest_spa));
792 (void) kill(getpid(), SIGKILL);
793 }
794
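/*
 * Return a random number in [0, range), read from /dev/urandom
 * (opened once as ztest_fd_rand).
 */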
795 static uint64_t
796 ztest_random(uint64_t range)
797 {
798 uint64_t r;
799
800 ASSERT3S(ztest_fd_rand, >=, 0);
801
802 if (range == 0)
803 return (0);
804
805 if (read(ztest_fd_rand, &r, sizeof (r)) != sizeof (r))
806 fatal(1, "short read from /dev/urandom");
807
808 return (r % range);
809 }
810
811 /* ARGSUSED */
812 static void
813 ztest_record_enospc(const char *s)
814 {
815 ztest_shared->zs_enospc_count++;
816 }
817
818 static uint64_t
819 ztest_get_ashift(void)
820 {
821 if (ztest_opts.zo_ashift == 0)
822 return (SPA_MINBLOCKSHIFT + ztest_random(3));
823 return (ztest_opts.zo_ashift);
824 }
825
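/*
 * Build the nvlist config for a single file vdev. If no path is given,
 * one is generated from the pool name and the next leaf or aux index;
 * if a size is given, the backing file is created and truncated to it.
 */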
826 static nvlist_t *
827 make_vdev_file(char *path, char *aux, char *pool, size_t size, uint64_t ashift)
828 {
829 char *pathbuf;
830 uint64_t vdev;
831 nvlist_t *file;
832
833 pathbuf = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
834
835 if (ashift == 0)
836 ashift = ztest_get_ashift();
837
838 if (path == NULL) {
839 path = pathbuf;
840
841 if (aux != NULL) {
842 vdev = ztest_shared->zs_vdev_aux;
843 (void) snprintf(path, MAXPATHLEN,
844 ztest_aux_template, ztest_opts.zo_dir,
845 pool == NULL ? ztest_opts.zo_pool : pool,
846 aux, vdev);
847 } else {
848 vdev = ztest_shared->zs_vdev_next_leaf++;
849 (void) snprintf(path, MAXPATHLEN,
850 ztest_dev_template, ztest_opts.zo_dir,
851 pool == NULL ? ztest_opts.zo_pool : pool, vdev);
852 }
853 }
854
855 if (size != 0) {
856 int fd = open(path, O_RDWR | O_CREAT | O_TRUNC, 0666);
857 if (fd == -1)
858 fatal(1, "can't open %s", path);
859 if (ftruncate(fd, size) != 0)
860 fatal(1, "can't ftruncate %s", path);
861 (void) close(fd);
862 }
863
864 VERIFY(nvlist_alloc(&file, NV_UNIQUE_NAME, 0) == 0);
865 VERIFY(nvlist_add_string(file, ZPOOL_CONFIG_TYPE, VDEV_TYPE_FILE) == 0);
866 VERIFY(nvlist_add_string(file, ZPOOL_CONFIG_PATH, path) == 0);
867 VERIFY(nvlist_add_uint64(file, ZPOOL_CONFIG_ASHIFT, ashift) == 0);
868 umem_free(pathbuf, MAXPATHLEN);
869
870 return (file);
871 }
872
873 static nvlist_t *
874 make_vdev_raidz(char *path, char *aux, char *pool, size_t size,
875 uint64_t ashift, int r)
876 {
877 nvlist_t *raidz, **child;
878 int c;
879
880 if (r < 2)
881 return (make_vdev_file(path, aux, pool, size, ashift));
882 child = umem_alloc(r * sizeof (nvlist_t *), UMEM_NOFAIL);
883
884 for (c = 0; c < r; c++)
885 child[c] = make_vdev_file(path, aux, pool, size, ashift);
886
887 VERIFY(nvlist_alloc(&raidz, NV_UNIQUE_NAME, 0) == 0);
888 VERIFY(nvlist_add_string(raidz, ZPOOL_CONFIG_TYPE,
889 VDEV_TYPE_RAIDZ) == 0);
890 VERIFY(nvlist_add_uint64(raidz, ZPOOL_CONFIG_NPARITY,
891 ztest_opts.zo_raidz_parity) == 0);
892 VERIFY(nvlist_add_nvlist_array(raidz, ZPOOL_CONFIG_CHILDREN,
893 child, r) == 0);
894
895 for (c = 0; c < r; c++)
896 nvlist_free(child[c]);
897
898 umem_free(child, r * sizeof (nvlist_t *));
899
900 return (raidz);
901 }
902
903 static nvlist_t *
904 make_vdev_mirror(char *path, char *aux, char *pool, size_t size,
905 uint64_t ashift, int r, int m)
906 {
907 nvlist_t *mirror, **child;
908 int c;
909
910 if (m < 1)
911 return (make_vdev_raidz(path, aux, pool, size, ashift, r));
912
913 child = umem_alloc(m * sizeof (nvlist_t *), UMEM_NOFAIL);
914
915 for (c = 0; c < m; c++)
916 child[c] = make_vdev_raidz(path, aux, pool, size, ashift, r);
917
918 VERIFY(nvlist_alloc(&mirror, NV_UNIQUE_NAME, 0) == 0);
919 VERIFY(nvlist_add_string(mirror, ZPOOL_CONFIG_TYPE,
920 VDEV_TYPE_MIRROR) == 0);
921 VERIFY(nvlist_add_nvlist_array(mirror, ZPOOL_CONFIG_CHILDREN,
922 child, m) == 0);
923
924 for (c = 0; c < m; c++)
925 nvlist_free(child[c]);
926
927 umem_free(child, m * sizeof (nvlist_t *));
928
929 return (mirror);
930 }
931
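/*
 * Assemble a root vdev config of 't' top-level vdevs, each an m-way mirror
 * of r-disk raidz groups (degenerating to plain files when m < 1 or r < 2).
 * When 'log' is set, every top-level vdev is marked as an intent-log device.
 */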
932 static nvlist_t *
933 make_vdev_root(char *path, char *aux, char *pool, size_t size, uint64_t ashift,
934 int log, int r, int m, int t)
935 {
936 nvlist_t *root, **child;
937 int c;
938
939 ASSERT(t > 0);
940
941 child = umem_alloc(t * sizeof (nvlist_t *), UMEM_NOFAIL);
942
943 for (c = 0; c < t; c++) {
944 child[c] = make_vdev_mirror(path, aux, pool, size, ashift,
945 r, m);
946 VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
947 log) == 0);
948 }
949
950 VERIFY(nvlist_alloc(&root, NV_UNIQUE_NAME, 0) == 0);
951 VERIFY(nvlist_add_string(root, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT) == 0);
952 VERIFY(nvlist_add_nvlist_array(root, aux ? aux : ZPOOL_CONFIG_CHILDREN,
953 child, t) == 0);
954
955 for (c = 0; c < t; c++)
956 nvlist_free(child[c]);
957
958 umem_free(child, t * sizeof (nvlist_t *));
959
960 return (root);
961 }
962
963 /*
964 * Find a random spa version. Returns a version in the
965 * range [initial_version, SPA_VERSION_FEATURES].
966 */
967 static uint64_t
968 ztest_random_spa_version(uint64_t initial_version)
969 {
970 uint64_t version = initial_version;
971
972 if (version <= SPA_VERSION_BEFORE_FEATURES) {
973 version = version +
974 ztest_random(SPA_VERSION_BEFORE_FEATURES - version + 1);
975 }
976
977 if (version > SPA_VERSION_BEFORE_FEATURES)
978 version = SPA_VERSION_FEATURES;
979
980 ASSERT(SPA_VERSION_IS_SUPPORTED(version));
981 return (version);
982 }
983
984 static int
985 ztest_random_blocksize(void)
986 {
987 return (1 << (SPA_MINBLOCKSHIFT +
988 ztest_random(SPA_MAXBLOCKSHIFT - SPA_MINBLOCKSHIFT + 1)));
989 }
990
991 static int
992 ztest_random_ibshift(void)
993 {
994 return (DN_MIN_INDBLKSHIFT +
995 ztest_random(DN_MAX_INDBLKSHIFT - DN_MIN_INDBLKSHIFT + 1));
996 }
997
998 static uint64_t
999 ztest_random_vdev_top(spa_t *spa, boolean_t log_ok)
1000 {
1001 uint64_t top;
1002 vdev_t *rvd = spa->spa_root_vdev;
1003 vdev_t *tvd;
1004
1005 ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
1006
1007 do {
1008 top = ztest_random(rvd->vdev_children);
1009 tvd = rvd->vdev_child[top];
1010 } while (tvd->vdev_ishole || (tvd->vdev_islog && !log_ok) ||
1011 tvd->vdev_mg == NULL || tvd->vdev_mg->mg_class == NULL);
1012
1013 return (top);
1014 }
1015
1016 static uint64_t
1017 ztest_random_dsl_prop(zfs_prop_t prop)
1018 {
1019 uint64_t value;
1020
1021 do {
1022 value = zfs_prop_random_value(prop, ztest_random(-1ULL));
1023 } while (prop == ZFS_PROP_CHECKSUM && value == ZIO_CHECKSUM_OFF);
1024
1025 return (value);
1026 }
1027
1028 static int
1029 ztest_dsl_prop_set_uint64(char *osname, zfs_prop_t prop, uint64_t value,
1030 boolean_t inherit)
1031 {
1032 const char *propname = zfs_prop_to_name(prop);
1033 const char *valname;
1034 char *setpoint;
1035 uint64_t curval;
1036 int error;
1037
1038 error = dsl_prop_set_int(osname, propname,
1039 (inherit ? ZPROP_SRC_NONE : ZPROP_SRC_LOCAL), value);
1040
1041 if (error == ENOSPC) {
1042 ztest_record_enospc(FTAG);
1043 return (error);
1044 }
1045 ASSERT0(error);
1046
1047 setpoint = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
1048 VERIFY0(dsl_prop_get_integer(osname, propname, &curval, setpoint));
1049
1050 if (ztest_opts.zo_verbose >= 6) {
1051 VERIFY(zfs_prop_index_to_string(prop, curval, &valname) == 0);
1052 (void) printf("%s %s = %s at '%s'\n",
1053 osname, propname, valname, setpoint);
1054 }
1055 umem_free(setpoint, MAXPATHLEN);
1056
1057 return (error);
1058 }
1059
1060 static int
1061 ztest_spa_prop_set_uint64(zpool_prop_t prop, uint64_t value)
1062 {
1063 spa_t *spa = ztest_spa;
1064 nvlist_t *props = NULL;
1065 int error;
1066
1067 VERIFY(nvlist_alloc(&props, NV_UNIQUE_NAME, 0) == 0);
1068 VERIFY(nvlist_add_uint64(props, zpool_prop_to_name(prop), value) == 0);
1069
1070 error = spa_prop_set(spa, props);
1071
1072 nvlist_free(props);
1073
1074 if (error == ENOSPC) {
1075 ztest_record_enospc(FTAG);
1076 return (error);
1077 }
1078 ASSERT0(error);
1079
1080 return (error);
1081 }
1082
1083 static void
1084 ztest_rll_init(rll_t *rll)
1085 {
1086 rll->rll_writer = NULL;
1087 rll->rll_readers = 0;
1088 mutex_init(&rll->rll_lock, NULL, MUTEX_DEFAULT, NULL);
1089 cv_init(&rll->rll_cv, NULL, CV_DEFAULT, NULL);
1090 }
1091
1092 static void
1093 ztest_rll_destroy(rll_t *rll)
1094 {
1095 ASSERT(rll->rll_writer == NULL);
1096 ASSERT(rll->rll_readers == 0);
1097 mutex_destroy(&rll->rll_lock);
1098 cv_destroy(&rll->rll_cv);
1099 }
1100
1101 static void
1102 ztest_rll_lock(rll_t *rll, rl_type_t type)
1103 {
1104 mutex_enter(&rll->rll_lock);
1105
1106 if (type == RL_READER) {
1107 while (rll->rll_writer != NULL)
1108 (void) cv_wait(&rll->rll_cv, &rll->rll_lock);
1109 rll->rll_readers++;
1110 } else {
1111 while (rll->rll_writer != NULL || rll->rll_readers)
1112 (void) cv_wait(&rll->rll_cv, &rll->rll_lock);
1113 rll->rll_writer = curthread;
1114 }
1115
1116 mutex_exit(&rll->rll_lock);
1117 }
1118
1119 static void
1120 ztest_rll_unlock(rll_t *rll)
1121 {
1122 mutex_enter(&rll->rll_lock);
1123
1124 if (rll->rll_writer) {
1125 ASSERT(rll->rll_readers == 0);
1126 rll->rll_writer = NULL;
1127 } else {
1128 ASSERT(rll->rll_readers != 0);
1129 ASSERT(rll->rll_writer == NULL);
1130 rll->rll_readers--;
1131 }
1132
1133 if (rll->rll_writer == NULL && rll->rll_readers == 0)
1134 cv_broadcast(&rll->rll_cv);
1135
1136 mutex_exit(&rll->rll_lock);
1137 }
1138
1139 static void
1140 ztest_object_lock(ztest_ds_t *zd, uint64_t object, rl_type_t type)
1141 {
1142 rll_t *rll = &zd->zd_object_lock[object & (ZTEST_OBJECT_LOCKS - 1)];
1143
1144 ztest_rll_lock(rll, type);
1145 }
1146
1147 static void
1148 ztest_object_unlock(ztest_ds_t *zd, uint64_t object)
1149 {
1150 rll_t *rll = &zd->zd_object_lock[object & (ZTEST_OBJECT_LOCKS - 1)];
1151
1152 ztest_rll_unlock(rll);
1153 }
1154
1155 static rl_t *
1156 ztest_range_lock(ztest_ds_t *zd, uint64_t object, uint64_t offset,
1157 uint64_t size, rl_type_t type)
1158 {
1159 uint64_t hash = object ^ (offset % (ZTEST_RANGE_LOCKS + 1));
1160 rll_t *rll = &zd->zd_range_lock[hash & (ZTEST_RANGE_LOCKS - 1)];
1161 rl_t *rl;
1162
1163 rl = umem_alloc(sizeof (*rl), UMEM_NOFAIL);
1164 rl->rl_object = object;
1165 rl->rl_offset = offset;
1166 rl->rl_size = size;
1167 rl->rl_lock = rll;
1168
1169 ztest_rll_lock(rll, type);
1170
1171 return (rl);
1172 }
1173
1174 static void
1175 ztest_range_unlock(rl_t *rl)
1176 {
1177 rll_t *rll = rl->rl_lock;
1178
1179 ztest_rll_unlock(rll);
1180
1181 umem_free(rl, sizeof (*rl));
1182 }
1183
1184 static void
1185 ztest_zd_init(ztest_ds_t *zd, ztest_shared_ds_t *szd, objset_t *os)
1186 {
1187 zd->zd_os = os;
1188 zd->zd_zilog = dmu_objset_zil(os);
1189 zd->zd_shared = szd;
1190 dmu_objset_name(os, zd->zd_name);
1191 int l;
1192
1193 if (zd->zd_shared != NULL)
1194 zd->zd_shared->zd_seq = 0;
1195
1196 rw_init(&zd->zd_zilog_lock, NULL, RW_DEFAULT, NULL);
1197 mutex_init(&zd->zd_dirobj_lock, NULL, MUTEX_DEFAULT, NULL);
1198
1199 for (l = 0; l < ZTEST_OBJECT_LOCKS; l++)
1200 ztest_rll_init(&zd->zd_object_lock[l]);
1201
1202 for (l = 0; l < ZTEST_RANGE_LOCKS; l++)
1203 ztest_rll_init(&zd->zd_range_lock[l]);
1204 }
1205
1206 static void
1207 ztest_zd_fini(ztest_ds_t *zd)
1208 {
1209 int l;
1210
1211 mutex_destroy(&zd->zd_dirobj_lock);
1212 rw_destroy(&zd->zd_zilog_lock);
1213
1214 for (l = 0; l < ZTEST_OBJECT_LOCKS; l++)
1215 ztest_rll_destroy(&zd->zd_object_lock[l]);
1216
1217 for (l = 0; l < ZTEST_RANGE_LOCKS; l++)
1218 ztest_rll_destroy(&zd->zd_range_lock[l]);
1219 }
1220
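/*
 * Use TXG_NOWAIT roughly one time in ten so that the ERESTART path in
 * ztest_tx_assign() is exercised as well as the common TXG_WAIT path.
 */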
1221 #define TXG_MIGHTWAIT (ztest_random(10) == 0 ? TXG_NOWAIT : TXG_WAIT)
1222
1223 static uint64_t
1224 ztest_tx_assign(dmu_tx_t *tx, uint64_t txg_how, const char *tag)
1225 {
1226 uint64_t txg;
1227 int error;
1228
1229 /*
1230 * Attempt to assign tx to some transaction group.
1231 */
1232 error = dmu_tx_assign(tx, txg_how);
1233 if (error) {
1234 if (error == ERESTART) {
1235 ASSERT(txg_how == TXG_NOWAIT);
1236 dmu_tx_wait(tx);
1237 } else {
1238 ASSERT3U(error, ==, ENOSPC);
1239 ztest_record_enospc(tag);
1240 }
1241 dmu_tx_abort(tx);
1242 return (0);
1243 }
1244 txg = dmu_tx_get_txg(tx);
1245 ASSERT(txg != 0);
1246 return (txg);
1247 }
1248
1249 static void
1250 ztest_pattern_set(void *buf, uint64_t size, uint64_t value)
1251 {
1252 uint64_t *ip = buf;
1253 uint64_t *ip_end = (uint64_t *)((uintptr_t)buf + (uintptr_t)size);
1254
1255 while (ip < ip_end)
1256 *ip++ = value;
1257 }
1258
1259 #ifndef NDEBUG
1260 static boolean_t
1261 ztest_pattern_match(void *buf, uint64_t size, uint64_t value)
1262 {
1263 uint64_t *ip = buf;
1264 uint64_t *ip_end = (uint64_t *)((uintptr_t)buf + (uintptr_t)size);
1265 uint64_t diff = 0;
1266
1267 while (ip < ip_end)
1268 diff |= (value - *ip++);
1269
1270 return (diff == 0);
1271 }
1272 #endif
1273
1274 static void
1275 ztest_bt_generate(ztest_block_tag_t *bt, objset_t *os, uint64_t object,
1276 uint64_t offset, uint64_t gen, uint64_t txg, uint64_t crtxg)
1277 {
1278 bt->bt_magic = BT_MAGIC;
1279 bt->bt_objset = dmu_objset_id(os);
1280 bt->bt_object = object;
1281 bt->bt_offset = offset;
1282 bt->bt_gen = gen;
1283 bt->bt_txg = txg;
1284 bt->bt_crtxg = crtxg;
1285 }
1286
1287 static void
1288 ztest_bt_verify(ztest_block_tag_t *bt, objset_t *os, uint64_t object,
1289 uint64_t offset, uint64_t gen, uint64_t txg, uint64_t crtxg)
1290 {
1291 ASSERT(bt->bt_magic == BT_MAGIC);
1292 ASSERT(bt->bt_objset == dmu_objset_id(os));
1293 ASSERT(bt->bt_object == object);
1294 ASSERT(bt->bt_offset == offset);
1295 ASSERT(bt->bt_gen <= gen);
1296 ASSERT(bt->bt_txg <= txg);
1297 ASSERT(bt->bt_crtxg == crtxg);
1298 }
1299
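/*
 * The block tag for an object lives in the last sizeof (ztest_block_tag_t)
 * bytes of its bonus buffer; return a pointer to it.
 */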
1300 static ztest_block_tag_t *
1301 ztest_bt_bonus(dmu_buf_t *db)
1302 {
1303 dmu_object_info_t doi;
1304 ztest_block_tag_t *bt;
1305
1306 dmu_object_info_from_db(db, &doi);
1307 ASSERT3U(doi.doi_bonus_size, <=, db->db_size);
1308 ASSERT3U(doi.doi_bonus_size, >=, sizeof (*bt));
1309 bt = (void *)((char *)db->db_data + doi.doi_bonus_size - sizeof (*bt));
1310
1311 return (bt);
1312 }
1313
1314 /*
1315 * ZIL logging ops
1316 */
1317
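/*
 * ztest drives the DMU directly (there are no znodes), so it repurposes
 * otherwise-unused lr_create_t fields to carry the object type, blocksize,
 * indirect block shift, bonus type and bonus length.
 */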
1318 #define lrz_type lr_mode
1319 #define lrz_blocksize lr_uid
1320 #define lrz_ibshift lr_gid
1321 #define lrz_bonustype lr_rdev
1322 #define lrz_bonuslen lr_crtime[1]
1323
1324 static void
1325 ztest_log_create(ztest_ds_t *zd, dmu_tx_t *tx, lr_create_t *lr)
1326 {
1327 char *name = (void *)(lr + 1); /* name follows lr */
1328 size_t namesize = strlen(name) + 1;
1329 itx_t *itx;
1330
1331 if (zil_replaying(zd->zd_zilog, tx))
1332 return;
1333
1334 itx = zil_itx_create(TX_CREATE, sizeof (*lr) + namesize);
1335 bcopy(&lr->lr_common + 1, &itx->itx_lr + 1,
1336 sizeof (*lr) + namesize - sizeof (lr_t));
1337
1338 zil_itx_assign(zd->zd_zilog, itx, tx);
1339 }
1340
1341 static void
1342 ztest_log_remove(ztest_ds_t *zd, dmu_tx_t *tx, lr_remove_t *lr, uint64_t object)
1343 {
1344 char *name = (void *)(lr + 1); /* name follows lr */
1345 size_t namesize = strlen(name) + 1;
1346 itx_t *itx;
1347
1348 if (zil_replaying(zd->zd_zilog, tx))
1349 return;
1350
1351 itx = zil_itx_create(TX_REMOVE, sizeof (*lr) + namesize);
1352 bcopy(&lr->lr_common + 1, &itx->itx_lr + 1,
1353 sizeof (*lr) + namesize - sizeof (lr_t));
1354
1355 itx->itx_oid = object;
1356 zil_itx_assign(zd->zd_zilog, itx, tx);
1357 }
1358
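/*
 * Log a write, picking WR_COPIED, WR_NEED_COPY or WR_INDIRECT at random
 * (large writes are forced to WR_INDIRECT); if copying the data in-line
 * fails, fall back to WR_NEED_COPY so ztest_get_data() fetches it later.
 */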
1359 static void
1360 ztest_log_write(ztest_ds_t *zd, dmu_tx_t *tx, lr_write_t *lr)
1361 {
1362 itx_t *itx;
1363 itx_wr_state_t write_state = ztest_random(WR_NUM_STATES);
1364
1365 if (zil_replaying(zd->zd_zilog, tx))
1366 return;
1367
1368 if (lr->lr_length > ZIL_MAX_LOG_DATA)
1369 write_state = WR_INDIRECT;
1370
1371 itx = zil_itx_create(TX_WRITE,
1372 sizeof (*lr) + (write_state == WR_COPIED ? lr->lr_length : 0));
1373
1374 if (write_state == WR_COPIED &&
1375 dmu_read(zd->zd_os, lr->lr_foid, lr->lr_offset, lr->lr_length,
1376 ((lr_write_t *)&itx->itx_lr) + 1, DMU_READ_NO_PREFETCH) != 0) {
1377 zil_itx_destroy(itx);
1378 itx = zil_itx_create(TX_WRITE, sizeof (*lr));
1379 write_state = WR_NEED_COPY;
1380 }
1381 itx->itx_private = zd;
1382 itx->itx_wr_state = write_state;
1383 itx->itx_sync = (ztest_random(8) == 0);
1384 itx->itx_sod += (write_state == WR_NEED_COPY ? lr->lr_length : 0);
1385
1386 bcopy(&lr->lr_common + 1, &itx->itx_lr + 1,
1387 sizeof (*lr) - sizeof (lr_t));
1388
1389 zil_itx_assign(zd->zd_zilog, itx, tx);
1390 }
1391
1392 static void
1393 ztest_log_truncate(ztest_ds_t *zd, dmu_tx_t *tx, lr_truncate_t *lr)
1394 {
1395 itx_t *itx;
1396
1397 if (zil_replaying(zd->zd_zilog, tx))
1398 return;
1399
1400 itx = zil_itx_create(TX_TRUNCATE, sizeof (*lr));
1401 bcopy(&lr->lr_common + 1, &itx->itx_lr + 1,
1402 sizeof (*lr) - sizeof (lr_t));
1403
1404 itx->itx_sync = B_FALSE;
1405 zil_itx_assign(zd->zd_zilog, itx, tx);
1406 }
1407
1408 static void
1409 ztest_log_setattr(ztest_ds_t *zd, dmu_tx_t *tx, lr_setattr_t *lr)
1410 {
1411 itx_t *itx;
1412
1413 if (zil_replaying(zd->zd_zilog, tx))
1414 return;
1415
1416 itx = zil_itx_create(TX_SETATTR, sizeof (*lr));
1417 bcopy(&lr->lr_common + 1, &itx->itx_lr + 1,
1418 sizeof (*lr) - sizeof (lr_t));
1419
1420 itx->itx_sync = B_FALSE;
1421 zil_itx_assign(zd->zd_zilog, itx, tx);
1422 }
1423
1424 /*
1425 * ZIL replay ops
1426 */
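/*
 * Replay an object create (or originate one, when lr_foid == 0): allocate
 * or claim the object, stamp a block tag into its bonus buffer, and enter
 * its name in the ZTEST_DIROBJ directory ZAP.
 */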
1427 static int
1428 ztest_replay_create(ztest_ds_t *zd, lr_create_t *lr, boolean_t byteswap)
1429 {
1430 char *name = (void *)(lr + 1); /* name follows lr */
1431 objset_t *os = zd->zd_os;
1432 ztest_block_tag_t *bbt;
1433 dmu_buf_t *db;
1434 dmu_tx_t *tx;
1435 uint64_t txg;
1436 int error = 0;
1437
1438 if (byteswap)
1439 byteswap_uint64_array(lr, sizeof (*lr));
1440
1441 ASSERT(lr->lr_doid == ZTEST_DIROBJ);
1442 ASSERT(name[0] != '\0');
1443
1444 tx = dmu_tx_create(os);
1445
1446 dmu_tx_hold_zap(tx, lr->lr_doid, B_TRUE, name);
1447
1448 if (lr->lrz_type == DMU_OT_ZAP_OTHER) {
1449 dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
1450 } else {
1451 dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
1452 }
1453
1454 txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
1455 if (txg == 0)
1456 return (ENOSPC);
1457
1458 ASSERT(dmu_objset_zil(os)->zl_replay == !!lr->lr_foid);
1459
1460 if (lr->lrz_type == DMU_OT_ZAP_OTHER) {
1461 if (lr->lr_foid == 0) {
1462 lr->lr_foid = zap_create(os,
1463 lr->lrz_type, lr->lrz_bonustype,
1464 lr->lrz_bonuslen, tx);
1465 } else {
1466 error = zap_create_claim(os, lr->lr_foid,
1467 lr->lrz_type, lr->lrz_bonustype,
1468 lr->lrz_bonuslen, tx);
1469 }
1470 } else {
1471 if (lr->lr_foid == 0) {
1472 lr->lr_foid = dmu_object_alloc(os,
1473 lr->lrz_type, 0, lr->lrz_bonustype,
1474 lr->lrz_bonuslen, tx);
1475 } else {
1476 error = dmu_object_claim(os, lr->lr_foid,
1477 lr->lrz_type, 0, lr->lrz_bonustype,
1478 lr->lrz_bonuslen, tx);
1479 }
1480 }
1481
1482 if (error) {
1483 ASSERT3U(error, ==, EEXIST);
1484 ASSERT(zd->zd_zilog->zl_replay);
1485 dmu_tx_commit(tx);
1486 return (error);
1487 }
1488
1489 ASSERT(lr->lr_foid != 0);
1490
1491 if (lr->lrz_type != DMU_OT_ZAP_OTHER)
1492 VERIFY3U(0, ==, dmu_object_set_blocksize(os, lr->lr_foid,
1493 lr->lrz_blocksize, lr->lrz_ibshift, tx));
1494
1495 VERIFY3U(0, ==, dmu_bonus_hold(os, lr->lr_foid, FTAG, &db));
1496 bbt = ztest_bt_bonus(db);
1497 dmu_buf_will_dirty(db, tx);
1498 ztest_bt_generate(bbt, os, lr->lr_foid, -1ULL, lr->lr_gen, txg, txg);
1499 dmu_buf_rele(db, FTAG);
1500
1501 VERIFY3U(0, ==, zap_add(os, lr->lr_doid, name, sizeof (uint64_t), 1,
1502 &lr->lr_foid, tx));
1503
1504 (void) ztest_log_create(zd, tx, lr);
1505
1506 dmu_tx_commit(tx);
1507
1508 return (0);
1509 }
1510
1511 static int
1512 ztest_replay_remove(ztest_ds_t *zd, lr_remove_t *lr, boolean_t byteswap)
1513 {
1514 char *name = (void *)(lr + 1); /* name follows lr */
1515 objset_t *os = zd->zd_os;
1516 dmu_object_info_t doi;
1517 dmu_tx_t *tx;
1518 uint64_t object, txg;
1519
1520 if (byteswap)
1521 byteswap_uint64_array(lr, sizeof (*lr));
1522
1523 ASSERT(lr->lr_doid == ZTEST_DIROBJ);
1524 ASSERT(name[0] != '\0');
1525
1526 VERIFY3U(0, ==,
1527 zap_lookup(os, lr->lr_doid, name, sizeof (object), 1, &object));
1528 ASSERT(object != 0);
1529
1530 ztest_object_lock(zd, object, RL_WRITER);
1531
1532 VERIFY3U(0, ==, dmu_object_info(os, object, &doi));
1533
1534 tx = dmu_tx_create(os);
1535
1536 dmu_tx_hold_zap(tx, lr->lr_doid, B_FALSE, name);
1537 dmu_tx_hold_free(tx, object, 0, DMU_OBJECT_END);
1538
1539 txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
1540 if (txg == 0) {
1541 ztest_object_unlock(zd, object);
1542 return (ENOSPC);
1543 }
1544
1545 if (doi.doi_type == DMU_OT_ZAP_OTHER) {
1546 VERIFY3U(0, ==, zap_destroy(os, object, tx));
1547 } else {
1548 VERIFY3U(0, ==, dmu_object_free(os, object, tx));
1549 }
1550
1551 VERIFY3U(0, ==, zap_remove(os, lr->lr_doid, name, tx));
1552
1553 (void) ztest_log_remove(zd, tx, lr, object);
1554
1555 dmu_tx_commit(tx);
1556
1557 ztest_object_unlock(zd, object);
1558
1559 return (0);
1560 }
1561
1562 static int
1563 ztest_replay_write(ztest_ds_t *zd, lr_write_t *lr, boolean_t byteswap)
1564 {
1565 objset_t *os = zd->zd_os;
1566 void *data = lr + 1; /* data follows lr */
1567 uint64_t offset, length;
1568 ztest_block_tag_t *bt = data;
1569 ztest_block_tag_t *bbt;
1570 uint64_t gen, txg, lrtxg, crtxg;
1571 dmu_object_info_t doi;
1572 dmu_tx_t *tx;
1573 dmu_buf_t *db;
1574 arc_buf_t *abuf = NULL;
1575 rl_t *rl;
1576
1577 if (byteswap)
1578 byteswap_uint64_array(lr, sizeof (*lr));
1579
1580 offset = lr->lr_offset;
1581 length = lr->lr_length;
1582
1583 /* If it's a dmu_sync() block, write the whole block */
1584 if (lr->lr_common.lrc_reclen == sizeof (lr_write_t)) {
1585 uint64_t blocksize = BP_GET_LSIZE(&lr->lr_blkptr);
1586 if (length < blocksize) {
1587 offset -= offset % blocksize;
1588 length = blocksize;
1589 }
1590 }
1591
1592 if (bt->bt_magic == BSWAP_64(BT_MAGIC))
1593 byteswap_uint64_array(bt, sizeof (*bt));
1594
1595 if (bt->bt_magic != BT_MAGIC)
1596 bt = NULL;
1597
1598 ztest_object_lock(zd, lr->lr_foid, RL_READER);
1599 rl = ztest_range_lock(zd, lr->lr_foid, offset, length, RL_WRITER);
1600
1601 VERIFY3U(0, ==, dmu_bonus_hold(os, lr->lr_foid, FTAG, &db));
1602
1603 dmu_object_info_from_db(db, &doi);
1604
1605 bbt = ztest_bt_bonus(db);
1606 ASSERT3U(bbt->bt_magic, ==, BT_MAGIC);
1607 gen = bbt->bt_gen;
1608 crtxg = bbt->bt_crtxg;
1609 lrtxg = lr->lr_common.lrc_txg;
1610
1611 tx = dmu_tx_create(os);
1612
1613 dmu_tx_hold_write(tx, lr->lr_foid, offset, length);
1614
1615 if (ztest_random(8) == 0 && length == doi.doi_data_block_size &&
1616 P2PHASE(offset, length) == 0)
1617 abuf = dmu_request_arcbuf(db, length);
1618
1619 txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
1620 if (txg == 0) {
1621 if (abuf != NULL)
1622 dmu_return_arcbuf(abuf);
1623 dmu_buf_rele(db, FTAG);
1624 ztest_range_unlock(rl);
1625 ztest_object_unlock(zd, lr->lr_foid);
1626 return (ENOSPC);
1627 }
1628
1629 if (bt != NULL) {
1630 /*
1631 * Usually, verify the old data before writing new data --
1632 * but not always, because we also want to verify correct
1633 * behavior when the data was not recently read into cache.
1634 */
1635 ASSERT(offset % doi.doi_data_block_size == 0);
1636 if (ztest_random(4) != 0) {
1637 int prefetch = ztest_random(2) ?
1638 DMU_READ_PREFETCH : DMU_READ_NO_PREFETCH;
1639 ztest_block_tag_t rbt;
1640
1641 VERIFY(dmu_read(os, lr->lr_foid, offset,
1642 sizeof (rbt), &rbt, prefetch) == 0);
1643 if (rbt.bt_magic == BT_MAGIC) {
1644 ztest_bt_verify(&rbt, os, lr->lr_foid,
1645 offset, gen, txg, crtxg);
1646 }
1647 }
1648
1649 /*
1650 * Writes can appear to be newer than the bonus buffer because
1651 * the ztest_get_data() callback does a dmu_read() of the
1652 * open-context data, which may be different than the data
1653 * as it was when the write was generated.
1654 */
1655 if (zd->zd_zilog->zl_replay) {
1656 ztest_bt_verify(bt, os, lr->lr_foid, offset,
1657 MAX(gen, bt->bt_gen), MAX(txg, lrtxg),
1658 bt->bt_crtxg);
1659 }
1660
1661 /*
1662 * Set the bt's gen/txg to the bonus buffer's gen/txg
1663 * so that all of the usual ASSERTs will work.
1664 */
1665 ztest_bt_generate(bt, os, lr->lr_foid, offset, gen, txg, crtxg);
1666 }
1667
1668 if (abuf == NULL) {
1669 dmu_write(os, lr->lr_foid, offset, length, data, tx);
1670 } else {
1671 bcopy(data, abuf->b_data, length);
1672 dmu_assign_arcbuf(db, offset, abuf, tx);
1673 }
1674
1675 (void) ztest_log_write(zd, tx, lr);
1676
1677 dmu_buf_rele(db, FTAG);
1678
1679 dmu_tx_commit(tx);
1680
1681 ztest_range_unlock(rl);
1682 ztest_object_unlock(zd, lr->lr_foid);
1683
1684 return (0);
1685 }
1686
1687 static int
1688 ztest_replay_truncate(ztest_ds_t *zd, lr_truncate_t *lr, boolean_t byteswap)
1689 {
1690 objset_t *os = zd->zd_os;
1691 dmu_tx_t *tx;
1692 uint64_t txg;
1693 rl_t *rl;
1694
1695 if (byteswap)
1696 byteswap_uint64_array(lr, sizeof (*lr));
1697
1698 ztest_object_lock(zd, lr->lr_foid, RL_READER);
1699 rl = ztest_range_lock(zd, lr->lr_foid, lr->lr_offset, lr->lr_length,
1700 RL_WRITER);
1701
1702 tx = dmu_tx_create(os);
1703
1704 dmu_tx_hold_free(tx, lr->lr_foid, lr->lr_offset, lr->lr_length);
1705
1706 txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
1707 if (txg == 0) {
1708 ztest_range_unlock(rl);
1709 ztest_object_unlock(zd, lr->lr_foid);
1710 return (ENOSPC);
1711 }
1712
1713 VERIFY(dmu_free_range(os, lr->lr_foid, lr->lr_offset,
1714 lr->lr_length, tx) == 0);
1715
1716 (void) ztest_log_truncate(zd, tx, lr);
1717
1718 dmu_tx_commit(tx);
1719
1720 ztest_range_unlock(rl);
1721 ztest_object_unlock(zd, lr->lr_foid);
1722
1723 return (0);
1724 }
1725
1726 static int
1727 ztest_replay_setattr(ztest_ds_t *zd, lr_setattr_t *lr, boolean_t byteswap)
1728 {
1729 objset_t *os = zd->zd_os;
1730 dmu_tx_t *tx;
1731 dmu_buf_t *db;
1732 ztest_block_tag_t *bbt;
1733 uint64_t txg, lrtxg, crtxg;
1734
1735 if (byteswap)
1736 byteswap_uint64_array(lr, sizeof (*lr));
1737
1738 ztest_object_lock(zd, lr->lr_foid, RL_WRITER);
1739
1740 VERIFY3U(0, ==, dmu_bonus_hold(os, lr->lr_foid, FTAG, &db));
1741
1742 tx = dmu_tx_create(os);
1743 dmu_tx_hold_bonus(tx, lr->lr_foid);
1744
1745 txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
1746 if (txg == 0) {
1747 dmu_buf_rele(db, FTAG);
1748 ztest_object_unlock(zd, lr->lr_foid);
1749 return (ENOSPC);
1750 }
1751
1752 bbt = ztest_bt_bonus(db);
1753 ASSERT3U(bbt->bt_magic, ==, BT_MAGIC);
1754 crtxg = bbt->bt_crtxg;
1755 lrtxg = lr->lr_common.lrc_txg;
1756
1757 if (zd->zd_zilog->zl_replay) {
1758 ASSERT(lr->lr_size != 0);
1759 ASSERT(lr->lr_mode != 0);
1760 ASSERT(lrtxg != 0);
1761 } else {
1762 /*
1763 * Randomly change the size and increment the generation.
1764 */
1765 lr->lr_size = (ztest_random(db->db_size / sizeof (*bbt)) + 1) *
1766 sizeof (*bbt);
1767 lr->lr_mode = bbt->bt_gen + 1;
1768 ASSERT(lrtxg == 0);
1769 }
1770
1771 /*
1772 * Verify that the current bonus buffer is not newer than our txg.
1773 */
1774 ztest_bt_verify(bbt, os, lr->lr_foid, -1ULL, lr->lr_mode,
1775 MAX(txg, lrtxg), crtxg);
1776
1777 dmu_buf_will_dirty(db, tx);
1778
1779 ASSERT3U(lr->lr_size, >=, sizeof (*bbt));
1780 ASSERT3U(lr->lr_size, <=, db->db_size);
1781 VERIFY0(dmu_set_bonus(db, lr->lr_size, tx));
1782 bbt = ztest_bt_bonus(db);
1783
1784 ztest_bt_generate(bbt, os, lr->lr_foid, -1ULL, lr->lr_mode, txg, crtxg);
1785
1786 dmu_buf_rele(db, FTAG);
1787
1788 (void) ztest_log_setattr(zd, tx, lr);
1789
1790 dmu_tx_commit(tx);
1791
1792 ztest_object_unlock(zd, lr->lr_foid);
1793
1794 return (0);
1795 }
1796
1797 zil_replay_func_t ztest_replay_vector[TX_MAX_TYPE] = {
1798 NULL, /* 0 no such transaction type */
1799 (zil_replay_func_t)ztest_replay_create, /* TX_CREATE */
1800 NULL, /* TX_MKDIR */
1801 NULL, /* TX_MKXATTR */
1802 NULL, /* TX_SYMLINK */
1803 (zil_replay_func_t)ztest_replay_remove, /* TX_REMOVE */
1804 NULL, /* TX_RMDIR */
1805 NULL, /* TX_LINK */
1806 NULL, /* TX_RENAME */
1807 (zil_replay_func_t)ztest_replay_write, /* TX_WRITE */
1808 (zil_replay_func_t)ztest_replay_truncate, /* TX_TRUNCATE */
1809 (zil_replay_func_t)ztest_replay_setattr, /* TX_SETATTR */
1810 NULL, /* TX_ACL */
1811 NULL, /* TX_CREATE_ACL */
1812 NULL, /* TX_CREATE_ATTR */
1813 NULL, /* TX_CREATE_ACL_ATTR */
1814 NULL, /* TX_MKDIR_ACL */
1815 NULL, /* TX_MKDIR_ATTR */
1816 NULL, /* TX_MKDIR_ACL_ATTR */
1817 NULL, /* TX_WRITE2 */
1818 };
1819
1820 /*
1821 * ZIL get_data callbacks
1822 */
1823
1824 static void
1825 ztest_get_done(zgd_t *zgd, int error)
1826 {
1827 ztest_ds_t *zd = zgd->zgd_private;
1828 uint64_t object = zgd->zgd_rl->rl_object;
1829
1830 if (zgd->zgd_db)
1831 dmu_buf_rele(zgd->zgd_db, zgd);
1832
1833 ztest_range_unlock(zgd->zgd_rl);
1834 ztest_object_unlock(zd, object);
1835
1836 if (error == 0 && zgd->zgd_bp)
1837 zil_add_block(zgd->zgd_zilog, zgd->zgd_bp);
1838
1839 umem_free(zgd, sizeof (*zgd));
1840 }
1841
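/*
 * Fetch the data for a ZIL write record: for an immediate write
 * (buf != NULL) copy it with dmu_read(); otherwise hold the whole block
 * and dmu_sync() it, letting ztest_get_done() drop the locks and the dbuf.
 */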
1842 static int
1843 ztest_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
1844 {
1845 ztest_ds_t *zd = arg;
1846 objset_t *os = zd->zd_os;
1847 uint64_t object = lr->lr_foid;
1848 uint64_t offset = lr->lr_offset;
1849 uint64_t size = lr->lr_length;
1850 blkptr_t *bp = &lr->lr_blkptr;
1851 uint64_t txg = lr->lr_common.lrc_txg;
1852 uint64_t crtxg;
1853 dmu_object_info_t doi;
1854 dmu_buf_t *db;
1855 zgd_t *zgd;
1856 int error;
1857
1858 ztest_object_lock(zd, object, RL_READER);
1859 error = dmu_bonus_hold(os, object, FTAG, &db);
1860 if (error) {
1861 ztest_object_unlock(zd, object);
1862 return (error);
1863 }
1864
1865 crtxg = ztest_bt_bonus(db)->bt_crtxg;
1866
1867 if (crtxg == 0 || crtxg > txg) {
1868 dmu_buf_rele(db, FTAG);
1869 ztest_object_unlock(zd, object);
1870 return (ENOENT);
1871 }
1872
1873 dmu_object_info_from_db(db, &doi);
1874 dmu_buf_rele(db, FTAG);
1875 db = NULL;
1876
1877 zgd = umem_zalloc(sizeof (*zgd), UMEM_NOFAIL);
1878 zgd->zgd_zilog = zd->zd_zilog;
1879 zgd->zgd_private = zd;
1880
1881 if (buf != NULL) { /* immediate write */
1882 zgd->zgd_rl = ztest_range_lock(zd, object, offset, size,
1883 RL_READER);
1884
1885 error = dmu_read(os, object, offset, size, buf,
1886 DMU_READ_NO_PREFETCH);
1887 ASSERT(error == 0);
1888 } else {
1889 size = doi.doi_data_block_size;
1890 if (ISP2(size)) {
1891 offset = P2ALIGN(offset, size);
1892 } else {
1893 ASSERT(offset < size);
1894 offset = 0;
1895 }
1896
1897 zgd->zgd_rl = ztest_range_lock(zd, object, offset, size,
1898 RL_READER);
1899
1900 error = dmu_buf_hold(os, object, offset, zgd, &db,
1901 DMU_READ_NO_PREFETCH);
1902
1903 if (error == 0) {
1904 blkptr_t *obp = dmu_buf_get_blkptr(db);
1905 if (obp) {
1906 ASSERT(BP_IS_HOLE(bp));
1907 *bp = *obp;
1908 }
1909
1910 zgd->zgd_db = db;
1911 zgd->zgd_bp = bp;
1912
1913 ASSERT(db->db_offset == offset);
1914 ASSERT(db->db_size == size);
1915
1916 error = dmu_sync(zio, lr->lr_common.lrc_txg,
1917 ztest_get_done, zgd);
1918
1919 if (error == 0)
1920 return (0);
1921 }
1922 }
1923
1924 ztest_get_done(zgd, error);
1925
1926 return (error);
1927 }
1928
1929 static void *
1930 ztest_lr_alloc(size_t lrsize, char *name)
1931 {
1932 char *lr;
1933 size_t namesize = name ? strlen(name) + 1 : 0;
1934
1935 lr = umem_zalloc(lrsize + namesize, UMEM_NOFAIL);
1936
1937 if (name)
1938 bcopy(name, lr + lrsize, namesize);
1939
1940 return (lr);
1941 }
1942
1943 void
1944 ztest_lr_free(void *lr, size_t lrsize, char *name)
1945 {
1946 size_t namesize = name ? strlen(name) + 1 : 0;
1947
1948 umem_free(lr, lrsize + namesize);
1949 }
1950
1951 /*
1952 * Lookup a bunch of objects. Returns the number of objects not found.
1953 */
1954 static int
1955 ztest_lookup(ztest_ds_t *zd, ztest_od_t *od, int count)
1956 {
1957 int missing = 0;
1958 int error;
1959 int i;
1960
1961 ASSERT(mutex_held(&zd->zd_dirobj_lock));
1962
1963 for (i = 0; i < count; i++, od++) {
1964 od->od_object = 0;
1965 error = zap_lookup(zd->zd_os, od->od_dir, od->od_name,
1966 sizeof (uint64_t), 1, &od->od_object);
1967 if (error) {
1968 ASSERT(error == ENOENT);
1969 ASSERT(od->od_object == 0);
1970 missing++;
1971 } else {
1972 dmu_buf_t *db;
1973 ztest_block_tag_t *bbt;
1974 dmu_object_info_t doi;
1975
1976 ASSERT(od->od_object != 0);
1977 ASSERT(missing == 0); /* there should be no gaps */
1978
1979 ztest_object_lock(zd, od->od_object, RL_READER);
1980 VERIFY3U(0, ==, dmu_bonus_hold(zd->zd_os,
1981 od->od_object, FTAG, &db));
1982 dmu_object_info_from_db(db, &doi);
1983 bbt = ztest_bt_bonus(db);
1984 ASSERT3U(bbt->bt_magic, ==, BT_MAGIC);
1985 od->od_type = doi.doi_type;
1986 od->od_blocksize = doi.doi_data_block_size;
1987 od->od_gen = bbt->bt_gen;
1988 dmu_buf_rele(db, FTAG);
1989 ztest_object_unlock(zd, od->od_object);
1990 }
1991 }
1992
1993 return (missing);
1994 }
1995
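/*
 * Create the objects described by an array of templates by way of
 * ztest_replay_create(). Returns the number of objects that could not
 * be created (e.g. because the pool ran out of space).
 */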
1996 static int
1997 ztest_create(ztest_ds_t *zd, ztest_od_t *od, int count)
1998 {
1999 int missing = 0;
2000 int i;
2001
2002 ASSERT(mutex_held(&zd->zd_dirobj_lock));
2003
2004 for (i = 0; i < count; i++, od++) {
2005 if (missing) {
2006 od->od_object = 0;
2007 missing++;
2008 continue;
2009 }
2010
2011 lr_create_t *lr = ztest_lr_alloc(sizeof (*lr), od->od_name);
2012
2013 lr->lr_doid = od->od_dir;
2014 lr->lr_foid = 0; /* 0 to allocate, > 0 to claim */
2015 lr->lrz_type = od->od_crtype;
2016 lr->lrz_blocksize = od->od_crblocksize;
2017 lr->lrz_ibshift = ztest_random_ibshift();
2018 lr->lrz_bonustype = DMU_OT_UINT64_OTHER;
2019 lr->lrz_bonuslen = dmu_bonus_max();
2020 lr->lr_gen = od->od_crgen;
2021 lr->lr_crtime[0] = time(NULL);
2022
2023 if (ztest_replay_create(zd, lr, B_FALSE) != 0) {
2024 ASSERT(missing == 0);
2025 od->od_object = 0;
2026 missing++;
2027 } else {
2028 od->od_object = lr->lr_foid;
2029 od->od_type = od->od_crtype;
2030 od->od_blocksize = od->od_crblocksize;
2031 od->od_gen = od->od_crgen;
2032 ASSERT(od->od_object != 0);
2033 }
2034
2035 ztest_lr_free(lr, sizeof (*lr), od->od_name);
2036 }
2037
2038 return (missing);
2039 }
2040
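/*
 * Remove the objects described by the od array, in reverse order, by
 * building lr_remove_t records and passing them to ztest_replay_remove().
 * Returns the number of objects that could not be removed.
 */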
2041 static int
2042 ztest_remove(ztest_ds_t *zd, ztest_od_t *od, int count)
2043 {
2044 int missing = 0;
2045 int error;
2046 int i;
2047
2048 ASSERT(mutex_held(&zd->zd_dirobj_lock));
2049
2050 od += count - 1;
2051
2052 for (i = count - 1; i >= 0; i--, od--) {
2053 if (missing) {
2054 missing++;
2055 continue;
2056 }
2057
2058 /*
2059 * No object was found during lookup, so there is nothing to remove.
2060 */
2061 if (od->od_object == 0)
2062 continue;
2063
2064 lr_remove_t *lr = ztest_lr_alloc(sizeof (*lr), od->od_name);
2065
2066 lr->lr_doid = od->od_dir;
2067
2068 if ((error = ztest_replay_remove(zd, lr, B_FALSE)) != 0) {
2069 ASSERT3U(error, ==, ENOSPC);
2070 missing++;
2071 } else {
2072 od->od_object = 0;
2073 }
2074 ztest_lr_free(lr, sizeof (*lr), od->od_name);
2075 }
2076
2077 return (missing);
2078 }
2079
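/*
 * Write 'size' bytes of 'data' to the given object by building an
 * lr_write_t record and passing it to ztest_replay_write().
 */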
2080 static int
2081 ztest_write(ztest_ds_t *zd, uint64_t object, uint64_t offset, uint64_t size,
2082 void *data)
2083 {
2084 lr_write_t *lr;
2085 int error;
2086
2087 lr = ztest_lr_alloc(sizeof (*lr) + size, NULL);
2088
2089 lr->lr_foid = object;
2090 lr->lr_offset = offset;
2091 lr->lr_length = size;
2092 lr->lr_blkoff = 0;
2093 BP_ZERO(&lr->lr_blkptr);
2094
2095 bcopy(data, lr + 1, size);
2096
2097 error = ztest_replay_write(zd, lr, B_FALSE);
2098
2099 ztest_lr_free(lr, sizeof (*lr) + size, NULL);
2100
2101 return (error);
2102 }
2103
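/*
 * Free a range of the given object by building an lr_truncate_t record
 * and passing it to ztest_replay_truncate().
 */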
2104 static int
2105 ztest_truncate(ztest_ds_t *zd, uint64_t object, uint64_t offset, uint64_t size)
2106 {
2107 lr_truncate_t *lr;
2108 int error;
2109
2110 lr = ztest_lr_alloc(sizeof (*lr), NULL);
2111
2112 lr->lr_foid = object;
2113 lr->lr_offset = offset;
2114 lr->lr_length = size;
2115
2116 error = ztest_replay_truncate(zd, lr, B_FALSE);
2117
2118 ztest_lr_free(lr, sizeof (*lr), NULL);
2119
2120 return (error);
2121 }
2122
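/*
 * Set attributes on the given object by building an lr_setattr_t record
 * and passing it to ztest_replay_setattr().
 */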
2123 static int
2124 ztest_setattr(ztest_ds_t *zd, uint64_t object)
2125 {
2126 lr_setattr_t *lr;
2127 int error;
2128
2129 lr = ztest_lr_alloc(sizeof (*lr), NULL);
2130
2131 lr->lr_foid = object;
2132 lr->lr_size = 0;
2133 lr->lr_mode = 0;
2134
2135 error = ztest_replay_setattr(zd, lr, B_FALSE);
2136
2137 ztest_lr_free(lr, sizeof (*lr), NULL);
2138
2139 return (error);
2140 }
2141
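/*
 * Preallocate a range of the given object in its own transaction, holding
 * the range lock as writer. If the transaction cannot be assigned, free
 * the range instead.
 */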
2142 static void
2143 ztest_prealloc(ztest_ds_t *zd, uint64_t object, uint64_t offset, uint64_t size)
2144 {
2145 objset_t *os = zd->zd_os;
2146 dmu_tx_t *tx;
2147 uint64_t txg;
2148 rl_t *rl;
2149
2150 txg_wait_synced(dmu_objset_pool(os), 0);
2151
2152 ztest_object_lock(zd, object, RL_READER);
2153 rl = ztest_range_lock(zd, object, offset, size, RL_WRITER);
2154
2155 tx = dmu_tx_create(os);
2156
2157 dmu_tx_hold_write(tx, object, offset, size);
2158
2159 txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
2160
2161 if (txg != 0) {
2162 dmu_prealloc(os, object, offset, size, tx);
2163 dmu_tx_commit(tx);
2164 txg_wait_synced(dmu_objset_pool(os), txg);
2165 } else {
2166 (void) dmu_free_long_range(os, object, offset, size);
2167 }
2168
2169 ztest_range_unlock(rl);
2170 ztest_object_unlock(zd, object);
2171 }
2172
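/*
 * Perform a randomly chosen I/O operation (biased toward writing block
 * tags) on the given object and offset, holding zd_zilog_lock as reader.
 */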
2173 static void
2174 ztest_io(ztest_ds_t *zd, uint64_t object, uint64_t offset)
2175 {
2176 int err;
2177 ztest_block_tag_t wbt;
2178 dmu_object_info_t doi;
2179 enum ztest_io_type io_type;
2180 uint64_t blocksize;
2181 void *data;
2182
2183 VERIFY(dmu_object_info(zd->zd_os, object, &doi) == 0);
2184 blocksize = doi.doi_data_block_size;
2185 data = umem_alloc(blocksize, UMEM_NOFAIL);
2186
2187 /*
2188 * Pick an i/o type at random, biased toward writing block tags.
2189 */
2190 io_type = ztest_random(ZTEST_IO_TYPES);
2191 if (ztest_random(2) == 0)
2192 io_type = ZTEST_IO_WRITE_TAG;
2193
2194 (void) rw_enter(&zd->zd_zilog_lock, RW_READER);
2195
2196 switch (io_type) {
2197
2198 case ZTEST_IO_WRITE_TAG:
2199 ztest_bt_generate(&wbt, zd->zd_os, object, offset, 0, 0, 0);
2200 (void) ztest_write(zd, object, offset, sizeof (wbt), &wbt);
2201 break;
2202
2203 case ZTEST_IO_WRITE_PATTERN:
2204 (void) memset(data, 'a' + (object + offset) % 5, blocksize);
2205 if (ztest_random(2) == 0) {
2206 /*
2207 * Induce fletcher2 collisions to ensure that
2208 * zio_ddt_collision() detects and resolves them
2209 * when using fletcher2-verify for deduplication.
2210 */
2211 ((uint64_t *)data)[0] ^= 1ULL << 63;
2212 ((uint64_t *)data)[4] ^= 1ULL << 63;
2213 }
2214 (void) ztest_write(zd, object, offset, blocksize, data);
2215 break;
2216
2217 case ZTEST_IO_WRITE_ZEROES:
2218 bzero(data, blocksize);
2219 (void) ztest_write(zd, object, offset, blocksize, data);
2220 break;
2221
2222 case ZTEST_IO_TRUNCATE:
2223 (void) ztest_truncate(zd, object, offset, blocksize);
2224 break;
2225
2226 case ZTEST_IO_SETATTR:
2227 (void) ztest_setattr(zd, object);
2228 break;
2229 default:
2230 break;
2231
2232 case ZTEST_IO_REWRITE:
2233 (void) rw_enter(&ztest_name_lock, RW_READER);
2234 err = ztest_dsl_prop_set_uint64(zd->zd_name,
2235 ZFS_PROP_CHECKSUM, spa_dedup_checksum(ztest_spa),
2236 B_FALSE);
2237 VERIFY(err == 0 || err == ENOSPC);
2238 err = ztest_dsl_prop_set_uint64(zd->zd_name,
2239 ZFS_PROP_COMPRESSION,
2240 ztest_random_dsl_prop(ZFS_PROP_COMPRESSION),
2241 B_FALSE);
2242 VERIFY(err == 0 || err == ENOSPC);
2243 (void) rw_exit(&ztest_name_lock);
2244
2245 VERIFY0(dmu_read(zd->zd_os, object, offset, blocksize, data,
2246 DMU_READ_NO_PREFETCH));
2247
2248 (void) ztest_write(zd, object, offset, blocksize, data);
2249 break;
2250 }
2251
2252 (void) rw_exit(&zd->zd_zilog_lock);
2253
2254 umem_free(data, blocksize);
2255 }
2256
2257 /*
2258 * Initialize an object description template.
2259 */
2260 static void
2261 ztest_od_init(ztest_od_t *od, uint64_t id, char *tag, uint64_t index,
2262 dmu_object_type_t type, uint64_t blocksize, uint64_t gen)
2263 {
2264 od->od_dir = ZTEST_DIROBJ;
2265 od->od_object = 0;
2266
2267 od->od_crtype = type;
2268 od->od_crblocksize = blocksize ? blocksize : ztest_random_blocksize();
2269 od->od_crgen = gen;
2270
2271 od->od_type = DMU_OT_NONE;
2272 od->od_blocksize = 0;
2273 od->od_gen = 0;
2274
2275 (void) snprintf(od->od_name, sizeof (od->od_name), "%s(%lld)[%llu]",
2276 tag, (longlong_t)id, (u_longlong_t)index);
2277 }
2278
2279 /*
2280 * Look up or create the objects for a test using the od template.
2281 * If the objects do not all exist, or if 'remove' is specified,
2282 * remove any existing objects and create new ones. Otherwise,
2283 * use the existing objects.
2284 */
2285 static int
2286 ztest_object_init(ztest_ds_t *zd, ztest_od_t *od, size_t size, boolean_t remove)
2287 {
2288 int count = size / sizeof (*od);
2289 int rv = 0;
2290
2291 mutex_enter(&zd->zd_dirobj_lock);
2292 if ((ztest_lookup(zd, od, count) != 0 || remove) &&
2293 (ztest_remove(zd, od, count) != 0 ||
2294 ztest_create(zd, od, count) != 0))
2295 rv = -1;
2296 zd->zd_od = od;
2297 mutex_exit(&zd->zd_dirobj_lock);
2298
2299 return (rv);
2300 }
2301
2302 /* ARGSUSED */
2303 void
2304 ztest_zil_commit(ztest_ds_t *zd, uint64_t id)
2305 {
2306 zilog_t *zilog = zd->zd_zilog;
2307
2308 (void) rw_enter(&zd->zd_zilog_lock, RW_READER);
2309
2310 zil_commit(zilog, ztest_random(ZTEST_OBJECTS));
2311
2312 /*
2313 * Remember the committed values in zd, which is in parent/child
2314 * shared memory. If we die, the next iteration of ztest_run()
2315 * will verify that the log really does contain this record.
2316 */
2317 mutex_enter(&zilog->zl_lock);
2318 ASSERT(zd->zd_shared != NULL);
2319 ASSERT3U(zd->zd_shared->zd_seq, <=, zilog->zl_commit_lr_seq);
2320 zd->zd_shared->zd_seq = zilog->zl_commit_lr_seq;
2321 mutex_exit(&zilog->zl_lock);
2322
2323 (void) rw_exit(&zd->zd_zilog_lock);
2324 }
2325
2326 /*
2327 * This function is designed to simulate the operations that occur during a
2328 * mount/unmount operation. We hold the dataset across these operations in an
2329 * attempt to expose any implicit assumptions about ZIL management.
2330 */
2331 /* ARGSUSED */
2332 void
2333 ztest_zil_remount(ztest_ds_t *zd, uint64_t id)
2334 {
2335 objset_t *os = zd->zd_os;
2336
2337 /*
2338 * We grab the zd_dirobj_lock to ensure that no other thread is
2339 * updating the zil (i.e. adding in-memory log records) and the
2340 * zd_zilog_lock to block any I/O.
2341 */
2342 mutex_enter(&zd->zd_dirobj_lock);
2343 (void) rw_enter(&zd->zd_zilog_lock, RW_WRITER);
2344
2345 /* zfs_sb_teardown() */
2346 zil_close(zd->zd_zilog);
2347
2348 /* zfsvfs_setup() */
2349 VERIFY(zil_open(os, ztest_get_data) == zd->zd_zilog);
2350 zil_replay(os, zd, ztest_replay_vector);
2351
2352 (void) rw_exit(&zd->zd_zilog_lock);
2353 mutex_exit(&zd->zd_dirobj_lock);
2354 }
2355
2356 /*
2357 * Verify that we can't destroy an active pool, create an existing pool,
2358 * or create a pool with a bad vdev spec.
2359 */
2360 /* ARGSUSED */
2361 void
2362 ztest_spa_create_destroy(ztest_ds_t *zd, uint64_t id)
2363 {
2364 ztest_shared_opts_t *zo = &ztest_opts;
2365 spa_t *spa;
2366 nvlist_t *nvroot;
2367
2368 /*
2369 * Attempt to create using a bad file.
2370 */
2371 nvroot = make_vdev_root("/dev/bogus", NULL, NULL, 0, 0, 0, 0, 0, 1);
2372 VERIFY3U(ENOENT, ==,
2373 spa_create("ztest_bad_file", nvroot, NULL, NULL));
2374 nvlist_free(nvroot);
2375
2376 /*
2377 * Attempt to create using a bad mirror.
2378 */
2379 nvroot = make_vdev_root("/dev/bogus", NULL, NULL, 0, 0, 0, 0, 2, 1);
2380 VERIFY3U(ENOENT, ==,
2381 spa_create("ztest_bad_mirror", nvroot, NULL, NULL));
2382 nvlist_free(nvroot);
2383
2384 /*
2385 * Attempt to create an existing pool. It shouldn't matter
2386 * what's in the nvroot; we should fail with EEXIST.
2387 */
2388 (void) rw_enter(&ztest_name_lock, RW_READER);
2389 nvroot = make_vdev_root("/dev/bogus", NULL, NULL, 0, 0, 0, 0, 0, 1);
2390 VERIFY3U(EEXIST, ==, spa_create(zo->zo_pool, nvroot, NULL, NULL));
2391 nvlist_free(nvroot);
2392 VERIFY3U(0, ==, spa_open(zo->zo_pool, &spa, FTAG));
2393 VERIFY3U(EBUSY, ==, spa_destroy(zo->zo_pool));
2394 spa_close(spa, FTAG);
2395
2396 (void) rw_exit(&ztest_name_lock);
2397 }
2398
2399 /* ARGSUSED */
2400 void
2401 ztest_spa_upgrade(ztest_ds_t *zd, uint64_t id)
2402 {
2403 spa_t *spa;
2404 uint64_t initial_version = SPA_VERSION_INITIAL;
2405 uint64_t version, newversion;
2406 nvlist_t *nvroot, *props;
2407 char *name;
2408
2409 mutex_enter(&ztest_vdev_lock);
2410 name = kmem_asprintf("%s_upgrade", ztest_opts.zo_pool);
2411
2412 /*
2413 * Clean up from previous runs.
2414 */
2415 (void) spa_destroy(name);
2416
2417 nvroot = make_vdev_root(NULL, NULL, name, ztest_opts.zo_vdev_size, 0,
2418 0, ztest_opts.zo_raidz, ztest_opts.zo_mirrors, 1);
2419
2420 /*
2421 * If we're configuring a RAIDZ device then make sure that the
2422 * initial version is capable of supporting that feature.
2423 */
2424 switch (ztest_opts.zo_raidz_parity) {
2425 case 0:
2426 case 1:
2427 initial_version = SPA_VERSION_INITIAL;
2428 break;
2429 case 2:
2430 initial_version = SPA_VERSION_RAIDZ2;
2431 break;
2432 case 3:
2433 initial_version = SPA_VERSION_RAIDZ3;
2434 break;
2435 }
2436
2437 /*
2438 * Create a pool with a spa version that can be upgraded. Pick
2439 * a value between initial_version and SPA_VERSION_BEFORE_FEATURES.
2440 */
2441 do {
2442 version = ztest_random_spa_version(initial_version);
2443 } while (version > SPA_VERSION_BEFORE_FEATURES);
2444
2445 props = fnvlist_alloc();
2446 fnvlist_add_uint64(props,
2447 zpool_prop_to_name(ZPOOL_PROP_VERSION), version);
2448 VERIFY3S(spa_create(name, nvroot, props, NULL), ==, 0);
2449 fnvlist_free(nvroot);
2450 fnvlist_free(props);
2451
2452 VERIFY3S(spa_open(name, &spa, FTAG), ==, 0);
2453 VERIFY3U(spa_version(spa), ==, version);
2454 newversion = ztest_random_spa_version(version + 1);
2455
2456 if (ztest_opts.zo_verbose >= 4) {
2457 (void) printf("upgrading spa version from %llu to %llu\n",
2458 (u_longlong_t)version, (u_longlong_t)newversion);
2459 }
2460
2461 spa_upgrade(spa, newversion);
2462 VERIFY3U(spa_version(spa), >, version);
2463 VERIFY3U(spa_version(spa), ==, fnvlist_lookup_uint64(spa->spa_config,
2464 zpool_prop_to_name(ZPOOL_PROP_VERSION)));
2465 spa_close(spa, FTAG);
2466
2467 strfree(name);
2468 mutex_exit(&ztest_vdev_lock);
2469 }
2470
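/*
 * Recursively search the vdev tree rooted at 'vd' for a vdev whose path
 * matches 'path'. Returns NULL if no such vdev exists.
 */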
2471 static vdev_t *
2472 vdev_lookup_by_path(vdev_t *vd, const char *path)
2473 {
2474 vdev_t *mvd;
2475 int c;
2476
2477 if (vd->vdev_path != NULL && strcmp(path, vd->vdev_path) == 0)
2478 return (vd);
2479
2480 for (c = 0; c < vd->vdev_children; c++)
2481 if ((mvd = vdev_lookup_by_path(vd->vdev_child[c], path)) !=
2482 NULL)
2483 return (mvd);
2484
2485 return (NULL);
2486 }
2487
2488 /*
2489 * Find the first available hole which can be used as a top-level vdev.
2490 */
2491 int
2492 find_vdev_hole(spa_t *spa)
2493 {
2494 vdev_t *rvd = spa->spa_root_vdev;
2495 int c;
2496
2497 ASSERT(spa_config_held(spa, SCL_VDEV, RW_READER) == SCL_VDEV);
2498
2499 for (c = 0; c < rvd->vdev_children; c++) {
2500 vdev_t *cvd = rvd->vdev_child[c];
2501
2502 if (cvd->vdev_ishole)
2503 break;
2504 }
2505 return (c);
2506 }
2507
2508 /*
2509 * Verify that vdev_add() works as expected.
2510 */
2511 /* ARGSUSED */
2512 void
2513 ztest_vdev_add_remove(ztest_ds_t *zd, uint64_t id)
2514 {
2515 ztest_shared_t *zs = ztest_shared;
2516 spa_t *spa = ztest_spa;
2517 uint64_t leaves;
2518 uint64_t guid;
2519 nvlist_t *nvroot;
2520 int error;
2521
2522 mutex_enter(&ztest_vdev_lock);
2523 leaves = MAX(zs->zs_mirrors + zs->zs_splits, 1) * ztest_opts.zo_raidz;
2524
2525 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
2526
2527 ztest_shared->zs_vdev_next_leaf = find_vdev_hole(spa) * leaves;
2528
2529 /*
2530 * If we have slogs, remove one of them 1/4 of the time.
2531 */
2532 if (spa_has_slogs(spa) && ztest_random(4) == 0) {
2533 /*
2534 * Grab the guid from the head of the log class rotor.
2535 */
2536 guid = spa_log_class(spa)->mc_rotor->mg_vd->vdev_guid;
2537
2538 spa_config_exit(spa, SCL_VDEV, FTAG);
2539
2540 /*
2541 * We have to grab the ztest_name_lock as writer to
2542 * prevent a race between removing a slog (dmu_objset_find)
2543 * and destroying a dataset. Removing the slog will
2544 * grab a reference on the dataset which may cause
2545 * dsl_destroy_head() to fail with EBUSY thus
2546 * leaving the dataset in an inconsistent state.
2547 */
2548 rw_enter(&ztest_name_lock, RW_WRITER);
2549 error = spa_vdev_remove(spa, guid, B_FALSE);
2550 rw_exit(&ztest_name_lock);
2551
2552 if (error && error != EEXIST)
2553 fatal(0, "spa_vdev_remove() = %d", error);
2554 } else {
2555 spa_config_exit(spa, SCL_VDEV, FTAG);
2556
2557 /*
2558 * Make the new device a log device 1/4 of the time.
2559 */
2560 nvroot = make_vdev_root(NULL, NULL, NULL,
2561 ztest_opts.zo_vdev_size, 0,
2562 ztest_random(4) == 0, ztest_opts.zo_raidz,
2563 zs->zs_mirrors, 1);
2564
2565 error = spa_vdev_add(spa, nvroot);
2566 nvlist_free(nvroot);
2567
2568 if (error == ENOSPC)
2569 ztest_record_enospc("spa_vdev_add");
2570 else if (error != 0)
2571 fatal(0, "spa_vdev_add() = %d", error);
2572 }
2573
2574 mutex_exit(&ztest_vdev_lock);
2575 }
2576
2577 /*
2578 * Verify that adding/removing aux devices (l2arc, hot spare) works as expected.
2579 */
2580 /* ARGSUSED */
2581 void
2582 ztest_vdev_aux_add_remove(ztest_ds_t *zd, uint64_t id)
2583 {
2584 ztest_shared_t *zs = ztest_shared;
2585 spa_t *spa = ztest_spa;
2586 vdev_t *rvd = spa->spa_root_vdev;
2587 spa_aux_vdev_t *sav;
2588 char *aux;
2589 char *path;
2590 uint64_t guid = 0;
2591 int error;
2592
2593 path = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
2594
2595 if (ztest_random(2) == 0) {
2596 sav = &spa->spa_spares;
2597 aux = ZPOOL_CONFIG_SPARES;
2598 } else {
2599 sav = &spa->spa_l2cache;
2600 aux = ZPOOL_CONFIG_L2CACHE;
2601 }
2602
2603 mutex_enter(&ztest_vdev_lock);
2604
2605 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
2606
2607 if (sav->sav_count != 0 && ztest_random(4) == 0) {
2608 /*
2609 * Pick a random device to remove.
2610 */
2611 guid = sav->sav_vdevs[ztest_random(sav->sav_count)]->vdev_guid;
2612 } else {
2613 /*
2614 * Find an unused device we can add.
2615 */
2616 zs->zs_vdev_aux = 0;
2617 for (;;) {
2618 int c;
2619 (void) snprintf(path, MAXPATHLEN, ztest_aux_template,
2620 ztest_opts.zo_dir, ztest_opts.zo_pool, aux,
2621 zs->zs_vdev_aux);
2622 for (c = 0; c < sav->sav_count; c++)
2623 if (strcmp(sav->sav_vdevs[c]->vdev_path,
2624 path) == 0)
2625 break;
2626 if (c == sav->sav_count &&
2627 vdev_lookup_by_path(rvd, path) == NULL)
2628 break;
2629 zs->zs_vdev_aux++;
2630 }
2631 }
2632
2633 spa_config_exit(spa, SCL_VDEV, FTAG);
2634
2635 if (guid == 0) {
2636 /*
2637 * Add a new device.
2638 */
2639 nvlist_t *nvroot = make_vdev_root(NULL, aux, NULL,
2640 (ztest_opts.zo_vdev_size * 5) / 4, 0, 0, 0, 0, 1);
2641 error = spa_vdev_add(spa, nvroot);
2642 if (error != 0)
2643 fatal(0, "spa_vdev_add(%p) = %d", nvroot, error);
2644 nvlist_free(nvroot);
2645 } else {
2646 /*
2647 * Remove an existing device. Sometimes, dirty its
2648 * vdev state first to make sure we handle removal
2649 * of devices that have pending state changes.
2650 */
2651 if (ztest_random(2) == 0)
2652 (void) vdev_online(spa, guid, 0, NULL);
2653
2654 error = spa_vdev_remove(spa, guid, B_FALSE);
2655 if (error != 0 && error != EBUSY)
2656 fatal(0, "spa_vdev_remove(%llu) = %d", guid, error);
2657 }
2658
2659 mutex_exit(&ztest_vdev_lock);
2660
2661 umem_free(path, MAXPATHLEN);
2662 }
2663
2664 /*
2665 * Split a pool if it has mirror top-level vdevs.
2666 */
2667 /* ARGSUSED */
2668 void
2669 ztest_split_pool(ztest_ds_t *zd, uint64_t id)
2670 {
2671 ztest_shared_t *zs = ztest_shared;
2672 spa_t *spa = ztest_spa;
2673 vdev_t *rvd = spa->spa_root_vdev;
2674 nvlist_t *tree, **child, *config, *split, **schild;
2675 uint_t c, children, schildren = 0, lastlogid = 0;
2676 int error = 0;
2677
2678 mutex_enter(&ztest_vdev_lock);
2679
2680 /* ensure we have a usable config; mirrors of raidz aren't supported */
2681 if (zs->zs_mirrors < 3 || ztest_opts.zo_raidz > 1) {
2682 mutex_exit(&ztest_vdev_lock);
2683 return;
2684 }
2685
2686 /* clean up the old pool, if any */
2687 (void) spa_destroy("splitp");
2688
2689 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
2690
2691 /* generate a config from the existing config */
2692 mutex_enter(&spa->spa_props_lock);
2693 VERIFY(nvlist_lookup_nvlist(spa->spa_config, ZPOOL_CONFIG_VDEV_TREE,
2694 &tree) == 0);
2695 mutex_exit(&spa->spa_props_lock);
2696
2697 VERIFY(nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child,
2698 &children) == 0);
2699
2700 schild = malloc(rvd->vdev_children * sizeof (nvlist_t *));
2701 for (c = 0; c < children; c++) {
2702 vdev_t *tvd = rvd->vdev_child[c];
2703 nvlist_t **mchild;
2704 uint_t mchildren;
2705
2706 if (tvd->vdev_islog || tvd->vdev_ops == &vdev_hole_ops) {
2707 VERIFY(nvlist_alloc(&schild[schildren], NV_UNIQUE_NAME,
2708 0) == 0);
2709 VERIFY(nvlist_add_string(schild[schildren],
2710 ZPOOL_CONFIG_TYPE, VDEV_TYPE_HOLE) == 0);
2711 VERIFY(nvlist_add_uint64(schild[schildren],
2712 ZPOOL_CONFIG_IS_HOLE, 1) == 0);
2713 if (lastlogid == 0)
2714 lastlogid = schildren;
2715 ++schildren;
2716 continue;
2717 }
2718 lastlogid = 0;
2719 VERIFY(nvlist_lookup_nvlist_array(child[c],
2720 ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0);
2721 VERIFY(nvlist_dup(mchild[0], &schild[schildren++], 0) == 0);
2722 }
2723
2724 /* OK, create a config that can be used to split */
2725 VERIFY(nvlist_alloc(&split, NV_UNIQUE_NAME, 0) == 0);
2726 VERIFY(nvlist_add_string(split, ZPOOL_CONFIG_TYPE,
2727 VDEV_TYPE_ROOT) == 0);
2728 VERIFY(nvlist_add_nvlist_array(split, ZPOOL_CONFIG_CHILDREN, schild,
2729 lastlogid != 0 ? lastlogid : schildren) == 0);
2730
2731 VERIFY(nvlist_alloc(&config, NV_UNIQUE_NAME, 0) == 0);
2732 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, split) == 0);
2733
2734 for (c = 0; c < schildren; c++)
2735 nvlist_free(schild[c]);
2736 free(schild);
2737 nvlist_free(split);
2738
2739 spa_config_exit(spa, SCL_VDEV, FTAG);
2740
2741 (void) rw_enter(&ztest_name_lock, RW_WRITER);
2742 error = spa_vdev_split_mirror(spa, "splitp", config, NULL, B_FALSE);
2743 (void) rw_exit(&ztest_name_lock);
2744
2745 nvlist_free(config);
2746
2747 if (error == 0) {
2748 (void) printf("successful split - results:\n");
2749 mutex_enter(&spa_namespace_lock);
2750 show_pool_stats(spa);
2751 show_pool_stats(spa_lookup("splitp"));
2752 mutex_exit(&spa_namespace_lock);
2753 ++zs->zs_splits;
2754 --zs->zs_mirrors;
2755 }
2756 mutex_exit(&ztest_vdev_lock);
2757
2758 }
2759
2760 /*
2761 * Verify that we can attach and detach devices.
2762 */
2763 /* ARGSUSED */
2764 void
2765 ztest_vdev_attach_detach(ztest_ds_t *zd, uint64_t id)
2766 {
2767 ztest_shared_t *zs = ztest_shared;
2768 spa_t *spa = ztest_spa;
2769 spa_aux_vdev_t *sav = &spa->spa_spares;
2770 vdev_t *rvd = spa->spa_root_vdev;
2771 vdev_t *oldvd, *newvd, *pvd;
2772 nvlist_t *root;
2773 uint64_t leaves;
2774 uint64_t leaf, top;
2775 uint64_t ashift = ztest_get_ashift();
2776 uint64_t oldguid, pguid;
2777 size_t oldsize, newsize;
2778 char *oldpath, *newpath;
2779 int replacing;
2780 int oldvd_has_siblings = B_FALSE;
2781 int newvd_is_spare = B_FALSE;
2782 int oldvd_is_log;
2783 int error, expected_error;
2784
2785 oldpath = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
2786 newpath = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
2787
2788 mutex_enter(&ztest_vdev_lock);
2789 leaves = MAX(zs->zs_mirrors, 1) * ztest_opts.zo_raidz;
2790
2791 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
2792
2793 /*
2794 * Decide whether to do an attach or a replace.
2795 */
2796 replacing = ztest_random(2);
2797
2798 /*
2799 * Pick a random top-level vdev.
2800 */
2801 top = ztest_random_vdev_top(spa, B_TRUE);
2802
2803 /*
2804 * Pick a random leaf within it.
2805 */
2806 leaf = ztest_random(leaves);
2807
2808 /*
2809 * Locate this vdev.
2810 */
2811 oldvd = rvd->vdev_child[top];
2812 if (zs->zs_mirrors >= 1) {
2813 ASSERT(oldvd->vdev_ops == &vdev_mirror_ops);
2814 ASSERT(oldvd->vdev_children >= zs->zs_mirrors);
2815 oldvd = oldvd->vdev_child[leaf / ztest_opts.zo_raidz];
2816 }
2817 if (ztest_opts.zo_raidz > 1) {
2818 ASSERT(oldvd->vdev_ops == &vdev_raidz_ops);
2819 ASSERT(oldvd->vdev_children == ztest_opts.zo_raidz);
2820 oldvd = oldvd->vdev_child[leaf % ztest_opts.zo_raidz];
2821 }
2822
2823 /*
2824 * If we're already doing an attach or replace, oldvd may be a
2825 * mirror vdev -- in which case, pick a random child.
2826 */
2827 while (oldvd->vdev_children != 0) {
2828 oldvd_has_siblings = B_TRUE;
2829 ASSERT(oldvd->vdev_children >= 2);
2830 oldvd = oldvd->vdev_child[ztest_random(oldvd->vdev_children)];
2831 }
2832
2833 oldguid = oldvd->vdev_guid;
2834 oldsize = vdev_get_min_asize(oldvd);
2835 oldvd_is_log = oldvd->vdev_top->vdev_islog;
2836 (void) strcpy(oldpath, oldvd->vdev_path);
2837 pvd = oldvd->vdev_parent;
2838 pguid = pvd->vdev_guid;
2839
2840 /*
2841 * If oldvd has siblings, then half of the time, detach it.
2842 */
2843 if (oldvd_has_siblings && ztest_random(2) == 0) {
2844 spa_config_exit(spa, SCL_VDEV, FTAG);
2845 error = spa_vdev_detach(spa, oldguid, pguid, B_FALSE);
2846 if (error != 0 && error != ENODEV && error != EBUSY &&
2847 error != ENOTSUP)
2848 fatal(0, "detach (%s) returned %d", oldpath, error);
2849 goto out;
2850 }
2851
2852 /*
2853 * For the new vdev, choose with equal probability between the two
2854 * standard paths (ending in either 'a' or 'b') or a random hot spare.
2855 */
2856 if (sav->sav_count != 0 && ztest_random(3) == 0) {
2857 newvd = sav->sav_vdevs[ztest_random(sav->sav_count)];
2858 newvd_is_spare = B_TRUE;
2859 (void) strcpy(newpath, newvd->vdev_path);
2860 } else {
2861 (void) snprintf(newpath, MAXPATHLEN, ztest_dev_template,
2862 ztest_opts.zo_dir, ztest_opts.zo_pool,
2863 top * leaves + leaf);
2864 if (ztest_random(2) == 0)
2865 newpath[strlen(newpath) - 1] = 'b';
2866 newvd = vdev_lookup_by_path(rvd, newpath);
2867 }
2868
2869 if (newvd) {
2870 newsize = vdev_get_min_asize(newvd);
2871 } else {
2872 /*
2873 * Make newsize a little bigger or smaller than oldsize.
2874 * If it's smaller, the attach should fail.
2875 * If it's larger, and we're doing a replace,
2876 * we should get dynamic LUN growth when we're done.
2877 */
2878 newsize = 10 * oldsize / (9 + ztest_random(3));
2879 }
2880
2881 /*
2882 * If pvd is not a mirror or root, the attach should fail with ENOTSUP,
2883 * unless it's a replace; in that case any non-replacing parent is OK.
2884 *
2885 * If newvd is already part of the pool, it should fail with EBUSY.
2886 *
2887 * If newvd is too small, it should fail with EOVERFLOW.
2888 */
2889 if (pvd->vdev_ops != &vdev_mirror_ops &&
2890 pvd->vdev_ops != &vdev_root_ops && (!replacing ||
2891 pvd->vdev_ops == &vdev_replacing_ops ||
2892 pvd->vdev_ops == &vdev_spare_ops))
2893 expected_error = ENOTSUP;
2894 else if (newvd_is_spare && (!replacing || oldvd_is_log))
2895 expected_error = ENOTSUP;
2896 else if (newvd == oldvd)
2897 expected_error = replacing ? 0 : EBUSY;
2898 else if (vdev_lookup_by_path(rvd, newpath) != NULL)
2899 expected_error = EBUSY;
2900 else if (newsize < oldsize)
2901 expected_error = EOVERFLOW;
2902 else if (ashift > oldvd->vdev_top->vdev_ashift)
2903 expected_error = EDOM;
2904 else
2905 expected_error = 0;
2906
2907 spa_config_exit(spa, SCL_VDEV, FTAG);
2908
2909 /*
2910 * Build the nvlist describing newpath.
2911 */
2912 root = make_vdev_root(newpath, NULL, NULL, newvd == NULL ? newsize : 0,
2913 ashift, 0, 0, 0, 1);
2914
2915 error = spa_vdev_attach(spa, oldguid, root, replacing);
2916
2917 nvlist_free(root);
2918
2919 /*
2920 * If our parent was the replacing vdev, but the replace completed,
2921 * then instead of failing with ENOTSUP we may either succeed,
2922 * fail with ENODEV, or fail with EOVERFLOW.
2923 */
2924 if (expected_error == ENOTSUP &&
2925 (error == 0 || error == ENODEV || error == EOVERFLOW))
2926 expected_error = error;
2927
2928 /*
2929 * If someone grew the LUN, the replacement may be too small.
2930 */
2931 if (error == EOVERFLOW || error == EBUSY)
2932 expected_error = error;
2933
2934 /* XXX workaround 6690467 */
2935 if (error != expected_error && expected_error != EBUSY) {
2936 fatal(0, "attach (%s %llu, %s %llu, %d) "
2937 "returned %d, expected %d",
2938 oldpath, (longlong_t)oldsize, newpath,
2939 (longlong_t)newsize, replacing, error, expected_error);
2940 }
2941 out:
2942 mutex_exit(&ztest_vdev_lock);
2943
2944 umem_free(oldpath, MAXPATHLEN);
2945 umem_free(newpath, MAXPATHLEN);
2946 }
2947
2948 /*
2949 * Callback function which expands the physical size of the vdev.
2950 */
2951 vdev_t *
2952 grow_vdev(vdev_t *vd, void *arg)
2953 {
2954 ASSERTV(spa_t *spa = vd->vdev_spa);
2955 size_t *newsize = arg;
2956 size_t fsize;
2957 int fd;
2958
2959 ASSERT(spa_config_held(spa, SCL_STATE, RW_READER) == SCL_STATE);
2960 ASSERT(vd->vdev_ops->vdev_op_leaf);
2961
2962 if ((fd = open(vd->vdev_path, O_RDWR)) == -1)
2963 return (vd);
2964
2965 fsize = lseek(fd, 0, SEEK_END);
2966 VERIFY(ftruncate(fd, *newsize) == 0);
2967
2968 if (ztest_opts.zo_verbose >= 6) {
2969 (void) printf("%s grew from %lu to %lu bytes\n",
2970 vd->vdev_path, (ulong_t)fsize, (ulong_t)*newsize);
2971 }
2972 (void) close(fd);
2973 return (NULL);
2974 }
2975
2976 /*
2977 * Callback function which expands a given vdev by calling vdev_online().
2978 */
2979 /* ARGSUSED */
2980 vdev_t *
2981 online_vdev(vdev_t *vd, void *arg)
2982 {
2983 spa_t *spa = vd->vdev_spa;
2984 vdev_t *tvd = vd->vdev_top;
2985 uint64_t guid = vd->vdev_guid;
2986 uint64_t generation = spa->spa_config_generation + 1;
2987 vdev_state_t newstate = VDEV_STATE_UNKNOWN;
2988 int error;
2989
2990 ASSERT(spa_config_held(spa, SCL_STATE, RW_READER) == SCL_STATE);
2991 ASSERT(vd->vdev_ops->vdev_op_leaf);
2992
2993 /* Calling vdev_online will initialize the new metaslabs */
2994 spa_config_exit(spa, SCL_STATE, spa);
2995 error = vdev_online(spa, guid, ZFS_ONLINE_EXPAND, &newstate);
2996 spa_config_enter(spa, SCL_STATE, spa, RW_READER);
2997
2998 /*
2999 * If vdev_online returned an error or the underlying vdev_open
3000 * failed then we abort the expand. The only way to know that
3001 * vdev_open fails is by checking the returned newstate.
3002 */
3003 if (error || newstate != VDEV_STATE_HEALTHY) {
3004 if (ztest_opts.zo_verbose >= 5) {
3005 (void) printf("Unable to expand vdev, state %llu, "
3006 "error %d\n", (u_longlong_t)newstate, error);
3007 }
3008 return (vd);
3009 }
3010 ASSERT3U(newstate, ==, VDEV_STATE_HEALTHY);
3011
3012 /*
3013 * Since we dropped the lock we need to ensure that we're
3014 * still talking to the original vdev. It's possible this
3015 * vdev may have been detached/replaced while we were
3016 * trying to online it.
3017 */
3018 if (generation != spa->spa_config_generation) {
3019 if (ztest_opts.zo_verbose >= 5) {
3020 (void) printf("vdev configuration has changed, "
3021 "guid %llu, state %llu, expected gen %llu, "
3022 "got gen %llu\n",
3023 (u_longlong_t)guid,
3024 (u_longlong_t)tvd->vdev_state,
3025 (u_longlong_t)generation,
3026 (u_longlong_t)spa->spa_config_generation);
3027 }
3028 return (vd);
3029 }
3030 return (NULL);
3031 }
3032
3033 /*
3034 * Traverse the vdev tree calling the supplied function.
3035 * We continue to walk the tree until we either have walked all
3036 * children or we receive a non-NULL return from the callback.
3037 * If a NULL callback is passed, then we just return back the first
3038 * leaf vdev we encounter.
3039 */
3040 vdev_t *
3041 vdev_walk_tree(vdev_t *vd, vdev_t *(*func)(vdev_t *, void *), void *arg)
3042 {
3043 uint_t c;
3044
3045 if (vd->vdev_ops->vdev_op_leaf) {
3046 if (func == NULL)
3047 return (vd);
3048 else
3049 return (func(vd, arg));
3050 }
3051
3052 for (c = 0; c < vd->vdev_children; c++) {
3053 vdev_t *cvd = vd->vdev_child[c];
3054 if ((cvd = vdev_walk_tree(cvd, func, arg)) != NULL)
3055 return (cvd);
3056 }
3057 return (NULL);
3058 }
3059
3060 /*
3061 * Verify that dynamic LUN growth works as expected.
3062 */
3063 /* ARGSUSED */
3064 void
3065 ztest_vdev_LUN_growth(ztest_ds_t *zd, uint64_t id)
3066 {
3067 spa_t *spa = ztest_spa;
3068 vdev_t *vd, *tvd;
3069 metaslab_class_t *mc;
3070 metaslab_group_t *mg;
3071 size_t psize, newsize;
3072 uint64_t top;
3073 uint64_t old_class_space, new_class_space, old_ms_count, new_ms_count;
3074
3075 mutex_enter(&ztest_vdev_lock);
3076 spa_config_enter(spa, SCL_STATE, spa, RW_READER);
3077
3078 top = ztest_random_vdev_top(spa, B_TRUE);
3079
3080 tvd = spa->spa_root_vdev->vdev_child[top];
3081 mg = tvd->vdev_mg;
3082 mc = mg->mg_class;
3083 old_ms_count = tvd->vdev_ms_count;
3084 old_class_space = metaslab_class_get_space(mc);
3085
3086 /*
3087 * Determine the size of the first leaf vdev associated with
3088 * our top-level device.
3089 */
3090 vd = vdev_walk_tree(tvd, NULL, NULL);
3091 ASSERT3P(vd, !=, NULL);
3092 ASSERT(vd->vdev_ops->vdev_op_leaf);
3093
3094 psize = vd->vdev_psize;
3095
3096 /*
3097 * We only try to expand the vdev if it's healthy, less than 4x its
3098 * original size, and it has a valid psize.
3099 */
3100 if (tvd->vdev_state != VDEV_STATE_HEALTHY ||
3101 psize == 0 || psize >= 4 * ztest_opts.zo_vdev_size) {
3102 spa_config_exit(spa, SCL_STATE, spa);
3103 mutex_exit(&ztest_vdev_lock);
3104 return;
3105 }
3106 ASSERT(psize > 0);
3107 newsize = psize + psize / 8;
3108 ASSERT3U(newsize, >, psize);
3109
3110 if (ztest_opts.zo_verbose >= 6) {
3111 (void) printf("Expanding LUN %s from %lu to %lu\n",
3112 vd->vdev_path, (ulong_t)psize, (ulong_t)newsize);
3113 }
3114
3115 /*
3116 * Growing the vdev is a two-step process:
3117 * 1) expand the physical size (i.e. relabel)
3118 * 2) online the vdev to create the new metaslabs
3119 */
3120 if (vdev_walk_tree(tvd, grow_vdev, &newsize) != NULL ||
3121 vdev_walk_tree(tvd, online_vdev, NULL) != NULL ||
3122 tvd->vdev_state != VDEV_STATE_HEALTHY) {
3123 if (ztest_opts.zo_verbose >= 5) {
3124 (void) printf("Could not expand LUN because "
3125 "the vdev configuration changed.\n");
3126 }
3127 spa_config_exit(spa, SCL_STATE, spa);
3128 mutex_exit(&ztest_vdev_lock);
3129 return;
3130 }
3131
3132 spa_config_exit(spa, SCL_STATE, spa);
3133
3134 /*
3135 * Expanding the LUN will update the config asynchronously,
3136 * thus we must wait for the async thread to complete any
3137 * pending tasks before proceeding.
3138 */
3139 for (;;) {
3140 boolean_t done;
3141 mutex_enter(&spa->spa_async_lock);
3142 done = (spa->spa_async_thread == NULL && !spa->spa_async_tasks);
3143 mutex_exit(&spa->spa_async_lock);
3144 if (done)
3145 break;
3146 txg_wait_synced(spa_get_dsl(spa), 0);
3147 (void) poll(NULL, 0, 100);
3148 }
3149
3150 spa_config_enter(spa, SCL_STATE, spa, RW_READER);
3151
3152 tvd = spa->spa_root_vdev->vdev_child[top];
3153 new_ms_count = tvd->vdev_ms_count;
3154 new_class_space = metaslab_class_get_space(mc);
3155
3156 if (tvd->vdev_mg != mg || mg->mg_class != mc) {
3157 if (ztest_opts.zo_verbose >= 5) {
3158 (void) printf("Could not verify LUN expansion due to "
3159 "intervening vdev offline or remove.\n");
3160 }
3161 spa_config_exit(spa, SCL_STATE, spa);
3162 mutex_exit(&ztest_vdev_lock);
3163 return;
3164 }
3165
3166 /*
3167 * Make sure we were able to grow the vdev.
3168 */
3169 if (new_ms_count <= old_ms_count)
3170 fatal(0, "LUN expansion failed: ms_count %llu <= %llu\n",
3171 old_ms_count, new_ms_count);
3172
3173 /*
3174 * Make sure we were able to grow the pool.
3175 */
3176 if (new_class_space <= old_class_space)
3177 fatal(0, "LUN expansion failed: class_space %llu <= %llu\n",
3178 old_class_space, new_class_space);
3179
3180 if (ztest_opts.zo_verbose >= 5) {
3181 char oldnumbuf[6], newnumbuf[6];
3182
3183 nicenum(old_class_space, oldnumbuf);
3184 nicenum(new_class_space, newnumbuf);
3185 (void) printf("%s grew from %s to %s\n",
3186 spa->spa_name, oldnumbuf, newnumbuf);
3187 }
3188
3189 spa_config_exit(spa, SCL_STATE, spa);
3190 mutex_exit(&ztest_vdev_lock);
3191 }
3192
3193 /*
3194 * Verify that dmu_objset_{create,destroy,open,close} work as expected.
3195 */
3196 /* ARGSUSED */
3197 static void
3198 ztest_objset_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx)
3199 {
3200 /*
3201 * Create the objects common to all ztest datasets.
3202 */
3203 VERIFY(zap_create_claim(os, ZTEST_DIROBJ,
3204 DMU_OT_ZAP_OTHER, DMU_OT_NONE, 0, tx) == 0);
3205 }
3206
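/*
 * Create a ztest dataset. About 20% of the time, also set its sync
 * property to 'always' to exercise synchronous ZIL writes.
 */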
3207 static int
3208 ztest_dataset_create(char *dsname)
3209 {
3210 uint64_t zilset = ztest_random(100);
3211 int err = dmu_objset_create(dsname, DMU_OST_OTHER, 0,
3212 ztest_objset_create_cb, NULL);
3213
3214 if (err || zilset < 80)
3215 return (err);
3216
3217 if (ztest_opts.zo_verbose >= 5)
3218 (void) printf("Setting dataset %s to sync always\n", dsname);
3219 return (ztest_dsl_prop_set_uint64(dsname, ZFS_PROP_SYNC,
3220 ZFS_SYNC_ALWAYS, B_FALSE));
3221 }
3222
3223 /* ARGSUSED */
3224 static int
3225 ztest_objset_destroy_cb(const char *name, void *arg)
3226 {
3227 objset_t *os;
3228 dmu_object_info_t doi;
3229 int error;
3230
3231 /*
3232 * Verify that the dataset contains a directory object.
3233 */
3234 VERIFY0(dmu_objset_own(name, DMU_OST_OTHER, B_TRUE, FTAG, &os));
3235 error = dmu_object_info(os, ZTEST_DIROBJ, &doi);
3236 if (error != ENOENT) {
3237 /* We could have crashed in the middle of destroying it */
3238 ASSERT0(error);
3239 ASSERT3U(doi.doi_type, ==, DMU_OT_ZAP_OTHER);
3240 ASSERT3S(doi.doi_physical_blocks_512, >=, 0);
3241 }
3242 dmu_objset_disown(os, FTAG);
3243
3244 /*
3245 * Destroy the dataset.
3246 */
3247 if (strchr(name, '@') != NULL) {
3248 VERIFY0(dsl_destroy_snapshot(name, B_FALSE));
3249 } else {
3250 VERIFY0(dsl_destroy_head(name));
3251 }
3252 return (0);
3253 }
3254
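/*
 * Snapshot the dataset using the test id as the snapshot name. EEXIST is
 * tolerated; ENOSPC is recorded and reported by returning B_FALSE.
 */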
3255 static boolean_t
3256 ztest_snapshot_create(char *osname, uint64_t id)
3257 {
3258 char snapname[MAXNAMELEN];
3259 int error;
3260
3261 (void) snprintf(snapname, sizeof (snapname), "%llu", (u_longlong_t)id);
3262
3263 error = dmu_objset_snapshot_one(osname, snapname);
3264 if (error == ENOSPC) {
3265 ztest_record_enospc(FTAG);
3266 return (B_FALSE);
3267 }
3268 if (error != 0 && error != EEXIST) {
3269 fatal(0, "ztest_snapshot_create(%s@%s) = %d", osname,
3270 snapname, error);
3271 }
3272 return (B_TRUE);
3273 }
3274
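/*
 * Destroy the snapshot named after the test id; ENOENT is tolerated.
 */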
3275 static boolean_t
3276 ztest_snapshot_destroy(char *osname, uint64_t id)
3277 {
3278 char snapname[MAXNAMELEN];
3279 int error;
3280
3281 (void) snprintf(snapname, MAXNAMELEN, "%s@%llu", osname,
3282 (u_longlong_t)id);
3283
3284 error = dsl_destroy_snapshot(snapname, B_FALSE);
3285 if (error != 0 && error != ENOENT)
3286 fatal(0, "ztest_snapshot_destroy(%s) = %d", snapname, error);
3287 return (B_TRUE);
3288 }
3289
3290 /* ARGSUSED */
3291 void
3292 ztest_dmu_objset_create_destroy(ztest_ds_t *zd, uint64_t id)
3293 {
3294 ztest_ds_t *zdtmp;
3295 int iters;
3296 int error;
3297 objset_t *os, *os2;
3298 char *name;
3299 zilog_t *zilog;
3300 int i;
3301
3302 zdtmp = umem_alloc(sizeof (ztest_ds_t), UMEM_NOFAIL);
3303 name = umem_alloc(MAXNAMELEN, UMEM_NOFAIL);
3304
3305 (void) rw_enter(&ztest_name_lock, RW_READER);
3306
3307 (void) snprintf(name, MAXNAMELEN, "%s/temp_%llu",
3308 ztest_opts.zo_pool, (u_longlong_t)id);
3309
3310 /*
3311 * If this dataset exists from a previous run, process its replay log
3312 * half of the time. If we don't replay it, then dsl_destroy_head()
3313 * (invoked from ztest_objset_destroy_cb()) should just throw it away.
3314 */
3315 if (ztest_random(2) == 0 &&
3316 dmu_objset_own(name, DMU_OST_OTHER, B_FALSE, FTAG, &os) == 0) {
3317 ztest_zd_init(zdtmp, NULL, os);
3318 zil_replay(os, zdtmp, ztest_replay_vector);
3319 ztest_zd_fini(zdtmp);
3320 dmu_objset_disown(os, FTAG);
3321 }
3322
3323 /*
3324 * There may be an old instance of the dataset we're about to
3325 * create lying around from a previous run. If so, destroy it
3326 * and all of its snapshots.
3327 */
3328 (void) dmu_objset_find(name, ztest_objset_destroy_cb, NULL,
3329 DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS);
3330
3331 /*
3332 * Verify that the destroyed dataset is no longer in the namespace.
3333 */
3334 VERIFY3U(ENOENT, ==, dmu_objset_own(name, DMU_OST_OTHER, B_TRUE,
3335 FTAG, &os));
3336
3337 /*
3338 * Verify that we can create a new dataset.
3339 */
3340 error = ztest_dataset_create(name);
3341 if (error) {
3342 if (error == ENOSPC) {
3343 ztest_record_enospc(FTAG);
3344 goto out;
3345 }
3346 fatal(0, "dmu_objset_create(%s) = %d", name, error);
3347 }
3348
3349 VERIFY0(dmu_objset_own(name, DMU_OST_OTHER, B_FALSE, FTAG, &os));
3350
3351 ztest_zd_init(zdtmp, NULL, os);
3352
3353 /*
3354 * Open the intent log for it.
3355 */
3356 zilog = zil_open(os, ztest_get_data);
3357
3358 /*
3359 * Put some objects in there, do a little I/O to them,
3360 * and randomly take a couple of snapshots along the way.
3361 */
3362 iters = ztest_random(5);
3363 for (i = 0; i < iters; i++) {
3364 ztest_dmu_object_alloc_free(zdtmp, id);
3365 if (ztest_random(iters) == 0)
3366 (void) ztest_snapshot_create(name, i);
3367 }
3368
3369 /*
3370 * Verify that we cannot create an existing dataset.
3371 */
3372 VERIFY3U(EEXIST, ==,
3373 dmu_objset_create(name, DMU_OST_OTHER, 0, NULL, NULL));
3374
3375 /*
3376 * Verify that we can hold an objset that is also owned.
3377 */
3378 VERIFY3U(0, ==, dmu_objset_hold(name, FTAG, &os2));
3379 dmu_objset_rele(os2, FTAG);
3380
3381 /*
3382 * Verify that we cannot own an objset that is already owned.
3383 */
3384 VERIFY3U(EBUSY, ==,
3385 dmu_objset_own(name, DMU_OST_OTHER, B_FALSE, FTAG, &os2));
3386
3387 zil_close(zilog);
3388 dmu_objset_disown(os, FTAG);
3389 ztest_zd_fini(zdtmp);
3390 out:
3391 (void) rw_exit(&ztest_name_lock);
3392
3393 umem_free(name, MAXNAMELEN);
3394 umem_free(zdtmp, sizeof (ztest_ds_t));
3395 }
3396
3397 /*
3398 * Verify that dmu_snapshot_{create,destroy,open,close} work as expected.
3399 */
3400 void
3401 ztest_dmu_snapshot_create_destroy(ztest_ds_t *zd, uint64_t id)
3402 {
3403 (void) rw_enter(&ztest_name_lock, RW_READER);
3404 (void) ztest_snapshot_destroy(zd->zd_name, id);
3405 (void) ztest_snapshot_create(zd->zd_name, id);
3406 (void) rw_exit(&ztest_name_lock);
3407 }
3408
3409 /*
3410 * Clean up non-standard snapshots and clones.
3411 */
3412 void
3413 ztest_dsl_dataset_cleanup(char *osname, uint64_t id)
3414 {
3415 char *snap1name;
3416 char *clone1name;
3417 char *snap2name;
3418 char *clone2name;
3419 char *snap3name;
3420 int error;
3421
3422 snap1name = umem_alloc(MAXNAMELEN, UMEM_NOFAIL);
3423 clone1name = umem_alloc(MAXNAMELEN, UMEM_NOFAIL);
3424 snap2name = umem_alloc(MAXNAMELEN, UMEM_NOFAIL);
3425 clone2name = umem_alloc(MAXNAMELEN, UMEM_NOFAIL);
3426 snap3name = umem_alloc(MAXNAMELEN, UMEM_NOFAIL);
3427
3428 (void) snprintf(snap1name, MAXNAMELEN, "%s@s1_%llu",
3429 osname, (u_longlong_t)id);
3430 (void) snprintf(clone1name, MAXNAMELEN, "%s/c1_%llu",
3431 osname, (u_longlong_t)id);
3432 (void) snprintf(snap2name, MAXNAMELEN, "%s@s2_%llu",
3433 clone1name, (u_longlong_t)id);
3434 (void) snprintf(clone2name, MAXNAMELEN, "%s/c2_%llu",
3435 osname, (u_longlong_t)id);
3436 (void) snprintf(snap3name, MAXNAMELEN, "%s@s3_%llu",
3437 clone1name, (u_longlong_t)id);
3438
3439 error = dsl_destroy_head(clone2name);
3440 if (error && error != ENOENT)
3441 fatal(0, "dsl_destroy_head(%s) = %d", clone2name, error);
3442 error = dsl_destroy_snapshot(snap3name, B_FALSE);
3443 if (error && error != ENOENT)
3444 fatal(0, "dsl_destroy_snapshot(%s) = %d", snap3name, error);
3445 error = dsl_destroy_snapshot(snap2name, B_FALSE);
3446 if (error && error != ENOENT)
3447 fatal(0, "dsl_destroy_snapshot(%s) = %d", snap2name, error);
3448 error = dsl_destroy_head(clone1name);
3449 if (error && error != ENOENT)
3450 fatal(0, "dsl_destroy_head(%s) = %d", clone1name, error);
3451 error = dsl_destroy_snapshot(snap1name, B_FALSE);
3452 if (error && error != ENOENT)
3453 fatal(0, "dsl_destroy_snapshot(%s) = %d", snap1name, error);
3454
3455 umem_free(snap1name, MAXNAMELEN);
3456 umem_free(clone1name, MAXNAMELEN);
3457 umem_free(snap2name, MAXNAMELEN);
3458 umem_free(clone2name, MAXNAMELEN);
3459 umem_free(snap3name, MAXNAMELEN);
3460 }
3461
3462 /*
3463 * Verify that dsl_dataset_promote() handles EBUSY.
3464 */
3465 void
3466 ztest_dsl_dataset_promote_busy(ztest_ds_t *zd, uint64_t id)
3467 {
3468 objset_t *os;
3469 char *snap1name;
3470 char *clone1name;
3471 char *snap2name;
3472 char *clone2name;
3473 char *snap3name;
3474 char *osname = zd->zd_name;
3475 int error;
3476
3477 snap1name = umem_alloc(MAXNAMELEN, UMEM_NOFAIL);
3478 clone1name = umem_alloc(MAXNAMELEN, UMEM_NOFAIL);
3479 snap2name = umem_alloc(MAXNAMELEN, UMEM_NOFAIL);
3480 clone2name = umem_alloc(MAXNAMELEN, UMEM_NOFAIL);
3481 snap3name = umem_alloc(MAXNAMELEN, UMEM_NOFAIL);
3482
3483 (void) rw_enter(&ztest_name_lock, RW_READER);
3484
3485 ztest_dsl_dataset_cleanup(osname, id);
3486
3487 (void) snprintf(snap1name, MAXNAMELEN, "%s@s1_%llu",
3488 osname, (u_longlong_t)id);
3489 (void) snprintf(clone1name, MAXNAMELEN, "%s/c1_%llu",
3490 osname, (u_longlong_t)id);
3491 (void) snprintf(snap2name, MAXNAMELEN, "%s@s2_%llu",
3492 clone1name, (u_longlong_t)id);
3493 (void) snprintf(clone2name, MAXNAMELEN, "%s/c2_%llu",
3494 osname, (u_longlong_t)id);
3495 (void) snprintf(snap3name, MAXNAMELEN, "%s@s3_%llu",
3496 clone1name, (u_longlong_t)id);
3497
3498 error = dmu_objset_snapshot_one(osname, strchr(snap1name, '@') + 1);
3499 if (error && error != EEXIST) {
3500 if (error == ENOSPC) {
3501 ztest_record_enospc(FTAG);
3502 goto out;
3503 }
3504 fatal(0, "dmu_take_snapshot(%s) = %d", snap1name, error);
3505 }
3506
3507 error = dmu_objset_clone(clone1name, snap1name);
3508 if (error) {
3509 if (error == ENOSPC) {
3510 ztest_record_enospc(FTAG);
3511 goto out;
3512 }
3513 fatal(0, "dmu_objset_clone(%s) = %d", clone1name, error);
3514 }
3515
3516 error = dmu_objset_snapshot_one(clone1name, strchr(snap2name, '@') + 1);
3517 if (error && error != EEXIST) {
3518 if (error == ENOSPC) {
3519 ztest_record_enospc(FTAG);
3520 goto out;
3521 }
3522 fatal(0, "dmu_objset_snapshot_one(%s) = %d", snap2name, error);
3523 }
3524
3525 error = dmu_objset_snapshot_one(clone1name, strchr(snap3name, '@') + 1);
3526 if (error && error != EEXIST) {
3527 if (error == ENOSPC) {
3528 ztest_record_enospc(FTAG);
3529 goto out;
3530 }
3531 fatal(0, "dmu_objset_snapshot_one(%s) = %d", snap3name, error);
3532 }
3533
3534 error = dmu_objset_clone(clone2name, snap3name);
3535 if (error) {
3536 if (error == ENOSPC) {
3537 ztest_record_enospc(FTAG);
3538 goto out;
3539 }
3540 fatal(0, "dmu_objset_clone(%s) = %d", clone2name, error);
3541 }
3542
3543 error = dmu_objset_own(snap2name, DMU_OST_ANY, B_TRUE, FTAG, &os);
3544 if (error)
3545 fatal(0, "dmu_objset_own(%s) = %d", snap2name, error);
3546 error = dsl_dataset_promote(clone2name, NULL);
3547 if (error != EBUSY)
3548 fatal(0, "dsl_dataset_promote(%s), %d, not EBUSY", clone2name,
3549 error);
3550 dmu_objset_disown(os, FTAG);
3551
3552 out:
3553 ztest_dsl_dataset_cleanup(osname, id);
3554
3555 (void) rw_exit(&ztest_name_lock);
3556
3557 umem_free(snap1name, MAXNAMELEN);
3558 umem_free(clone1name, MAXNAMELEN);
3559 umem_free(snap2name, MAXNAMELEN);
3560 umem_free(clone2name, MAXNAMELEN);
3561 umem_free(snap3name, MAXNAMELEN);
3562 }
3563
3564 #undef OD_ARRAY_SIZE
3565 #define OD_ARRAY_SIZE 4
3566
3567 /*
3568 * Verify that dmu_object_{alloc,free} work as expected.
3569 */
3570 void
3571 ztest_dmu_object_alloc_free(ztest_ds_t *zd, uint64_t id)
3572 {
3573 ztest_od_t *od;
3574 int batchsize;
3575 int size;
3576 int b;
3577
3578 size = sizeof(ztest_od_t) * OD_ARRAY_SIZE;
3579 od = umem_alloc(size, UMEM_NOFAIL);
3580 batchsize = OD_ARRAY_SIZE;
3581
3582 for (b = 0; b < batchsize; b++)
3583 ztest_od_init(od + b, id, FTAG, b, DMU_OT_UINT64_OTHER, 0, 0);
3584
3585 /*
3586 * Destroy the previous batch of objects, create a new batch,
3587 * and do some I/O on the new objects.
3588 */
3589 if (ztest_object_init(zd, od, size, B_TRUE) != 0)
3590 return;
3591
3592 while (ztest_random(4 * batchsize) != 0)
3593 ztest_io(zd, od[ztest_random(batchsize)].od_object,
3594 ztest_random(ZTEST_RANGE_LOCKS) << SPA_MAXBLOCKSHIFT);
3595
3596 umem_free(od, size);
3597 }
3598
3599 #undef OD_ARRAY_SIZE
3600 #define OD_ARRAY_SIZE 2
3601
3602 /*
3603 * Verify that dmu_{read,write} work as expected.
3604 */
3605 void
3606 ztest_dmu_read_write(ztest_ds_t *zd, uint64_t id)
3607 {
3608 int size;
3609 ztest_od_t *od;
3610
3611 objset_t *os = zd->zd_os;
3612 size = sizeof(ztest_od_t) * OD_ARRAY_SIZE;
3613 od = umem_alloc(size, UMEM_NOFAIL);
3614 dmu_tx_t *tx;
3615 int i, freeit, error;
3616 uint64_t n, s, txg;
3617 bufwad_t *packbuf, *bigbuf, *pack, *bigH, *bigT;
3618 uint64_t packobj, packoff, packsize, bigobj, bigoff, bigsize;
3619 uint64_t chunksize = (1000 + ztest_random(1000)) * sizeof (uint64_t);
3620 uint64_t regions = 997;
3621 uint64_t stride = 123456789ULL;
3622 uint64_t width = 40;
3623 int free_percent = 5;
3624
3625 /*
3626 * This test uses two objects, packobj and bigobj, that are always
3627 * updated together (i.e. in the same tx) so that their contents are
3628 * in sync and can be compared. Their contents relate to each other
3629 * in a simple way: packobj is a dense array of 'bufwad' structures,
3630 * while bigobj is a sparse array of the same bufwads. Specifically,
3631 * for any index n, there are three bufwads that should be identical:
3632 *
3633 * packobj, at offset n * sizeof (bufwad_t)
3634 * bigobj, at the head of the nth chunk
3635 * bigobj, at the tail of the nth chunk
3636 *
3637 * The chunk size is arbitrary. It doesn't have to be a power of two,
3638 * and it doesn't have any relation to the object blocksize.
3639 * The only requirement is that it can hold at least two bufwads.
3640 *
3641 * Normally, we write the bufwad to each of these locations.
3642 * However, free_percent of the time we instead write zeroes to
3643 * packobj and perform a dmu_free_range() on bigobj. By comparing
3644 * bigobj to packobj, we can verify that the DMU is correctly
3645 * tracking which parts of an object are allocated and free,
3646 * and that the contents of the allocated blocks are correct.
3647 */
3648
3649 /*
3650 * Read the directory info. If it's the first time, set things up.
3651 */
3652 ztest_od_init(od, id, FTAG, 0, DMU_OT_UINT64_OTHER, 0, chunksize);
3653 ztest_od_init(od + 1, id, FTAG, 1, DMU_OT_UINT64_OTHER, 0, chunksize);
3654
3655 if (ztest_object_init(zd, od, size, B_FALSE) != 0) {
3656 umem_free(od, size);
3657 return;
3658 }
3659
3660 bigobj = od[0].od_object;
3661 packobj = od[1].od_object;
3662 chunksize = od[0].od_gen;
3663 ASSERT(chunksize == od[1].od_gen);
3664
3665 /*
3666 * Prefetch a random chunk of the big object.
3667 * Our aim here is to get some async reads in flight
3668 * for blocks that we may free below; the DMU should
3669 * handle this race correctly.
3670 */
3671 n = ztest_random(regions) * stride + ztest_random(width);
3672 s = 1 + ztest_random(2 * width - 1);
3673 dmu_prefetch(os, bigobj, n * chunksize, s * chunksize);
3674
3675 /*
3676 * Pick a random index and compute the offsets into packobj and bigobj.
3677 */
3678 n = ztest_random(regions) * stride + ztest_random(width);
3679 s = 1 + ztest_random(width - 1);
3680
3681 packoff = n * sizeof (bufwad_t);
3682 packsize = s * sizeof (bufwad_t);
3683
3684 bigoff = n * chunksize;
3685 bigsize = s * chunksize;
3686
3687 packbuf = umem_alloc(packsize, UMEM_NOFAIL);
3688 bigbuf = umem_alloc(bigsize, UMEM_NOFAIL);
3689
3690 /*
3691 * free_percent of the time, free a range of bigobj rather than
3692 * overwriting it.
3693 */
3694 freeit = (ztest_random(100) < free_percent);
3695
3696 /*
3697 * Read the current contents of our objects.
3698 */
3699 error = dmu_read(os, packobj, packoff, packsize, packbuf,
3700 DMU_READ_PREFETCH);
3701 ASSERT0(error);
3702 error = dmu_read(os, bigobj, bigoff, bigsize, bigbuf,
3703 DMU_READ_PREFETCH);
3704 ASSERT0(error);
3705
3706 /*
3707 * Get a tx for the mods to both packobj and bigobj.
3708 */
3709 tx = dmu_tx_create(os);
3710
3711 dmu_tx_hold_write(tx, packobj, packoff, packsize);
3712
3713 if (freeit)
3714 dmu_tx_hold_free(tx, bigobj, bigoff, bigsize);
3715 else
3716 dmu_tx_hold_write(tx, bigobj, bigoff, bigsize);
3717
3718 /* This accounts for setting the checksum/compression. */
3719 dmu_tx_hold_bonus(tx, bigobj);
3720
3721 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
3722 if (txg == 0) {
3723 umem_free(packbuf, packsize);
3724 umem_free(bigbuf, bigsize);
3725 umem_free(od, size);
3726 return;
3727 }
3728
3729 dmu_object_set_checksum(os, bigobj,
3730 (enum zio_checksum)ztest_random_dsl_prop(ZFS_PROP_CHECKSUM), tx);
3731
3732 dmu_object_set_compress(os, bigobj,
3733 (enum zio_compress)ztest_random_dsl_prop(ZFS_PROP_COMPRESSION), tx);
3734
3735 /*
3736 * For each index from n to n + s, verify that the existing bufwad
3737 * in packobj matches the bufwads at the head and tail of the
3738 * corresponding chunk in bigobj. Then update all three bufwads
3739 * with the new values we want to write out.
3740 */
3741 for (i = 0; i < s; i++) {
3742 /* LINTED */
3743 pack = (bufwad_t *)((char *)packbuf + i * sizeof (bufwad_t));
3744 /* LINTED */
3745 bigH = (bufwad_t *)((char *)bigbuf + i * chunksize);
3746 /* LINTED */
3747 bigT = (bufwad_t *)((char *)bigH + chunksize) - 1;
3748
3749 ASSERT((uintptr_t)bigH - (uintptr_t)bigbuf < bigsize);
3750 ASSERT((uintptr_t)bigT - (uintptr_t)bigbuf < bigsize);
3751
3752 if (pack->bw_txg > txg)
3753 fatal(0, "future leak: got %llx, open txg is %llx",
3754 pack->bw_txg, txg);
3755
3756 if (pack->bw_data != 0 && pack->bw_index != n + i)
3757 fatal(0, "wrong index: got %llx, wanted %llx+%llx",
3758 pack->bw_index, n, i);
3759
3760 if (bcmp(pack, bigH, sizeof (bufwad_t)) != 0)
3761 fatal(0, "pack/bigH mismatch in %p/%p", pack, bigH);
3762
3763 if (bcmp(pack, bigT, sizeof (bufwad_t)) != 0)
3764 fatal(0, "pack/bigT mismatch in %p/%p", pack, bigT);
3765
3766 if (freeit) {
3767 bzero(pack, sizeof (bufwad_t));
3768 } else {
3769 pack->bw_index = n + i;
3770 pack->bw_txg = txg;
3771 pack->bw_data = 1 + ztest_random(-2ULL);
3772 }
3773 *bigH = *pack;
3774 *bigT = *pack;
3775 }
3776
3777 /*
3778 * We've verified all the old bufwads, and made new ones.
3779 * Now write them out.
3780 */
3781 dmu_write(os, packobj, packoff, packsize, packbuf, tx);
3782
3783 if (freeit) {
3784 if (ztest_opts.zo_verbose >= 7) {
3785 (void) printf("freeing offset %llx size %llx"
3786 " txg %llx\n",
3787 (u_longlong_t)bigoff,
3788 (u_longlong_t)bigsize,
3789 (u_longlong_t)txg);
3790 }
3791 VERIFY(0 == dmu_free_range(os, bigobj, bigoff, bigsize, tx));
3792 } else {
3793 if (ztest_opts.zo_verbose >= 7) {
3794 (void) printf("writing offset %llx size %llx"
3795 " txg %llx\n",
3796 (u_longlong_t)bigoff,
3797 (u_longlong_t)bigsize,
3798 (u_longlong_t)txg);
3799 }
3800 dmu_write(os, bigobj, bigoff, bigsize, bigbuf, tx);
3801 }
3802
3803 dmu_tx_commit(tx);
3804
3805 /*
3806 * Sanity check the stuff we just wrote.
3807 */
3808 {
3809 void *packcheck = umem_alloc(packsize, UMEM_NOFAIL);
3810 void *bigcheck = umem_alloc(bigsize, UMEM_NOFAIL);
3811
3812 VERIFY(0 == dmu_read(os, packobj, packoff,
3813 packsize, packcheck, DMU_READ_PREFETCH));
3814 VERIFY(0 == dmu_read(os, bigobj, bigoff,
3815 bigsize, bigcheck, DMU_READ_PREFETCH));
3816
3817 ASSERT(bcmp(packbuf, packcheck, packsize) == 0);
3818 ASSERT(bcmp(bigbuf, bigcheck, bigsize) == 0);
3819
3820 umem_free(packcheck, packsize);
3821 umem_free(bigcheck, bigsize);
3822 }
3823
3824 umem_free(packbuf, packsize);
3825 umem_free(bigbuf, bigsize);
3826 umem_free(od, size);
3827 }
3828
3829 void
3830 compare_and_update_pbbufs(uint64_t s, bufwad_t *packbuf, bufwad_t *bigbuf,
3831 uint64_t bigsize, uint64_t n, uint64_t chunksize, uint64_t txg)
3832 {
3833 uint64_t i;
3834 bufwad_t *pack;
3835 bufwad_t *bigH;
3836 bufwad_t *bigT;
3837
3838 /*
3839 * For each index from n to n + s, verify that the existing bufwad
3840 * in packobj matches the bufwads at the head and tail of the
3841 * corresponding chunk in bigobj. Then update all three bufwads
3842 * with the new values we want to write out.
3843 */
3844 for (i = 0; i < s; i++) {
3845 /* LINTED */
3846 pack = (bufwad_t *)((char *)packbuf + i * sizeof (bufwad_t));
3847 /* LINTED */
3848 bigH = (bufwad_t *)((char *)bigbuf + i * chunksize);
3849 /* LINTED */
3850 bigT = (bufwad_t *)((char *)bigH + chunksize) - 1;
3851
3852 ASSERT((uintptr_t)bigH - (uintptr_t)bigbuf < bigsize);
3853 ASSERT((uintptr_t)bigT - (uintptr_t)bigbuf < bigsize);
3854
3855 if (pack->bw_txg > txg)
3856 fatal(0, "future leak: got %llx, open txg is %llx",
3857 pack->bw_txg, txg);
3858
3859 if (pack->bw_data != 0 && pack->bw_index != n + i)
3860 fatal(0, "wrong index: got %llx, wanted %llx+%llx",
3861 pack->bw_index, n, i);
3862
3863 if (bcmp(pack, bigH, sizeof (bufwad_t)) != 0)
3864 fatal(0, "pack/bigH mismatch in %p/%p", pack, bigH);
3865
3866 if (bcmp(pack, bigT, sizeof (bufwad_t)) != 0)
3867 fatal(0, "pack/bigT mismatch in %p/%p", pack, bigT);
3868
3869 pack->bw_index = n + i;
3870 pack->bw_txg = txg;
3871 pack->bw_data = 1 + ztest_random(-2ULL);
3872
3873 *bigH = *pack;
3874 *bigT = *pack;
3875 }
3876 }
3877
3878 #undef OD_ARRAY_SIZE
3879 #define OD_ARRAY_SIZE 2
3880
3881 void
3882 ztest_dmu_read_write_zcopy(ztest_ds_t *zd, uint64_t id)
3883 {
3884 objset_t *os = zd->zd_os;
3885 ztest_od_t *od;
3886 dmu_tx_t *tx;
3887 uint64_t i;
3888 int error;
3889 int size;
3890 uint64_t n, s, txg;
3891 bufwad_t *packbuf, *bigbuf;
3892 uint64_t packobj, packoff, packsize, bigobj, bigoff, bigsize;
3893 uint64_t blocksize = ztest_random_blocksize();
3894 uint64_t chunksize = blocksize;
3895 uint64_t regions = 997;
3896 uint64_t stride = 123456789ULL;
3897 uint64_t width = 9;
3898 dmu_buf_t *bonus_db;
3899 arc_buf_t **bigbuf_arcbufs;
3900 dmu_object_info_t doi;
3901
3902 size = sizeof(ztest_od_t) * OD_ARRAY_SIZE;
3903 od = umem_alloc(size, UMEM_NOFAIL);
3904
3905 /*
3906 * This test uses two objects, packobj and bigobj, that are always
3907 * updated together (i.e. in the same tx) so that their contents are
3908 * in sync and can be compared. Their contents relate to each other
3909 * in a simple way: packobj is a dense array of 'bufwad' structures,
3910 * while bigobj is a sparse array of the same bufwads. Specifically,
3911 * for any index n, there are three bufwads that should be identical:
3912 *
3913 * packobj, at offset n * sizeof (bufwad_t)
3914 * bigobj, at the head of the nth chunk
3915 * bigobj, at the tail of the nth chunk
3916 *
3917 * The chunk size is set equal to bigobj block size so that
3918 * dmu_assign_arcbuf() can be tested for object updates.
3919 */
3920
3921 /*
3922 * Read the directory info. If it's the first time, set things up.
3923 */
3924 ztest_od_init(od, id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0);
3925 ztest_od_init(od + 1, id, FTAG, 1, DMU_OT_UINT64_OTHER, 0, chunksize);
3926
3927
3928 if (ztest_object_init(zd, od, size, B_FALSE) != 0) {
3929 umem_free(od, size);
3930 return;
3931 }
3932
3933 bigobj = od[0].od_object;
3934 packobj = od[1].od_object;
3935 blocksize = od[0].od_blocksize;
3936 chunksize = blocksize;
3937 ASSERT(chunksize == od[1].od_gen);
3938
3939 VERIFY(dmu_object_info(os, bigobj, &doi) == 0);
3940 VERIFY(ISP2(doi.doi_data_block_size));
3941 VERIFY(chunksize == doi.doi_data_block_size);
3942 VERIFY(chunksize >= 2 * sizeof (bufwad_t));
3943
3944 /*
3945 * Pick a random index and compute the offsets into packobj and bigobj.
3946 */
3947 n = ztest_random(regions) * stride + ztest_random(width);
3948 s = 1 + ztest_random(width - 1);
3949
3950 packoff = n * sizeof (bufwad_t);
3951 packsize = s * sizeof (bufwad_t);
3952
3953 bigoff = n * chunksize;
3954 bigsize = s * chunksize;
3955
3956 packbuf = umem_zalloc(packsize, UMEM_NOFAIL);
3957 bigbuf = umem_zalloc(bigsize, UMEM_NOFAIL);
3958
3959 VERIFY3U(0, ==, dmu_bonus_hold(os, bigobj, FTAG, &bonus_db));
3960
3961 bigbuf_arcbufs = umem_zalloc(2 * s * sizeof (arc_buf_t *), UMEM_NOFAIL);
3962
3963 /*
3964 	 * Iteration 0 tests zcopy for DB_UNCACHED dbufs.
3965 	 * Iteration 1 tests zcopy to already referenced dbufs.
3966 	 * Iteration 2 tests zcopy to a dbuf dirtied in the same txg.
3967 	 * Iteration 3 tests zcopy to a dbuf dirtied in a previous txg.
3968 	 * Iteration 4 tests zcopy when the dbuf is no longer dirty.
3969 	 * Iteration 5 tests zcopy when it can't be done.
3970 	 * Iteration 6 performs one more zcopy write.
3971 */
3972 for (i = 0; i < 7; i++) {
3973 uint64_t j;
3974 uint64_t off;
3975
3976 /*
3977 * In iteration 5 (i == 5) use arcbufs
3978 * that don't match bigobj blksz to test
3979 * dmu_assign_arcbuf() when it can't directly
3980 * assign an arcbuf to a dbuf.
3981 */
3982 for (j = 0; j < s; j++) {
3983 if (i != 5) {
3984 bigbuf_arcbufs[j] =
3985 dmu_request_arcbuf(bonus_db, chunksize);
3986 } else {
3987 bigbuf_arcbufs[2 * j] =
3988 dmu_request_arcbuf(bonus_db, chunksize / 2);
3989 bigbuf_arcbufs[2 * j + 1] =
3990 dmu_request_arcbuf(bonus_db, chunksize / 2);
3991 }
3992 }
3993
3994 /*
3995 * Get a tx for the mods to both packobj and bigobj.
3996 */
3997 tx = dmu_tx_create(os);
3998
3999 dmu_tx_hold_write(tx, packobj, packoff, packsize);
4000 dmu_tx_hold_write(tx, bigobj, bigoff, bigsize);
4001
4002 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
4003 if (txg == 0) {
4004 umem_free(packbuf, packsize);
4005 umem_free(bigbuf, bigsize);
4006 for (j = 0; j < s; j++) {
4007 if (i != 5) {
4008 dmu_return_arcbuf(bigbuf_arcbufs[j]);
4009 } else {
4010 dmu_return_arcbuf(
4011 bigbuf_arcbufs[2 * j]);
4012 dmu_return_arcbuf(
4013 bigbuf_arcbufs[2 * j + 1]);
4014 }
4015 }
4016 umem_free(bigbuf_arcbufs, 2 * s * sizeof (arc_buf_t *));
4017 umem_free(od, size);
4018 dmu_buf_rele(bonus_db, FTAG);
4019 return;
4020 }
4021
4022 /*
4023 		 * Half of the time, skip reading the objects in the first
4024 		 * iteration to test dmu_assign_arcbuf() for the case where
4025 		 * there are no existing dbufs for the specified offsets.
4026 */
4027 if (i != 0 || ztest_random(2) != 0) {
4028 error = dmu_read(os, packobj, packoff,
4029 packsize, packbuf, DMU_READ_PREFETCH);
4030 ASSERT0(error);
4031 error = dmu_read(os, bigobj, bigoff, bigsize,
4032 bigbuf, DMU_READ_PREFETCH);
4033 ASSERT0(error);
4034 }
4035 compare_and_update_pbbufs(s, packbuf, bigbuf, bigsize,
4036 n, chunksize, txg);
4037
4038 /*
4039 * We've verified all the old bufwads, and made new ones.
4040 * Now write them out.
4041 */
4042 dmu_write(os, packobj, packoff, packsize, packbuf, tx);
4043 if (ztest_opts.zo_verbose >= 7) {
4044 (void) printf("writing offset %llx size %llx"
4045 " txg %llx\n",
4046 (u_longlong_t)bigoff,
4047 (u_longlong_t)bigsize,
4048 (u_longlong_t)txg);
4049 }
4050 for (off = bigoff, j = 0; j < s; j++, off += chunksize) {
4051 dmu_buf_t *dbt;
4052 if (i != 5) {
4053 bcopy((caddr_t)bigbuf + (off - bigoff),
4054 bigbuf_arcbufs[j]->b_data, chunksize);
4055 } else {
4056 bcopy((caddr_t)bigbuf + (off - bigoff),
4057 bigbuf_arcbufs[2 * j]->b_data,
4058 chunksize / 2);
4059 bcopy((caddr_t)bigbuf + (off - bigoff) +
4060 chunksize / 2,
4061 bigbuf_arcbufs[2 * j + 1]->b_data,
4062 chunksize / 2);
4063 }
4064
4065 if (i == 1) {
4066 VERIFY(dmu_buf_hold(os, bigobj, off,
4067 FTAG, &dbt, DMU_READ_NO_PREFETCH) == 0);
4068 }
4069 if (i != 5) {
4070 dmu_assign_arcbuf(bonus_db, off,
4071 bigbuf_arcbufs[j], tx);
4072 } else {
4073 dmu_assign_arcbuf(bonus_db, off,
4074 bigbuf_arcbufs[2 * j], tx);
4075 dmu_assign_arcbuf(bonus_db,
4076 off + chunksize / 2,
4077 bigbuf_arcbufs[2 * j + 1], tx);
4078 }
4079 if (i == 1) {
4080 dmu_buf_rele(dbt, FTAG);
4081 }
4082 }
4083 dmu_tx_commit(tx);
4084
4085 /*
4086 * Sanity check the stuff we just wrote.
4087 */
4088 {
4089 void *packcheck = umem_alloc(packsize, UMEM_NOFAIL);
4090 void *bigcheck = umem_alloc(bigsize, UMEM_NOFAIL);
4091
4092 VERIFY(0 == dmu_read(os, packobj, packoff,
4093 packsize, packcheck, DMU_READ_PREFETCH));
4094 VERIFY(0 == dmu_read(os, bigobj, bigoff,
4095 bigsize, bigcheck, DMU_READ_PREFETCH));
4096
4097 ASSERT(bcmp(packbuf, packcheck, packsize) == 0);
4098 ASSERT(bcmp(bigbuf, bigcheck, bigsize) == 0);
4099
4100 umem_free(packcheck, packsize);
4101 umem_free(bigcheck, bigsize);
4102 }
4103 if (i == 2) {
4104 txg_wait_open(dmu_objset_pool(os), 0);
4105 } else if (i == 3) {
4106 txg_wait_synced(dmu_objset_pool(os), 0);
4107 }
4108 }
4109
4110 dmu_buf_rele(bonus_db, FTAG);
4111 umem_free(packbuf, packsize);
4112 umem_free(bigbuf, bigsize);
4113 umem_free(bigbuf_arcbufs, 2 * s * sizeof (arc_buf_t *));
4114 umem_free(od, size);
4115 }
4116
4117 /* ARGSUSED */
4118 void
4119 ztest_dmu_write_parallel(ztest_ds_t *zd, uint64_t id)
4120 {
4121 ztest_od_t *od;
4122
4123 od = umem_alloc(sizeof(ztest_od_t), UMEM_NOFAIL);
4124 uint64_t offset = (1ULL << (ztest_random(20) + 43)) +
4125 (ztest_random(ZTEST_RANGE_LOCKS) << SPA_MAXBLOCKSHIFT);
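	/*
	 * The offset above is a random power of two between 2^43 (8 TiB) and
	 * 2^62 (4 EiB), plus a small random multiple of the maximum block
	 * size (assuming SPA_MAXBLOCKSHIFT of 17, i.e. 128K, in this tree).
	 */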
4126
4127 /*
4128 * Have multiple threads write to large offsets in an object
4129 * to verify that parallel writes to an object -- even to the
4130 	 * same blocks within the object -- don't cause any trouble.
4131 */
4132 ztest_od_init(od, ID_PARALLEL, FTAG, 0, DMU_OT_UINT64_OTHER, 0, 0);
4133
4134 if (ztest_object_init(zd, od, sizeof (ztest_od_t), B_FALSE) != 0)
4135 return;
4136
4137 while (ztest_random(10) != 0)
4138 ztest_io(zd, od->od_object, offset);
4139
4140 umem_free(od, sizeof(ztest_od_t));
4141 }
4142
4143 void
4144 ztest_dmu_prealloc(ztest_ds_t *zd, uint64_t id)
4145 {
4146 ztest_od_t *od;
4147 uint64_t offset = (1ULL << (ztest_random(4) + SPA_MAXBLOCKSHIFT)) +
4148 (ztest_random(ZTEST_RANGE_LOCKS) << SPA_MAXBLOCKSHIFT);
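	/*
	 * The offset above is 1, 2, 4 or 8 maximum-size blocks into the
	 * object plus a small random multiple of the maximum block size, so
	 * the preallocation, truncation and writes below stay block-aligned
	 * (again assuming SPA_MAXBLOCKSHIFT of 17, i.e. 128K blocks).
	 */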
4149 uint64_t count = ztest_random(20) + 1;
4150 uint64_t blocksize = ztest_random_blocksize();
4151 void *data;
4152
4153 od = umem_alloc(sizeof(ztest_od_t), UMEM_NOFAIL);
4154
4155 ztest_od_init(od, id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0);
4156
4157 if (ztest_object_init(zd, od, sizeof (ztest_od_t), !ztest_random(2)) != 0) {
4158 umem_free(od, sizeof(ztest_od_t));
4159 return;
4160 }
4161
4162 if (ztest_truncate(zd, od->od_object, offset, count * blocksize) != 0) {
4163 umem_free(od, sizeof(ztest_od_t));
4164 return;
4165 }
4166
4167 ztest_prealloc(zd, od->od_object, offset, count * blocksize);
4168
4169 data = umem_zalloc(blocksize, UMEM_NOFAIL);
4170
4171 while (ztest_random(count) != 0) {
4172 uint64_t randoff = offset + (ztest_random(count) * blocksize);
4173 if (ztest_write(zd, od->od_object, randoff, blocksize,
4174 data) != 0)
4175 break;
4176 while (ztest_random(4) != 0)
4177 ztest_io(zd, od->od_object, randoff);
4178 }
4179
4180 umem_free(data, blocksize);
4181 umem_free(od, sizeof(ztest_od_t));
4182 }
4183
4184 /*
4185 * Verify that zap_{create,destroy,add,remove,update} work as expected.
4186 */
4187 #define ZTEST_ZAP_MIN_INTS 1
4188 #define ZTEST_ZAP_MAX_INTS 4
4189 #define ZTEST_ZAP_MAX_PROPS 1000
4190
4191 void
4192 ztest_zap(ztest_ds_t *zd, uint64_t id)
4193 {
4194 objset_t *os = zd->zd_os;
4195 ztest_od_t *od;
4196 uint64_t object;
4197 uint64_t txg, last_txg;
4198 uint64_t value[ZTEST_ZAP_MAX_INTS];
4199 uint64_t zl_ints, zl_intsize, prop;
4200 int i, ints;
4201 dmu_tx_t *tx;
4202 char propname[100], txgname[100];
4203 int error;
4204 char *hc[2] = { "s.acl.h", ".s.open.h.hyLZlg" };
4205
4206 od = umem_alloc(sizeof(ztest_od_t), UMEM_NOFAIL);
4207 ztest_od_init(od, id, FTAG, 0, DMU_OT_ZAP_OTHER, 0, 0);
4208
4209 if (ztest_object_init(zd, od, sizeof (ztest_od_t),
4210 !ztest_random(2)) != 0)
4211 goto out;
4212
4213 object = od->od_object;
4214
4215 /*
4216 * Generate a known hash collision, and verify that
4217 * we can lookup and remove both entries.
4218 */
4219 tx = dmu_tx_create(os);
4220 dmu_tx_hold_zap(tx, object, B_TRUE, NULL);
4221 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
4222 if (txg == 0)
4223 goto out;
4224 for (i = 0; i < 2; i++) {
4225 value[i] = i;
4226 VERIFY3U(0, ==, zap_add(os, object, hc[i], sizeof (uint64_t),
4227 1, &value[i], tx));
4228 }
4229 for (i = 0; i < 2; i++) {
4230 VERIFY3U(EEXIST, ==, zap_add(os, object, hc[i],
4231 sizeof (uint64_t), 1, &value[i], tx));
4232 VERIFY3U(0, ==,
4233 zap_length(os, object, hc[i], &zl_intsize, &zl_ints));
4234 ASSERT3U(zl_intsize, ==, sizeof (uint64_t));
4235 ASSERT3U(zl_ints, ==, 1);
4236 }
4237 for (i = 0; i < 2; i++) {
4238 VERIFY3U(0, ==, zap_remove(os, object, hc[i], tx));
4239 }
4240 dmu_tx_commit(tx);
4241
4242 /*
4243 	 * Generate a bunch of random entries.
4244 */
4245 ints = MAX(ZTEST_ZAP_MIN_INTS, object % ZTEST_ZAP_MAX_INTS);
4246
4247 prop = ztest_random(ZTEST_ZAP_MAX_PROPS);
4248 (void) sprintf(propname, "prop_%llu", (u_longlong_t)prop);
4249 (void) sprintf(txgname, "txg_%llu", (u_longlong_t)prop);
4250 bzero(value, sizeof (value));
4251 last_txg = 0;
4252
4253 /*
4254 * If these zap entries already exist, validate their contents.
4255 */
4256 error = zap_length(os, object, txgname, &zl_intsize, &zl_ints);
4257 if (error == 0) {
4258 ASSERT3U(zl_intsize, ==, sizeof (uint64_t));
4259 ASSERT3U(zl_ints, ==, 1);
4260
4261 VERIFY(zap_lookup(os, object, txgname, zl_intsize,
4262 zl_ints, &last_txg) == 0);
4263
4264 VERIFY(zap_length(os, object, propname, &zl_intsize,
4265 &zl_ints) == 0);
4266
4267 ASSERT3U(zl_intsize, ==, sizeof (uint64_t));
4268 ASSERT3U(zl_ints, ==, ints);
4269
4270 VERIFY(zap_lookup(os, object, propname, zl_intsize,
4271 zl_ints, value) == 0);
4272
4273 for (i = 0; i < ints; i++) {
4274 ASSERT3U(value[i], ==, last_txg + object + i);
4275 }
4276 } else {
4277 ASSERT3U(error, ==, ENOENT);
4278 }
4279
4280 /*
4281 * Atomically update two entries in our zap object.
4282 * The first is named txg_%llu, and contains the txg
4283 * in which the property was last updated. The second
4284 * is named prop_%llu, and the nth element of its value
4285 * should be txg + object + n.
4286 */
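	/*
	 * Worked example (illustrative values only): if prop == 42,
	 * txg == 100, object == 7 and ints == 3, then after the update
	 * below txg_42 holds 100 and prop_42 holds { 107, 108, 109 }.
	 */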
4287 tx = dmu_tx_create(os);
4288 dmu_tx_hold_zap(tx, object, B_TRUE, NULL);
4289 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
4290 if (txg == 0)
4291 goto out;
4292
4293 if (last_txg > txg)
4294 fatal(0, "zap future leak: old %llu new %llu", last_txg, txg);
4295
4296 for (i = 0; i < ints; i++)
4297 value[i] = txg + object + i;
4298
4299 VERIFY3U(0, ==, zap_update(os, object, txgname, sizeof (uint64_t),
4300 1, &txg, tx));
4301 VERIFY3U(0, ==, zap_update(os, object, propname, sizeof (uint64_t),
4302 ints, value, tx));
4303
4304 dmu_tx_commit(tx);
4305
4306 /*
4307 * Remove a random pair of entries.
4308 */
4309 prop = ztest_random(ZTEST_ZAP_MAX_PROPS);
4310 (void) sprintf(propname, "prop_%llu", (u_longlong_t)prop);
4311 (void) sprintf(txgname, "txg_%llu", (u_longlong_t)prop);
4312
4313 error = zap_length(os, object, txgname, &zl_intsize, &zl_ints);
4314
4315 if (error == ENOENT)
4316 goto out;
4317
4318 ASSERT0(error);
4319
4320 tx = dmu_tx_create(os);
4321 dmu_tx_hold_zap(tx, object, B_TRUE, NULL);
4322 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
4323 if (txg == 0)
4324 goto out;
4325 VERIFY3U(0, ==, zap_remove(os, object, txgname, tx));
4326 VERIFY3U(0, ==, zap_remove(os, object, propname, tx));
4327 dmu_tx_commit(tx);
4328 out:
4329 umem_free(od, sizeof(ztest_od_t));
4330 }
4331
4332 /*
4333 * Testcase to test the upgrading of a microzap to fatzap.
4334 */
4335 void
4336 ztest_fzap(ztest_ds_t *zd, uint64_t id)
4337 {
4338 objset_t *os = zd->zd_os;
4339 ztest_od_t *od;
4340 uint64_t object, txg;
4341 int i;
4342
4343 od = umem_alloc(sizeof(ztest_od_t), UMEM_NOFAIL);
4344 ztest_od_init(od, id, FTAG, 0, DMU_OT_ZAP_OTHER, 0, 0);
4345
4346 if (ztest_object_init(zd, od, sizeof (ztest_od_t),
4347 !ztest_random(2)) != 0)
4348 goto out;
4349 object = od->od_object;
4350
4351 /*
4352 * Add entries to this ZAP and make sure it spills over
4353 * and gets upgraded to a fatzap. Also, since we are adding
4354 * 2050 entries we should see ptrtbl growth and leaf-block split.
4355 */
4356 for (i = 0; i < 2050; i++) {
4357 char name[MAXNAMELEN];
4358 uint64_t value = i;
4359 dmu_tx_t *tx;
4360 int error;
4361
4362 (void) snprintf(name, sizeof (name), "fzap-%llu-%llu",
4363 (u_longlong_t)id, (u_longlong_t)value);
4364
4365 tx = dmu_tx_create(os);
4366 dmu_tx_hold_zap(tx, object, B_TRUE, name);
4367 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
4368 if (txg == 0)
4369 goto out;
4370 error = zap_add(os, object, name, sizeof (uint64_t), 1,
4371 &value, tx);
4372 ASSERT(error == 0 || error == EEXIST);
4373 dmu_tx_commit(tx);
4374 }
4375 out:
4376 umem_free(od, sizeof(ztest_od_t));
4377 }
4378
4379 /* ARGSUSED */
4380 void
4381 ztest_zap_parallel(ztest_ds_t *zd, uint64_t id)
4382 {
4383 objset_t *os = zd->zd_os;
4384 ztest_od_t *od;
4385 uint64_t txg, object, count, wsize, wc, zl_wsize, zl_wc;
4386 dmu_tx_t *tx;
4387 int i, namelen, error;
4388 int micro = ztest_random(2);
4389 char name[20], string_value[20];
4390 void *data;
4391
4392 od = umem_alloc(sizeof(ztest_od_t), UMEM_NOFAIL);
4393 ztest_od_init(od, ID_PARALLEL, FTAG, micro, DMU_OT_ZAP_OTHER, 0, 0);
4394
4395 if (ztest_object_init(zd, od, sizeof (ztest_od_t), B_FALSE) != 0) {
4396 umem_free(od, sizeof(ztest_od_t));
4397 return;
4398 }
4399
4400 object = od->od_object;
4401
4402 /*
4403 * Generate a random name of the form 'xxx.....' where each
4404 * x is a random printable character and the dots are dots.
4405 * There are 94 such characters, and the name length goes from
4406 * 6 to 20, so there are 94^3 * 15 = 12,458,760 possible names.
4407 */
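	/*
	 * For example (purely illustrative), namelen == 9 would yield a name
	 * such as "k%7.....": three random printable characters, five dots,
	 * then the terminating NUL.
	 */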
4408 namelen = ztest_random(sizeof (name) - 5) + 5 + 1;
4409
4410 for (i = 0; i < 3; i++)
4411 name[i] = '!' + ztest_random('~' - '!' + 1);
4412 for (; i < namelen - 1; i++)
4413 name[i] = '.';
4414 name[i] = '\0';
4415
4416 if ((namelen & 1) || micro) {
4417 wsize = sizeof (txg);
4418 wc = 1;
4419 data = &txg;
4420 } else {
4421 wsize = 1;
4422 wc = namelen;
4423 data = string_value;
4424 }
4425
4426 count = -1ULL;
4427 VERIFY0(zap_count(os, object, &count));
4428 ASSERT(count != -1ULL);
4429
4430 /*
4431 * Select an operation: length, lookup, add, update, remove.
4432 */
4433 i = ztest_random(5);
4434
4435 if (i >= 2) {
4436 tx = dmu_tx_create(os);
4437 dmu_tx_hold_zap(tx, object, B_TRUE, NULL);
4438 txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
4439 		if (txg == 0) {
			umem_free(od, sizeof (ztest_od_t));
4440 			return;
		}
4441 bcopy(name, string_value, namelen);
4442 } else {
4443 tx = NULL;
4444 txg = 0;
4445 bzero(string_value, namelen);
4446 }
4447
4448 switch (i) {
4449
4450 case 0:
4451 error = zap_length(os, object, name, &zl_wsize, &zl_wc);
4452 if (error == 0) {
4453 ASSERT3U(wsize, ==, zl_wsize);
4454 ASSERT3U(wc, ==, zl_wc);
4455 } else {
4456 ASSERT3U(error, ==, ENOENT);
4457 }
4458 break;
4459
4460 case 1:
4461 error = zap_lookup(os, object, name, wsize, wc, data);
4462 if (error == 0) {
4463 if (data == string_value &&
4464 bcmp(name, data, namelen) != 0)
4465 fatal(0, "name '%s' != val '%s' len %d",
4466 name, data, namelen);
4467 } else {
4468 ASSERT3U(error, ==, ENOENT);
4469 }
4470 break;
4471
4472 case 2:
4473 error = zap_add(os, object, name, wsize, wc, data, tx);
4474 ASSERT(error == 0 || error == EEXIST);
4475 break;
4476
4477 case 3:
4478 VERIFY(zap_update(os, object, name, wsize, wc, data, tx) == 0);
4479 break;
4480
4481 case 4:
4482 error = zap_remove(os, object, name, tx);
4483 ASSERT(error == 0 || error == ENOENT);
4484 break;
4485 }
4486
4487 if (tx != NULL)
4488 dmu_tx_commit(tx);
4489
4490 umem_free(od, sizeof(ztest_od_t));
4491 }
4492
4493 /*
4494 * Commit callback data.
4495 */
4496 typedef struct ztest_cb_data {
4497 list_node_t zcd_node;
4498 uint64_t zcd_txg;
4499 int zcd_expected_err;
4500 boolean_t zcd_added;
4501 boolean_t zcd_called;
4502 spa_t *zcd_spa;
4503 } ztest_cb_data_t;
4504
4505 /* This is the actual commit callback function */
4506 static void
4507 ztest_commit_callback(void *arg, int error)
4508 {
4509 ztest_cb_data_t *data = arg;
4510 uint64_t synced_txg;
4511
4512 VERIFY(data != NULL);
4513 VERIFY3S(data->zcd_expected_err, ==, error);
4514 VERIFY(!data->zcd_called);
4515
4516 synced_txg = spa_last_synced_txg(data->zcd_spa);
4517 if (data->zcd_txg > synced_txg)
4518 fatal(0, "commit callback of txg %" PRIu64 " called prematurely"
4519 ", last synced txg = %" PRIu64 "\n", data->zcd_txg,
4520 synced_txg);
4521
4522 data->zcd_called = B_TRUE;
4523
4524 if (error == ECANCELED) {
4525 ASSERT0(data->zcd_txg);
4526 ASSERT(!data->zcd_added);
4527
4528 /*
4529 * The private callback data should be destroyed here, but
4530 * since we are going to check the zcd_called field after
4531 * dmu_tx_abort(), we will destroy it there.
4532 */
4533 return;
4534 }
4535
4536 ASSERT(data->zcd_added);
4537 ASSERT3U(data->zcd_txg, !=, 0);
4538
4539 (void) mutex_enter(&zcl.zcl_callbacks_lock);
4540
4541 	/* Track the smallest txg delay we've seen for any callback */
4542 if ((synced_txg - data->zcd_txg) < zc_min_txg_delay)
4543 zc_min_txg_delay = synced_txg - data->zcd_txg;
4544
4545 /* Remove our callback from the list */
4546 list_remove(&zcl.zcl_callbacks, data);
4547
4548 (void) mutex_exit(&zcl.zcl_callbacks_lock);
4549
4550 umem_free(data, sizeof (ztest_cb_data_t));
4551 }
4552
4553 /* Allocate and initialize callback data structure */
4554 static ztest_cb_data_t *
4555 ztest_create_cb_data(objset_t *os, uint64_t txg)
4556 {
4557 ztest_cb_data_t *cb_data;
4558
4559 cb_data = umem_zalloc(sizeof (ztest_cb_data_t), UMEM_NOFAIL);
4560
4561 cb_data->zcd_txg = txg;
4562 cb_data->zcd_spa = dmu_objset_spa(os);
4563 list_link_init(&cb_data->zcd_node);
4564
4565 return (cb_data);
4566 }
4567
4568 /*
4569 * Commit callback test.
4570 */
4571 void
4572 ztest_dmu_commit_callbacks(ztest_ds_t *zd, uint64_t id)
4573 {
4574 objset_t *os = zd->zd_os;
4575 ztest_od_t *od;
4576 dmu_tx_t *tx;
4577 ztest_cb_data_t *cb_data[3], *tmp_cb;
4578 uint64_t old_txg, txg;
4579 int i, error = 0;
4580
4581 od = umem_alloc(sizeof(ztest_od_t), UMEM_NOFAIL);
4582 ztest_od_init(od, id, FTAG, 0, DMU_OT_UINT64_OTHER, 0, 0);
4583
4584 if (ztest_object_init(zd, od, sizeof (ztest_od_t), B_FALSE) != 0) {
4585 umem_free(od, sizeof(ztest_od_t));
4586 return;
4587 }
4588
4589 tx = dmu_tx_create(os);
4590
4591 cb_data[0] = ztest_create_cb_data(os, 0);
4592 dmu_tx_callback_register(tx, ztest_commit_callback, cb_data[0]);
4593
4594 dmu_tx_hold_write(tx, od->od_object, 0, sizeof (uint64_t));
4595
4596 /* Every once in a while, abort the transaction on purpose */
4597 if (ztest_random(100) == 0)
4598 error = -1;
4599
4600 if (!error)
4601 error = dmu_tx_assign(tx, TXG_NOWAIT);
4602
4603 txg = error ? 0 : dmu_tx_get_txg(tx);
4604
4605 cb_data[0]->zcd_txg = txg;
4606 cb_data[1] = ztest_create_cb_data(os, txg);
4607 dmu_tx_callback_register(tx, ztest_commit_callback, cb_data[1]);
4608
4609 if (error) {
4610 /*
4611 * It's not a strict requirement to call the registered
4612 		 * callbacks from inside dmu_tx_abort(), but that's what is
4613 		 * supposed to happen in the current implementation, so we
4614 		 * check for that here.
4615 */
4616 for (i = 0; i < 2; i++) {
4617 cb_data[i]->zcd_expected_err = ECANCELED;
4618 VERIFY(!cb_data[i]->zcd_called);
4619 }
4620
4621 dmu_tx_abort(tx);
4622
4623 for (i = 0; i < 2; i++) {
4624 VERIFY(cb_data[i]->zcd_called);
4625 umem_free(cb_data[i], sizeof (ztest_cb_data_t));
4626 }
4627
4628 umem_free(od, sizeof(ztest_od_t));
4629 return;
4630 }
4631
4632 cb_data[2] = ztest_create_cb_data(os, txg);
4633 dmu_tx_callback_register(tx, ztest_commit_callback, cb_data[2]);
4634
4635 /*
4636 * Read existing data to make sure there isn't a future leak.
4637 */
4638 VERIFY(0 == dmu_read(os, od->od_object, 0, sizeof (uint64_t),
4639 &old_txg, DMU_READ_PREFETCH));
4640
4641 if (old_txg > txg)
4642 fatal(0, "future leak: got %" PRIu64 ", open txg is %" PRIu64,
4643 old_txg, txg);
4644
4645 dmu_write(os, od->od_object, 0, sizeof (uint64_t), &txg, tx);
4646
4647 (void) mutex_enter(&zcl.zcl_callbacks_lock);
4648
4649 /*
4650 * Since commit callbacks don't have any ordering requirement and since
4651 * it is theoretically possible for a commit callback to be called
4652 * after an arbitrary amount of time has elapsed since its txg has been
4653 * synced, it is difficult to reliably determine whether a commit
4654 * callback hasn't been called due to high load or due to a flawed
4655 * implementation.
4656 *
4657 	 * In practice, we assume that if a commit callback hasn't been
4658 	 * called after a certain number of txgs, then most likely there's
4659 	 * an implementation bug.
4660 */
4661 tmp_cb = list_head(&zcl.zcl_callbacks);
4662 if (tmp_cb != NULL &&
4663 tmp_cb->zcd_txg + ZTEST_COMMIT_CB_THRESH < txg) {
4664 fatal(0, "Commit callback threshold exceeded, oldest txg: %"
4665 PRIu64 ", open txg: %" PRIu64 "\n", tmp_cb->zcd_txg, txg);
4666 }
4667
4668 /*
4669 * Let's find the place to insert our callbacks.
4670 *
4671 * Even though the list is ordered by txg, it is possible for the
4672 * insertion point to not be the end because our txg may already be
4673 * quiescing at this point and other callbacks in the open txg
4674 * (from other objsets) may have sneaked in.
4675 */
4676 tmp_cb = list_tail(&zcl.zcl_callbacks);
4677 while (tmp_cb != NULL && tmp_cb->zcd_txg > txg)
4678 tmp_cb = list_prev(&zcl.zcl_callbacks, tmp_cb);
4679
4680 /* Add the 3 callbacks to the list */
4681 for (i = 0; i < 3; i++) {
4682 if (tmp_cb == NULL)
4683 list_insert_head(&zcl.zcl_callbacks, cb_data[i]);
4684 else
4685 list_insert_after(&zcl.zcl_callbacks, tmp_cb,
4686 cb_data[i]);
4687
4688 cb_data[i]->zcd_added = B_TRUE;
4689 VERIFY(!cb_data[i]->zcd_called);
4690
4691 tmp_cb = cb_data[i];
4692 }
4693
4694 zc_cb_counter += 3;
4695
4696 (void) mutex_exit(&zcl.zcl_callbacks_lock);
4697
4698 dmu_tx_commit(tx);
4699
4700 umem_free(od, sizeof(ztest_od_t));
4701 }
4702
4703 /* ARGSUSED */
4704 void
4705 ztest_dsl_prop_get_set(ztest_ds_t *zd, uint64_t id)
4706 {
4707 zfs_prop_t proplist[] = {
4708 ZFS_PROP_CHECKSUM,
4709 ZFS_PROP_COMPRESSION,
4710 ZFS_PROP_COPIES,
4711 ZFS_PROP_DEDUP
4712 };
4713 int p;
4714
4715 (void) rw_enter(&ztest_name_lock, RW_READER);
4716
4717 for (p = 0; p < sizeof (proplist) / sizeof (proplist[0]); p++)
4718 (void) ztest_dsl_prop_set_uint64(zd->zd_name, proplist[p],
4719 ztest_random_dsl_prop(proplist[p]), (int)ztest_random(2));
4720
4721 (void) rw_exit(&ztest_name_lock);
4722 }
4723
4724 /* ARGSUSED */
4725 void
4726 ztest_spa_prop_get_set(ztest_ds_t *zd, uint64_t id)
4727 {
4728 nvlist_t *props = NULL;
4729
4730 (void) rw_enter(&ztest_name_lock, RW_READER);
4731
4732 (void) ztest_spa_prop_set_uint64(ZPOOL_PROP_DEDUPDITTO,
4733 ZIO_DEDUPDITTO_MIN + ztest_random(ZIO_DEDUPDITTO_MIN));
4734
4735 VERIFY0(spa_prop_get(ztest_spa, &props));
4736
4737 if (ztest_opts.zo_verbose >= 6)
4738 dump_nvlist(props, 4);
4739
4740 nvlist_free(props);
4741
4742 (void) rw_exit(&ztest_name_lock);
4743 }
4744
4745 static int
4746 user_release_one(const char *snapname, const char *holdname)
4747 {
4748 nvlist_t *snaps, *holds;
4749 int error;
4750
4751 snaps = fnvlist_alloc();
4752 holds = fnvlist_alloc();
4753 fnvlist_add_boolean(holds, holdname);
4754 fnvlist_add_nvlist(snaps, snapname, holds);
4755 fnvlist_free(holds);
4756 error = dsl_dataset_user_release(snaps, NULL);
4757 fnvlist_free(snaps);
4758 return (error);
4759 }
4760
4761 /*
4762 * Test snapshot hold/release and deferred destroy.
4763 */
4764 void
4765 ztest_dmu_snapshot_hold(ztest_ds_t *zd, uint64_t id)
4766 {
4767 int error;
4768 objset_t *os = zd->zd_os;
4769 objset_t *origin;
4770 char snapname[100];
4771 char fullname[100];
4772 char clonename[100];
4773 char tag[100];
4774 char osname[MAXNAMELEN];
4775 nvlist_t *holds;
4776
4777 (void) rw_enter(&ztest_name_lock, RW_READER);
4778
4779 dmu_objset_name(os, osname);
4780
4781 (void) snprintf(snapname, sizeof (snapname), "sh1_%llu", (long long unsigned int)id);
4782 (void) snprintf(fullname, sizeof (fullname), "%s@%s", osname, snapname);
4783 (void) snprintf(clonename, sizeof (clonename),
4784 "%s/ch1_%llu", osname, (long long unsigned int)id);
4785 (void) snprintf(tag, sizeof (tag), "tag_%llu", (long long unsigned int)id);
4786
4787 /*
4788 * Clean up from any previous run.
4789 */
4790 error = dsl_destroy_head(clonename);
4791 if (error != ENOENT)
4792 ASSERT0(error);
4793 error = user_release_one(fullname, tag);
4794 if (error != ESRCH && error != ENOENT)
4795 ASSERT0(error);
4796 error = dsl_destroy_snapshot(fullname, B_FALSE);
4797 if (error != ENOENT)
4798 ASSERT0(error);
4799
4800 /*
4801 * Create snapshot, clone it, mark snap for deferred destroy,
4802 * destroy clone, verify snap was also destroyed.
4803 */
4804 error = dmu_objset_snapshot_one(osname, snapname);
4805 if (error) {
4806 if (error == ENOSPC) {
4807 ztest_record_enospc("dmu_objset_snapshot");
4808 goto out;
4809 }
4810 fatal(0, "dmu_objset_snapshot(%s) = %d", fullname, error);
4811 }
4812
4813 error = dmu_objset_clone(clonename, fullname);
4814 if (error) {
4815 if (error == ENOSPC) {
4816 ztest_record_enospc("dmu_objset_clone");
4817 goto out;
4818 }
4819 fatal(0, "dmu_objset_clone(%s) = %d", clonename, error);
4820 }
4821
4822 error = dsl_destroy_snapshot(fullname, B_TRUE);
4823 if (error) {
4824 fatal(0, "dsl_destroy_snapshot(%s, B_TRUE) = %d",
4825 fullname, error);
4826 }
4827
4828 error = dsl_destroy_head(clonename);
4829 if (error)
4830 fatal(0, "dsl_destroy_head(%s) = %d", clonename, error);
4831
4832 error = dmu_objset_hold(fullname, FTAG, &origin);
4833 if (error != ENOENT)
4834 fatal(0, "dmu_objset_hold(%s) = %d", fullname, error);
4835
4836 /*
4837 * Create snapshot, add temporary hold, verify that we can't
4838 * destroy a held snapshot, mark for deferred destroy,
4839 * release hold, verify snapshot was destroyed.
4840 */
4841 error = dmu_objset_snapshot_one(osname, snapname);
4842 if (error) {
4843 if (error == ENOSPC) {
4844 ztest_record_enospc("dmu_objset_snapshot");
4845 goto out;
4846 }
4847 fatal(0, "dmu_objset_snapshot(%s) = %d", fullname, error);
4848 }
4849
4850 holds = fnvlist_alloc();
4851 fnvlist_add_string(holds, fullname, tag);
4852 error = dsl_dataset_user_hold(holds, 0, NULL);
4853 fnvlist_free(holds);
4854
4855 if (error)
4856 		fatal(0, "dsl_dataset_user_hold(%s, %s) = %d", fullname, tag, error);
4857
4858 error = dsl_destroy_snapshot(fullname, B_FALSE);
4859 if (error != EBUSY) {
4860 fatal(0, "dsl_destroy_snapshot(%s, B_FALSE) = %d",
4861 fullname, error);
4862 }
4863
4864 error = dsl_destroy_snapshot(fullname, B_TRUE);
4865 if (error) {
4866 fatal(0, "dsl_destroy_snapshot(%s, B_TRUE) = %d",
4867 fullname, error);
4868 }
4869
4870 error = user_release_one(fullname, tag);
4871 if (error)
4872 fatal(0, "user_release_one(%s, %s) = %d", fullname, tag, error);
4873
4874 VERIFY3U(dmu_objset_hold(fullname, FTAG, &origin), ==, ENOENT);
4875
4876 out:
4877 (void) rw_exit(&ztest_name_lock);
4878 }
4879
4880 /*
4881 * Inject random faults into the on-disk data.
4882 */
4883 /* ARGSUSED */
4884 void
4885 ztest_fault_inject(ztest_ds_t *zd, uint64_t id)
4886 {
4887 ztest_shared_t *zs = ztest_shared;
4888 spa_t *spa = ztest_spa;
4889 int fd;
4890 uint64_t offset;
4891 uint64_t leaves;
4892 uint64_t bad = 0x1990c0ffeedecadeull;
4893 uint64_t top, leaf;
4894 char *path0;
4895 char *pathrand;
4896 size_t fsize;
4897 int bshift = SPA_MAXBLOCKSHIFT + 2; /* don't scrog all labels */
4898 int iters = 1000;
4899 int maxfaults;
4900 int mirror_save;
4901 vdev_t *vd0 = NULL;
4902 uint64_t guid0 = 0;
4903 boolean_t islog = B_FALSE;
4904
4905 path0 = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
4906 pathrand = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
4907
4908 mutex_enter(&ztest_vdev_lock);
4909 maxfaults = MAXFAULTS();
4910 leaves = MAX(zs->zs_mirrors, 1) * ztest_opts.zo_raidz;
4911 mirror_save = zs->zs_mirrors;
4912 mutex_exit(&ztest_vdev_lock);
4913
4914 ASSERT(leaves >= 1);
4915
4916 /*
4917 * We need SCL_STATE here because we're going to look at vd0->vdev_tsd.
4918 */
4919 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
4920
4921 if (ztest_random(2) == 0) {
4922 /*
4923 * Inject errors on a normal data device or slog device.
4924 */
4925 top = ztest_random_vdev_top(spa, B_TRUE);
4926 leaf = ztest_random(leaves) + zs->zs_splits;
4927
4928 /*
4929 * Generate paths to the first leaf in this top-level vdev,
4930 * and to the random leaf we selected. We'll induce transient
4931 * write failures and random online/offline activity on leaf 0,
4932 * and we'll write random garbage to the randomly chosen leaf.
4933 */
4934 (void) snprintf(path0, MAXPATHLEN, ztest_dev_template,
4935 ztest_opts.zo_dir, ztest_opts.zo_pool,
4936 top * leaves + zs->zs_splits);
4937 (void) snprintf(pathrand, MAXPATHLEN, ztest_dev_template,
4938 ztest_opts.zo_dir, ztest_opts.zo_pool,
4939 top * leaves + leaf);
4940
4941 vd0 = vdev_lookup_by_path(spa->spa_root_vdev, path0);
4942 if (vd0 != NULL && vd0->vdev_top->vdev_islog)
4943 islog = B_TRUE;
4944
4945 if (vd0 != NULL && maxfaults != 1) {
4946 /*
4947 * Make vd0 explicitly claim to be unreadable,
4948 * or unwriteable, or reach behind its back
4949 * and close the underlying fd. We can do this if
4950 * maxfaults == 0 because we'll fail and reexecute,
4951 * and we can do it if maxfaults >= 2 because we'll
4952 * have enough redundancy. If maxfaults == 1, the
4953 * combination of this with injection of random data
4954 * corruption below exceeds the pool's fault tolerance.
4955 */
4956 vdev_file_t *vf = vd0->vdev_tsd;
4957
4958 if (vf != NULL && ztest_random(3) == 0) {
4959 (void) close(vf->vf_vnode->v_fd);
4960 vf->vf_vnode->v_fd = -1;
4961 } else if (ztest_random(2) == 0) {
4962 vd0->vdev_cant_read = B_TRUE;
4963 } else {
4964 vd0->vdev_cant_write = B_TRUE;
4965 }
4966 guid0 = vd0->vdev_guid;
4967 }
4968 } else {
4969 /*
4970 * Inject errors on an l2cache device.
4971 */
4972 spa_aux_vdev_t *sav = &spa->spa_l2cache;
4973
4974 if (sav->sav_count == 0) {
4975 spa_config_exit(spa, SCL_STATE, FTAG);
4976 goto out;
4977 }
4978 vd0 = sav->sav_vdevs[ztest_random(sav->sav_count)];
4979 guid0 = vd0->vdev_guid;
4980 (void) strcpy(path0, vd0->vdev_path);
4981 (void) strcpy(pathrand, vd0->vdev_path);
4982
4983 leaf = 0;
4984 leaves = 1;
4985 maxfaults = INT_MAX; /* no limit on cache devices */
4986 }
4987
4988 spa_config_exit(spa, SCL_STATE, FTAG);
4989
4990 /*
4991 * If we can tolerate two or more faults, or we're dealing
4992 * with a slog, randomly online/offline vd0.
4993 */
4994 if ((maxfaults >= 2 || islog) && guid0 != 0) {
4995 if (ztest_random(10) < 6) {
4996 int flags = (ztest_random(2) == 0 ?
4997 ZFS_OFFLINE_TEMPORARY : 0);
4998
4999 /*
5000 * We have to grab the zs_name_lock as writer to
5001 * prevent a race between offlining a slog and
5002 * destroying a dataset. Offlining the slog will
5003 * grab a reference on the dataset which may cause
5004 * dsl_destroy_head() to fail with EBUSY thus
5005 * leaving the dataset in an inconsistent state.
5006 */
5007 if (islog)
5008 (void) rw_enter(&ztest_name_lock,
5009 RW_WRITER);
5010
5011 VERIFY(vdev_offline(spa, guid0, flags) != EBUSY);
5012
5013 if (islog)
5014 (void) rw_exit(&ztest_name_lock);
5015 } else {
5016 /*
5017 * Ideally we would like to be able to randomly
5018 * call vdev_[on|off]line without holding locks
5019 * to force unpredictable failures but the side
5020 * effects of vdev_[on|off]line prevent us from
5021 * doing so. We grab the ztest_vdev_lock here to
5022 * prevent a race between injection testing and
5023 * aux_vdev removal.
5024 */
5025 mutex_enter(&ztest_vdev_lock);
5026 (void) vdev_online(spa, guid0, 0, NULL);
5027 mutex_exit(&ztest_vdev_lock);
5028 }
5029 }
5030
5031 if (maxfaults == 0)
5032 goto out;
5033
5034 /*
5035 * We have at least single-fault tolerance, so inject data corruption.
5036 */
5037 fd = open(pathrand, O_RDWR);
5038
5039 if (fd == -1) /* we hit a gap in the device namespace */
5040 goto out;
5041
5042 fsize = lseek(fd, 0, SEEK_END);
5043
5044 while (--iters != 0) {
5045 offset = ztest_random(fsize / (leaves << bshift)) *
5046 (leaves << bshift) + (leaf << bshift) +
5047 (ztest_random(1ULL << (bshift - 1)) & -8ULL);
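		/*
		 * Sketch of the arithmetic above: pick a random "row" of size
		 * (leaves << bshift), step to this leaf's slot within that
		 * row, then pick a random 8-byte-aligned offset in the first
		 * half of the slot.  Keying the slot to the leaf index means
		 * that, with mirrors, we should never damage the same logical
		 * block on two different children.
		 */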
5048
5049 if (offset >= fsize)
5050 continue;
5051
5052 mutex_enter(&ztest_vdev_lock);
5053 if (mirror_save != zs->zs_mirrors) {
5054 mutex_exit(&ztest_vdev_lock);
5055 (void) close(fd);
5056 goto out;
5057 }
5058
5059 if (pwrite(fd, &bad, sizeof (bad), offset) != sizeof (bad))
5060 fatal(1, "can't inject bad word at 0x%llx in %s",
5061 offset, pathrand);
5062
5063 mutex_exit(&ztest_vdev_lock);
5064
5065 if (ztest_opts.zo_verbose >= 7)
5066 (void) printf("injected bad word into %s,"
5067 " offset 0x%llx\n", pathrand, (u_longlong_t)offset);
5068 }
5069
5070 (void) close(fd);
5071 out:
5072 umem_free(path0, MAXPATHLEN);
5073 umem_free(pathrand, MAXPATHLEN);
5074 }
5075
5076 /*
5077 * Verify that DDT repair works as expected.
5078 */
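/*
 * The idea, roughly: enough identical copies are written to push the DDT
 * refcount past the dedup-ditto threshold configured in ztest_run(); one
 * copy is then deliberately damaged on disk so a later read can repair it
 * from a surviving ditto copy.
 */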
5079 void
5080 ztest_ddt_repair(ztest_ds_t *zd, uint64_t id)
5081 {
5082 ztest_shared_t *zs = ztest_shared;
5083 spa_t *spa = ztest_spa;
5084 objset_t *os = zd->zd_os;
5085 ztest_od_t *od;
5086 uint64_t object, blocksize, txg, pattern, psize;
5087 enum zio_checksum checksum = spa_dedup_checksum(spa);
5088 dmu_buf_t *db;
5089 dmu_tx_t *tx;
5090 void *buf;
5091 blkptr_t blk;
5092 int copies = 2 * ZIO_DEDUPDITTO_MIN;
5093 int i;
5094
5095 blocksize = ztest_random_blocksize();
5096 blocksize = MIN(blocksize, 2048); /* because we write so many */
5097
5098 od = umem_alloc(sizeof(ztest_od_t), UMEM_NOFAIL);
5099 ztest_od_init(od, id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0);
5100
5101 if (ztest_object_init(zd, od, sizeof (ztest_od_t), B_FALSE) != 0) {
5102 umem_free(od, sizeof(ztest_od_t));
5103 return;
5104 }
5105
5106 /*
5107 * Take the name lock as writer to prevent anyone else from changing
5108 	 * the pool and dataset properties we need to maintain during this test.
5109 */
5110 (void) rw_enter(&ztest_name_lock, RW_WRITER);
5111
5112 if (ztest_dsl_prop_set_uint64(zd->zd_name, ZFS_PROP_DEDUP, checksum,
5113 B_FALSE) != 0 ||
5114 ztest_dsl_prop_set_uint64(zd->zd_name, ZFS_PROP_COPIES, 1,
5115 B_FALSE) != 0) {
5116 (void) rw_exit(&ztest_name_lock);
5117 umem_free(od, sizeof(ztest_od_t));
5118 return;
5119 }
5120
5121 object = od[0].od_object;
5122 blocksize = od[0].od_blocksize;
5123 pattern = zs->zs_guid ^ dmu_objset_fsid_guid(os);
5124
5125 ASSERT(object != 0);
5126
5127 tx = dmu_tx_create(os);
5128 dmu_tx_hold_write(tx, object, 0, copies * blocksize);
5129 txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
5130 if (txg == 0) {
5131 (void) rw_exit(&ztest_name_lock);
5132 umem_free(od, sizeof(ztest_od_t));
5133 return;
5134 }
5135
5136 /*
5137 * Write all the copies of our block.
5138 */
5139 for (i = 0; i < copies; i++) {
5140 uint64_t offset = i * blocksize;
5141 int error = dmu_buf_hold(os, object, offset, FTAG, &db,
5142 DMU_READ_NO_PREFETCH);
5143 if (error != 0) {
5144 			fatal(B_FALSE, "dmu_buf_hold(%p, %llu, %llu) = %d",
5145 			    os, (u_longlong_t)object, (u_longlong_t)offset, error);
5146 }
5147 ASSERT(db->db_offset == offset);
5148 ASSERT(db->db_size == blocksize);
5149 ASSERT(ztest_pattern_match(db->db_data, db->db_size, pattern) ||
5150 ztest_pattern_match(db->db_data, db->db_size, 0ULL));
5151 dmu_buf_will_fill(db, tx);
5152 ztest_pattern_set(db->db_data, db->db_size, pattern);
5153 dmu_buf_rele(db, FTAG);
5154 }
5155
5156 dmu_tx_commit(tx);
5157 txg_wait_synced(spa_get_dsl(spa), txg);
5158
5159 /*
5160 * Find out what block we got.
5161 */
5162 VERIFY0(dmu_buf_hold(os, object, 0, FTAG, &db,
5163 DMU_READ_NO_PREFETCH));
5164 blk = *((dmu_buf_impl_t *)db)->db_blkptr;
5165 dmu_buf_rele(db, FTAG);
5166
5167 /*
5168 * Damage the block. Dedup-ditto will save us when we read it later.
5169 */
5170 psize = BP_GET_PSIZE(&blk);
5171 buf = zio_buf_alloc(psize);
5172 ztest_pattern_set(buf, psize, ~pattern);
5173
5174 (void) zio_wait(zio_rewrite(NULL, spa, 0, &blk,
5175 buf, psize, NULL, NULL, ZIO_PRIORITY_SYNC_WRITE,
5176 ZIO_FLAG_CANFAIL | ZIO_FLAG_INDUCE_DAMAGE, NULL));
5177
5178 zio_buf_free(buf, psize);
5179
5180 (void) rw_exit(&ztest_name_lock);
5181 umem_free(od, sizeof(ztest_od_t));
5182 }
5183
5184 /*
5185 * Scrub the pool.
5186 */
5187 /* ARGSUSED */
5188 void
5189 ztest_scrub(ztest_ds_t *zd, uint64_t id)
5190 {
5191 spa_t *spa = ztest_spa;
5192
5193 (void) spa_scan(spa, POOL_SCAN_SCRUB);
5194 (void) poll(NULL, 0, 100); /* wait a moment, then force a restart */
5195 (void) spa_scan(spa, POOL_SCAN_SCRUB);
5196 }
5197
5198 /*
5199 * Change the guid for the pool.
5200 */
5201 /* ARGSUSED */
5202 void
5203 ztest_reguid(ztest_ds_t *zd, uint64_t id)
5204 {
5205 spa_t *spa = ztest_spa;
5206 uint64_t orig, load;
5207 int error;
5208
5209 orig = spa_guid(spa);
5210 load = spa_load_guid(spa);
5211
5212 (void) rw_enter(&ztest_name_lock, RW_WRITER);
5213 error = spa_change_guid(spa);
5214 (void) rw_exit(&ztest_name_lock);
5215
5216 if (error != 0)
5217 return;
5218
5219 if (ztest_opts.zo_verbose >= 4) {
5220 (void) printf("Changed guid old %llu -> %llu\n",
5221 (u_longlong_t)orig, (u_longlong_t)spa_guid(spa));
5222 }
5223
5224 VERIFY3U(orig, !=, spa_guid(spa));
5225 VERIFY3U(load, ==, spa_load_guid(spa));
5226 }
5227
5228 /*
5229 * Rename the pool to a different name and then rename it back.
5230 */
5231 /* ARGSUSED */
5232 void
5233 ztest_spa_rename(ztest_ds_t *zd, uint64_t id)
5234 {
5235 char *oldname, *newname;
5236 spa_t *spa;
5237
5238 (void) rw_enter(&ztest_name_lock, RW_WRITER);
5239
5240 oldname = ztest_opts.zo_pool;
5241 newname = umem_alloc(strlen(oldname) + 5, UMEM_NOFAIL);
5242 (void) strcpy(newname, oldname);
5243 (void) strcat(newname, "_tmp");
5244
5245 /*
5246 * Do the rename
5247 */
5248 VERIFY3U(0, ==, spa_rename(oldname, newname));
5249
5250 /*
5251 * Try to open it under the old name, which shouldn't exist
5252 */
5253 VERIFY3U(ENOENT, ==, spa_open(oldname, &spa, FTAG));
5254
5255 /*
5256 * Open it under the new name and make sure it's still the same spa_t.
5257 */
5258 VERIFY3U(0, ==, spa_open(newname, &spa, FTAG));
5259
5260 ASSERT(spa == ztest_spa);
5261 spa_close(spa, FTAG);
5262
5263 /*
5264 * Rename it back to the original
5265 */
5266 VERIFY3U(0, ==, spa_rename(newname, oldname));
5267
5268 /*
5269 * Make sure it can still be opened
5270 */
5271 VERIFY3U(0, ==, spa_open(oldname, &spa, FTAG));
5272
5273 ASSERT(spa == ztest_spa);
5274 spa_close(spa, FTAG);
5275
5276 umem_free(newname, strlen(newname) + 1);
5277
5278 (void) rw_exit(&ztest_name_lock);
5279 }
5280
5281 /*
5282 * Verify pool integrity by running zdb.
5283 */
5284 static void
5285 ztest_run_zdb(char *pool)
5286 {
5287 int status;
5288 char *bin;
5289 char *zdb;
5290 char *zbuf;
5291 FILE *fp;
5292
5293 bin = umem_alloc(MAXPATHLEN + MAXNAMELEN + 20, UMEM_NOFAIL);
5294 zdb = umem_alloc(MAXPATHLEN + MAXNAMELEN + 20, UMEM_NOFAIL);
5295 zbuf = umem_alloc(1024, UMEM_NOFAIL);
5296
5297 VERIFY(realpath(getexecname(), bin) != NULL);
5298 if (strncmp(bin, "/usr/sbin/ztest", 15) == 0) {
5299 strcpy(bin, "/usr/sbin/zdb"); /* Installed */
5300 } else if (strncmp(bin, "/sbin/ztest", 11) == 0) {
5301 strcpy(bin, "/sbin/zdb"); /* Installed */
5302 } else {
5303 strstr(bin, "/ztest/")[0] = '\0'; /* In-tree */
5304 strcat(bin, "/zdb/zdb");
5305 }
5306
5307 (void) sprintf(zdb,
5308 "%s -bcc%s%s -U %s %s",
5309 bin,
5310 ztest_opts.zo_verbose >= 3 ? "s" : "",
5311 ztest_opts.zo_verbose >= 4 ? "v" : "",
5312 spa_config_path,
5313 pool);
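	/*
	 * With verbosity below 3 this builds a command along the lines of
	 * "/sbin/zdb -bcc -U /etc/zfs/zpool.cache ztest"; the paths shown
	 * here are only illustrative.
	 */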
5314
5315 if (ztest_opts.zo_verbose >= 5)
5316 (void) printf("Executing %s\n", strstr(zdb, "zdb "));
5317
5318 fp = popen(zdb, "r");
5319
5320 while (fgets(zbuf, 1024, fp) != NULL)
5321 if (ztest_opts.zo_verbose >= 3)
5322 (void) printf("%s", zbuf);
5323
5324 status = pclose(fp);
5325
5326 if (status == 0)
5327 goto out;
5328
5329 ztest_dump_core = 0;
5330 if (WIFEXITED(status))
5331 fatal(0, "'%s' exit code %d", zdb, WEXITSTATUS(status));
5332 else
5333 fatal(0, "'%s' died with signal %d", zdb, WTERMSIG(status));
5334 out:
5335 umem_free(bin, MAXPATHLEN + MAXNAMELEN + 20);
5336 umem_free(zdb, MAXPATHLEN + MAXNAMELEN + 20);
5337 umem_free(zbuf, 1024);
5338 }
5339
5340 static void
5341 ztest_walk_pool_directory(char *header)
5342 {
5343 spa_t *spa = NULL;
5344
5345 if (ztest_opts.zo_verbose >= 6)
5346 (void) printf("%s\n", header);
5347
5348 mutex_enter(&spa_namespace_lock);
5349 while ((spa = spa_next(spa)) != NULL)
5350 if (ztest_opts.zo_verbose >= 6)
5351 (void) printf("\t%s\n", spa_name(spa));
5352 mutex_exit(&spa_namespace_lock);
5353 }
5354
5355 static void
5356 ztest_spa_import_export(char *oldname, char *newname)
5357 {
5358 nvlist_t *config, *newconfig;
5359 uint64_t pool_guid;
5360 spa_t *spa;
5361 int error;
5362
5363 if (ztest_opts.zo_verbose >= 4) {
5364 (void) printf("import/export: old = %s, new = %s\n",
5365 oldname, newname);
5366 }
5367
5368 /*
5369 * Clean up from previous runs.
5370 */
5371 (void) spa_destroy(newname);
5372
5373 /*
5374 * Get the pool's configuration and guid.
5375 */
5376 VERIFY3U(0, ==, spa_open(oldname, &spa, FTAG));
5377
5378 /*
5379 * Kick off a scrub to tickle scrub/export races.
5380 */
5381 if (ztest_random(2) == 0)
5382 (void) spa_scan(spa, POOL_SCAN_SCRUB);
5383
5384 pool_guid = spa_guid(spa);
5385 spa_close(spa, FTAG);
5386
5387 ztest_walk_pool_directory("pools before export");
5388
5389 /*
5390 * Export it.
5391 */
5392 VERIFY3U(0, ==, spa_export(oldname, &config, B_FALSE, B_FALSE));
5393
5394 ztest_walk_pool_directory("pools after export");
5395
5396 /*
5397 * Try to import it.
5398 */
5399 newconfig = spa_tryimport(config);
5400 ASSERT(newconfig != NULL);
5401 nvlist_free(newconfig);
5402
5403 /*
5404 * Import it under the new name.
5405 */
5406 error = spa_import(newname, config, NULL, 0);
5407 if (error != 0) {
5408 dump_nvlist(config, 0);
5409 fatal(B_FALSE, "couldn't import pool %s as %s: error %u",
5410 oldname, newname, error);
5411 }
5412
5413 ztest_walk_pool_directory("pools after import");
5414
5415 /*
5416 * Try to import it again -- should fail with EEXIST.
5417 */
5418 VERIFY3U(EEXIST, ==, spa_import(newname, config, NULL, 0));
5419
5420 /*
5421 * Try to import it under a different name -- should fail with EEXIST.
5422 */
5423 VERIFY3U(EEXIST, ==, spa_import(oldname, config, NULL, 0));
5424
5425 /*
5426 * Verify that the pool is no longer visible under the old name.
5427 */
5428 VERIFY3U(ENOENT, ==, spa_open(oldname, &spa, FTAG));
5429
5430 /*
5431 * Verify that we can open and close the pool using the new name.
5432 */
5433 VERIFY3U(0, ==, spa_open(newname, &spa, FTAG));
5434 ASSERT(pool_guid == spa_guid(spa));
5435 spa_close(spa, FTAG);
5436
5437 nvlist_free(config);
5438 }
5439
5440 static void
5441 ztest_resume(spa_t *spa)
5442 {
5443 if (spa_suspended(spa) && ztest_opts.zo_verbose >= 6)
5444 (void) printf("resuming from suspended state\n");
5445 spa_vdev_state_enter(spa, SCL_NONE);
5446 vdev_clear(spa, NULL);
5447 (void) spa_vdev_state_exit(spa, NULL, 0);
5448 (void) zio_resume(spa);
5449 }
5450
5451 static void *
5452 ztest_resume_thread(void *arg)
5453 {
5454 spa_t *spa = arg;
5455
5456 while (!ztest_exiting) {
5457 if (spa_suspended(spa))
5458 ztest_resume(spa);
5459 (void) poll(NULL, 0, 100);
5460 }
5461
5462 thread_exit();
5463
5464 return (NULL);
5465 }
5466
5467 #define GRACE 300
5468
5469 #if 0
5470 static void
5471 ztest_deadman_alarm(int sig)
5472 {
5473 fatal(0, "failed to complete within %d seconds of deadline", GRACE);
5474 }
5475 #endif
5476
5477 static void
5478 ztest_execute(int test, ztest_info_t *zi, uint64_t id)
5479 {
5480 ztest_ds_t *zd = &ztest_ds[id % ztest_opts.zo_datasets];
5481 ztest_shared_callstate_t *zc = ZTEST_GET_SHARED_CALLSTATE(test);
5482 hrtime_t functime = gethrtime();
5483 int i;
5484
5485 for (i = 0; i < zi->zi_iters; i++)
5486 zi->zi_func(zd, id);
5487
5488 functime = gethrtime() - functime;
5489
5490 atomic_add_64(&zc->zc_count, 1);
5491 atomic_add_64(&zc->zc_time, functime);
5492
5493 if (ztest_opts.zo_verbose >= 4) {
5494 Dl_info dli;
5495 (void) dladdr((void *)zi->zi_func, &dli);
5496 (void) printf("%6.2f sec in %s\n",
5497 (double)functime / NANOSEC, dli.dli_sname);
5498 }
5499 }
5500
5501 static void *
5502 ztest_thread(void *arg)
5503 {
5504 int rand;
5505 uint64_t id = (uintptr_t)arg;
5506 ztest_shared_t *zs = ztest_shared;
5507 uint64_t call_next;
5508 hrtime_t now;
5509 ztest_info_t *zi;
5510 ztest_shared_callstate_t *zc;
5511
5512 while ((now = gethrtime()) < zs->zs_thread_stop) {
5513 /*
5514 * See if it's time to force a crash.
5515 */
5516 if (now > zs->zs_thread_kill)
5517 ztest_kill(zs);
5518
5519 /*
5520 * If we're getting ENOSPC with some regularity, stop.
5521 */
5522 if (zs->zs_enospc_count > 10)
5523 break;
5524
5525 /*
5526 * Pick a random function to execute.
5527 */
5528 rand = ztest_random(ZTEST_FUNCS);
5529 zi = &ztest_info[rand];
5530 zc = ZTEST_GET_SHARED_CALLSTATE(rand);
5531 call_next = zc->zc_next;
5532
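		/*
		 * Only the thread that wins the atomic_cas_64() race below
		 * runs the chosen function this pass; the next due time is
		 * pushed out by a random delay averaging zi_interval[0], so
		 * each function fires roughly once per interval no matter
		 * how many threads are running.
		 */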
5533 if (now >= call_next &&
5534 atomic_cas_64(&zc->zc_next, call_next, call_next +
5535 ztest_random(2 * zi->zi_interval[0] + 1)) == call_next) {
5536 ztest_execute(rand, zi, id);
5537 }
5538 }
5539
5540 thread_exit();
5541
5542 return (NULL);
5543 }
5544
5545 static void
5546 ztest_dataset_name(char *dsname, char *pool, int d)
5547 {
5548 (void) snprintf(dsname, MAXNAMELEN, "%s/ds_%d", pool, d);
5549 }
5550
5551 static void
5552 ztest_dataset_destroy(int d)
5553 {
5554 char name[MAXNAMELEN];
5555 int t;
5556
5557 ztest_dataset_name(name, ztest_opts.zo_pool, d);
5558
5559 if (ztest_opts.zo_verbose >= 3)
5560 (void) printf("Destroying %s to free up space\n", name);
5561
5562 /*
5563 	 * Clean up any non-standard clones and snapshots. In general,
5564 	 * ztest thread t operates on dataset (t % zo_datasets),
5565 * so there may be more than one thing to clean up.
5566 */
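	/*
	 * For example (illustrative numbers only): with 7 datasets and 23
	 * threads, dataset 2 is shared by threads 2, 9 and 16, so each of
	 * them may have left clones or snapshots to clean up here.
	 */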
5567 for (t = d; t < ztest_opts.zo_threads;
5568 t += ztest_opts.zo_datasets)
5569 ztest_dsl_dataset_cleanup(name, t);
5570
5571 (void) dmu_objset_find(name, ztest_objset_destroy_cb, NULL,
5572 DS_FIND_SNAPSHOTS | DS_FIND_CHILDREN);
5573 }
5574
5575 static void
5576 ztest_dataset_dirobj_verify(ztest_ds_t *zd)
5577 {
5578 uint64_t usedobjs, dirobjs, scratch;
5579
5580 /*
5581 * ZTEST_DIROBJ is the object directory for the entire dataset.
5582 * Therefore, the number of objects in use should equal the
5583 * number of ZTEST_DIROBJ entries, +1 for ZTEST_DIROBJ itself.
5584 * If not, we have an object leak.
5585 *
5586 * Note that we can only check this in ztest_dataset_open(),
5587 * when the open-context and syncing-context values agree.
5588 * That's because zap_count() returns the open-context value,
5589 * while dmu_objset_space() returns the rootbp fill count.
5590 */
5591 VERIFY3U(0, ==, zap_count(zd->zd_os, ZTEST_DIROBJ, &dirobjs));
5592 dmu_objset_space(zd->zd_os, &scratch, &scratch, &usedobjs, &scratch);
5593 ASSERT3U(dirobjs + 1, ==, usedobjs);
5594 }
5595
5596 static int
5597 ztest_dataset_open(int d)
5598 {
5599 ztest_ds_t *zd = &ztest_ds[d];
5600 uint64_t committed_seq = ZTEST_GET_SHARED_DS(d)->zd_seq;
5601 objset_t *os;
5602 zilog_t *zilog;
5603 char name[MAXNAMELEN];
5604 int error;
5605
5606 ztest_dataset_name(name, ztest_opts.zo_pool, d);
5607
5608 (void) rw_enter(&ztest_name_lock, RW_READER);
5609
5610 error = ztest_dataset_create(name);
5611 if (error == ENOSPC) {
5612 (void) rw_exit(&ztest_name_lock);
5613 ztest_record_enospc(FTAG);
5614 return (error);
5615 }
5616 ASSERT(error == 0 || error == EEXIST);
5617
5618 VERIFY0(dmu_objset_own(name, DMU_OST_OTHER, B_FALSE, zd, &os));
5619 (void) rw_exit(&ztest_name_lock);
5620
5621 ztest_zd_init(zd, ZTEST_GET_SHARED_DS(d), os);
5622
5623 zilog = zd->zd_zilog;
5624
5625 if (zilog->zl_header->zh_claim_lr_seq != 0 &&
5626 zilog->zl_header->zh_claim_lr_seq < committed_seq)
5627 fatal(0, "missing log records: claimed %llu < committed %llu",
5628 zilog->zl_header->zh_claim_lr_seq, committed_seq);
5629
5630 ztest_dataset_dirobj_verify(zd);
5631
5632 zil_replay(os, zd, ztest_replay_vector);
5633
5634 ztest_dataset_dirobj_verify(zd);
5635
5636 if (ztest_opts.zo_verbose >= 6)
5637 (void) printf("%s replay %llu blocks, %llu records, seq %llu\n",
5638 zd->zd_name,
5639 (u_longlong_t)zilog->zl_parse_blk_count,
5640 (u_longlong_t)zilog->zl_parse_lr_count,
5641 (u_longlong_t)zilog->zl_replaying_seq);
5642
5643 zilog = zil_open(os, ztest_get_data);
5644
5645 if (zilog->zl_replaying_seq != 0 &&
5646 zilog->zl_replaying_seq < committed_seq)
5647 fatal(0, "missing log records: replayed %llu < committed %llu",
5648 zilog->zl_replaying_seq, committed_seq);
5649
5650 return (0);
5651 }
5652
5653 static void
5654 ztest_dataset_close(int d)
5655 {
5656 ztest_ds_t *zd = &ztest_ds[d];
5657
5658 zil_close(zd->zd_zilog);
5659 dmu_objset_disown(zd->zd_os, zd);
5660
5661 ztest_zd_fini(zd);
5662 }
5663
5664 /*
5665 * Kick off threads to run tests on all datasets in parallel.
5666 */
5667 static void
5668 ztest_run(ztest_shared_t *zs)
5669 {
5670 kt_did_t *tid;
5671 spa_t *spa;
5672 objset_t *os;
5673 kthread_t *resume_thread;
5674 uint64_t object;
5675 int error;
5676 int t, d;
5677
5678 ztest_exiting = B_FALSE;
5679
5680 /*
5681 * Initialize parent/child shared state.
5682 */
5683 mutex_init(&ztest_vdev_lock, NULL, MUTEX_DEFAULT, NULL);
5684 rw_init(&ztest_name_lock, NULL, RW_DEFAULT, NULL);
5685
5686 zs->zs_thread_start = gethrtime();
5687 zs->zs_thread_stop =
5688 zs->zs_thread_start + ztest_opts.zo_passtime * NANOSEC;
5689 zs->zs_thread_stop = MIN(zs->zs_thread_stop, zs->zs_proc_stop);
5690 zs->zs_thread_kill = zs->zs_thread_stop;
5691 if (ztest_random(100) < ztest_opts.zo_killrate) {
5692 zs->zs_thread_kill -=
5693 ztest_random(ztest_opts.zo_passtime * NANOSEC);
5694 }
5695
5696 mutex_init(&zcl.zcl_callbacks_lock, NULL, MUTEX_DEFAULT, NULL);
5697
5698 list_create(&zcl.zcl_callbacks, sizeof (ztest_cb_data_t),
5699 offsetof(ztest_cb_data_t, zcd_node));
5700
5701 /*
5702 * Open our pool.
5703 */
5704 kernel_init(FREAD | FWRITE);
5705 VERIFY0(spa_open(ztest_opts.zo_pool, &spa, FTAG));
5706 spa->spa_debug = B_TRUE;
5707 ztest_spa = spa;
5708
5709 VERIFY0(dmu_objset_own(ztest_opts.zo_pool,
5710 DMU_OST_ANY, B_TRUE, FTAG, &os));
5711 zs->zs_guid = dmu_objset_fsid_guid(os);
5712 dmu_objset_disown(os, FTAG);
5713
5714 spa->spa_dedup_ditto = 2 * ZIO_DEDUPDITTO_MIN;
5715
5716 /*
5717 * We don't expect the pool to suspend unless maxfaults == 0,
5718 * in which case ztest_fault_inject() temporarily takes away
5719 * the only valid replica.
5720 */
5721 if (MAXFAULTS() == 0)
5722 spa->spa_failmode = ZIO_FAILURE_MODE_WAIT;
5723 else
5724 spa->spa_failmode = ZIO_FAILURE_MODE_PANIC;
5725
5726 /*
5727 * Create a thread to periodically resume suspended I/O.
5728 */
5729 VERIFY3P((resume_thread = zk_thread_create(NULL, 0,
5730 (thread_func_t)ztest_resume_thread, spa, TS_RUN, NULL, 0, 0,
5731 PTHREAD_CREATE_JOINABLE)), !=, NULL);
5732
5733 #if 0
5734 /*
5735 * Set a deadman alarm to abort() if we hang.
5736 */
5737 signal(SIGALRM, ztest_deadman_alarm);
5738 alarm((zs->zs_thread_stop - zs->zs_thread_start) / NANOSEC + GRACE);
5739 #endif
5740
5741 /*
5742 	 * Verify that we can safely inquire about any object,
5743 	 * whether it's allocated or not. To make it interesting,
5744 	 * we probe a window of +/- 5 around each power of two.
5745 * This hits all edge cases, including zero and the max.
5746 */
5747 for (t = 0; t < 64; t++) {
5748 for (d = -5; d <= 5; d++) {
5749 error = dmu_object_info(spa->spa_meta_objset,
5750 (1ULL << t) + d, NULL);
5751 ASSERT(error == 0 || error == ENOENT ||
5752 error == EINVAL);
5753 }
5754 }
5755
5756 /*
5757 * If we got any ENOSPC errors on the previous run, destroy something.
5758 */
5759 if (zs->zs_enospc_count != 0) {
5760 int d = ztest_random(ztest_opts.zo_datasets);
5761 ztest_dataset_destroy(d);
5762 }
5763 zs->zs_enospc_count = 0;
5764
5765 tid = umem_zalloc(ztest_opts.zo_threads * sizeof (kt_did_t),
5766 UMEM_NOFAIL);
5767
5768 if (ztest_opts.zo_verbose >= 4)
5769 (void) printf("starting main threads...\n");
5770
5771 /*
5772 * Kick off all the tests that run in parallel.
5773 */
5774 for (t = 0; t < ztest_opts.zo_threads; t++) {
5775 kthread_t *thread;
5776
5777 if (t < ztest_opts.zo_datasets &&
5778 ztest_dataset_open(t) != 0)
5779 return;
5780
5781 VERIFY3P(thread = zk_thread_create(NULL, 0,
5782 (thread_func_t)ztest_thread,
5783 (void *)(uintptr_t)t, TS_RUN, NULL, 0, 0,
5784 PTHREAD_CREATE_JOINABLE), !=, NULL);
5785 tid[t] = thread->t_tid;
5786 }
5787
5788 /*
5789 * Wait for all of the tests to complete. We go in reverse order
5790 * so we don't close datasets while threads are still using them.
5791 */
5792 for (t = ztest_opts.zo_threads - 1; t >= 0; t--) {
5793 thread_join(tid[t]);
5794 if (t < ztest_opts.zo_datasets)
5795 ztest_dataset_close(t);
5796 }
5797
5798 txg_wait_synced(spa_get_dsl(spa), 0);
5799
5800 zs->zs_alloc = metaslab_class_get_alloc(spa_normal_class(spa));
5801 zs->zs_space = metaslab_class_get_space(spa_normal_class(spa));
5802
5803 umem_free(tid, ztest_opts.zo_threads * sizeof (kt_did_t));
5804
5805 /* Kill the resume thread */
5806 ztest_exiting = B_TRUE;
5807 thread_join(resume_thread->t_tid);
5808 ztest_resume(spa);
5809
5810 /*
5811 * Right before closing the pool, kick off a bunch of async I/O;
5812 * spa_close() should wait for it to complete.
5813 */
5814 for (object = 1; object < 50; object++)
5815 dmu_prefetch(spa->spa_meta_objset, object, 0, 1ULL << 20);
5816
5817 /* Verify that at least one commit cb was called in a timely fashion */
5818 if (zc_cb_counter >= ZTEST_COMMIT_CB_MIN_REG)
5819 VERIFY0(zc_min_txg_delay);
5820
5821 spa_close(spa, FTAG);
5822
5823 /*
5824 * Verify that we can loop over all pools.
5825 */
5826 mutex_enter(&spa_namespace_lock);
5827 for (spa = spa_next(NULL); spa != NULL; spa = spa_next(spa))
5828 if (ztest_opts.zo_verbose > 3)
5829 (void) printf("spa_next: found %s\n", spa_name(spa));
5830 mutex_exit(&spa_namespace_lock);
5831
5832 /*
5833 * Verify that we can export the pool and reimport it under a
5834 * different name.
5835 */
5836 if (ztest_random(2) == 0) {
5837 char name[MAXNAMELEN];
5838 (void) snprintf(name, MAXNAMELEN, "%s_import",
5839 ztest_opts.zo_pool);
5840 ztest_spa_import_export(ztest_opts.zo_pool, name);
5841 ztest_spa_import_export(name, ztest_opts.zo_pool);
5842 }
5843
5844 kernel_fini();
5845
5846 list_destroy(&zcl.zcl_callbacks);
5847 mutex_destroy(&zcl.zcl_callbacks_lock);
5848 rw_destroy(&ztest_name_lock);
5849 mutex_destroy(&ztest_vdev_lock);
5850 }
5851
5852 static void
5853 ztest_freeze(void)
5854 {
5855 ztest_ds_t *zd = &ztest_ds[0];
5856 spa_t *spa;
5857 int numloops = 0;
5858
5859 if (ztest_opts.zo_verbose >= 3)
5860 (void) printf("testing spa_freeze()...\n");
5861
5862 kernel_init(FREAD | FWRITE);
5863 VERIFY3U(0, ==, spa_open(ztest_opts.zo_pool, &spa, FTAG));
5864 VERIFY3U(0, ==, ztest_dataset_open(0));
5865 spa->spa_debug = B_TRUE;
5866 ztest_spa = spa;
5867
5868 /*
5869 * Force the first log block to be transactionally allocated.
5870 * We have to do this before we freeze the pool -- otherwise
5871 * the log chain won't be anchored.
5872 */
5873 while (BP_IS_HOLE(&zd->zd_zilog->zl_header->zh_log)) {
5874 ztest_dmu_object_alloc_free(zd, 0);
5875 zil_commit(zd->zd_zilog, 0);
5876 }
5877
5878 txg_wait_synced(spa_get_dsl(spa), 0);
5879
5880 /*
5881 * Freeze the pool. This stops spa_sync() from doing anything,
5882 * so that the only way to record changes from now on is the ZIL.
5883 */
5884 spa_freeze(spa);
5885
5886 /*
5887 * Run tests that generate log records but don't alter the pool config
5888 * or depend on DSL sync tasks (snapshots, objset create/destroy, etc).
5889 * We do a txg_wait_synced() after each iteration to force the txg
5890 * to increase well beyond the last synced value in the uberblock.
5891 * The ZIL should be OK with that.
5892 */
5893 while (ztest_random(10) != 0 &&
5894 numloops++ < ztest_opts.zo_maxloops) {
5895 ztest_dmu_write_parallel(zd, 0);
5896 ztest_dmu_object_alloc_free(zd, 0);
5897 txg_wait_synced(spa_get_dsl(spa), 0);
5898 }
5899
5900 /*
5901 * Commit all of the changes we just generated.
5902 */
5903 zil_commit(zd->zd_zilog, 0);
5904 txg_wait_synced(spa_get_dsl(spa), 0);
5905
5906 /*
5907 * Close our dataset and close the pool.
5908 */
5909 ztest_dataset_close(0);
5910 spa_close(spa, FTAG);
5911 kernel_fini();
5912
5913 /*
5914 * Open and close the pool and dataset to induce log replay.
5915 */
5916 kernel_init(FREAD | FWRITE);
5917 VERIFY3U(0, ==, spa_open(ztest_opts.zo_pool, &spa, FTAG));
5918 ASSERT(spa_freeze_txg(spa) == UINT64_MAX);
5919 VERIFY3U(0, ==, ztest_dataset_open(0));
5920 ztest_dataset_close(0);
5921
5922 spa->spa_debug = B_TRUE;
5923 ztest_spa = spa;
5924 txg_wait_synced(spa_get_dsl(spa), 0);
5925 ztest_reguid(NULL, 0);
5926
5927 spa_close(spa, FTAG);
5928 kernel_fini();
5929 }
5930
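/*
 * Format a nanosecond interval as a compact days/hours/minutes/seconds
 * string (e.g. "2d03h15m09s") in the caller-supplied buffer.
 */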
5931 void
5932 print_time(hrtime_t t, char *timebuf)
5933 {
5934 hrtime_t s = t / NANOSEC;
5935 hrtime_t m = s / 60;
5936 hrtime_t h = m / 60;
5937 hrtime_t d = h / 24;
5938
5939 s -= m * 60;
5940 m -= h * 60;
5941 h -= d * 24;
5942
5943 timebuf[0] = '\0';
5944
5945 if (d)
5946 (void) sprintf(timebuf,
5947 "%llud%02lluh%02llum%02llus", d, h, m, s);
5948 else if (h)
5949 (void) sprintf(timebuf, "%lluh%02llum%02llus", h, m, s);
5950 else if (m)
5951 (void) sprintf(timebuf, "%llum%02llus", m, s);
5952 else
5953 (void) sprintf(timebuf, "%llus", s);
5954 }
5955
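/*
 * Build a property nvlist for spa_create(). Half the time the list is
 * returned empty; otherwise autoreplace is enabled.
 */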
5956 static nvlist_t *
5957 make_random_props(void)
5958 {
5959 nvlist_t *props;
5960
5961 VERIFY(nvlist_alloc(&props, NV_UNIQUE_NAME, 0) == 0);
5962 if (ztest_random(2) == 0)
5963 return (props);
5964 VERIFY(nvlist_add_uint64(props, "autoreplace", 1) == 0);
5965
5966 return (props);
5967 }
5968
5969 /*
5970 * Create a storage pool with the given name and initial vdev size.
5971 * Then test spa_freeze() functionality.
5972 */
5973 static void
5974 ztest_init(ztest_shared_t *zs)
5975 {
5976 spa_t *spa;
5977 nvlist_t *nvroot, *props;
5978 int i;
5979
5980 mutex_init(&ztest_vdev_lock, NULL, MUTEX_DEFAULT, NULL);
5981 rw_init(&ztest_name_lock, NULL, RW_DEFAULT, NULL);
5982
5983 kernel_init(FREAD | FWRITE);
5984
5985 /*
5986 * Create the storage pool.
5987 */
5988 (void) spa_destroy(ztest_opts.zo_pool);
5989 ztest_shared->zs_vdev_next_leaf = 0;
5990 zs->zs_splits = 0;
5991 zs->zs_mirrors = ztest_opts.zo_mirrors;
5992 nvroot = make_vdev_root(NULL, NULL, NULL, ztest_opts.zo_vdev_size, 0,
5993 0, ztest_opts.zo_raidz, zs->zs_mirrors, 1);
5994 props = make_random_props();
5995 for (i = 0; i < SPA_FEATURES; i++) {
5996 char *buf;
5997 VERIFY3S(-1, !=, asprintf(&buf, "feature@%s",
5998 spa_feature_table[i].fi_uname));
5999 VERIFY3U(0, ==, nvlist_add_uint64(props, buf, 0));
6000 free(buf);
6001 }
6002 VERIFY3U(0, ==, spa_create(ztest_opts.zo_pool, nvroot, props, NULL));
6003 nvlist_free(nvroot);
6004
6005 VERIFY3U(0, ==, spa_open(ztest_opts.zo_pool, &spa, FTAG));
6006 zs->zs_metaslab_sz =
6007 1ULL << spa->spa_root_vdev->vdev_child[0]->vdev_ms_shift;
6008 spa_close(spa, FTAG);
6009
6010 kernel_fini();
6011
6012 ztest_run_zdb(ztest_opts.zo_pool);
6013
6014 ztest_freeze();
6015
6016 ztest_run_zdb(ztest_opts.zo_pool);
6017
6018 rw_destroy(&ztest_name_lock);
6019 mutex_destroy(&ztest_vdev_lock);
6020 }
6021
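/*
 * Create an anonymous (already unlinked) temporary file whose descriptor
 * backs the shared state mapped by the parent and every child process.
 */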
6022 static void
6023 setup_data_fd(void)
6024 {
6025 static char ztest_name_data[] = "/tmp/ztest.data.XXXXXX";
6026
6027 ztest_fd_data = mkstemp(ztest_name_data);
6028 ASSERT3S(ztest_fd_data, >=, 0);
6029 (void) unlink(ztest_name_data);
6030 }
6031
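/*
 * Total size, in bytes, of the shared data region described by the header.
 */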
6032 static int
6033 shared_data_size(ztest_shared_hdr_t *hdr)
6034 {
6035 int size;
6036
6037 size = hdr->zh_hdr_size;
6038 size += hdr->zh_opts_size;
6039 size += hdr->zh_size;
6040 size += hdr->zh_stats_size * hdr->zh_stats_count;
6041 size += hdr->zh_ds_size * hdr->zh_ds_count;
6042
6043 return (size);
6044 }
6045
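/*
 * Size the shared data file and record the layout (section sizes and
 * counts) in its header so that setup_data() can locate each section.
 */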
6046 static void
6047 setup_hdr(void)
6048 {
6049 int size;
6050 ztest_shared_hdr_t *hdr;
6051
6052 hdr = (void *)mmap(0, P2ROUNDUP(sizeof (*hdr), getpagesize()),
6053 PROT_READ | PROT_WRITE, MAP_SHARED, ztest_fd_data, 0);
6054 ASSERT(hdr != MAP_FAILED);
6055
6056 VERIFY3U(0, ==, ftruncate(ztest_fd_data, sizeof (ztest_shared_hdr_t)));
6057
6058 hdr->zh_hdr_size = sizeof (ztest_shared_hdr_t);
6059 hdr->zh_opts_size = sizeof (ztest_shared_opts_t);
6060 hdr->zh_size = sizeof (ztest_shared_t);
6061 hdr->zh_stats_size = sizeof (ztest_shared_callstate_t);
6062 hdr->zh_stats_count = ZTEST_FUNCS;
6063 hdr->zh_ds_size = sizeof (ztest_shared_ds_t);
6064 hdr->zh_ds_count = ztest_opts.zo_datasets;
6065
6066 size = shared_data_size(hdr);
6067 VERIFY3U(0, ==, ftruncate(ztest_fd_data, size));
6068
6069 (void) munmap((caddr_t)hdr, P2ROUNDUP(sizeof (*hdr), getpagesize()));
6070 }
6071
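/*
 * Map the shared data file and set up the global pointers to its header,
 * options, shared state, per-function call state, and per-dataset sections.
 */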
6072 static void
6073 setup_data(void)
6074 {
6075 int size, offset;
6076 ztest_shared_hdr_t *hdr;
6077 uint8_t *buf;
6078
6079 hdr = (void *)mmap(0, P2ROUNDUP(sizeof (*hdr), getpagesize()),
6080 PROT_READ, MAP_SHARED, ztest_fd_data, 0);
6081 ASSERT(hdr != MAP_FAILED);
6082
6083 size = shared_data_size(hdr);
6084
6085 (void) munmap((caddr_t)hdr, P2ROUNDUP(sizeof (*hdr), getpagesize()));
6086 hdr = ztest_shared_hdr = (void *)mmap(0, P2ROUNDUP(size, getpagesize()),
6087 PROT_READ | PROT_WRITE, MAP_SHARED, ztest_fd_data, 0);
6088 ASSERT(hdr != MAP_FAILED);
6089 buf = (uint8_t *)hdr;
6090
6091 offset = hdr->zh_hdr_size;
6092 ztest_shared_opts = (void *)&buf[offset];
6093 offset += hdr->zh_opts_size;
6094 ztest_shared = (void *)&buf[offset];
6095 offset += hdr->zh_size;
6096 ztest_shared_callstate = (void *)&buf[offset];
6097 offset += hdr->zh_stats_size * hdr->zh_stats_count;
6098 ztest_shared_ds = (void *)&buf[offset];
6099 }
6100
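/*
 * Fork and exec a ztest child (the current executable when cmd is NULL),
 * handing it the shared data descriptor via the ZTEST_FD_DATA environment
 * variable. Returns B_TRUE if the child was killed by SIGKILL (tolerated
 * only when ignorekill is set), B_FALSE if it exited cleanly; any other
 * outcome terminates ztest.
 */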
6101 static boolean_t
6102 exec_child(char *cmd, char *libpath, boolean_t ignorekill, int *statusp)
6103 {
6104 pid_t pid;
6105 int status;
6106 char *cmdbuf = NULL;
6107
6108 pid = fork();
6109
6110 if (cmd == NULL) {
6111 cmdbuf = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
6112 (void) strlcpy(cmdbuf, getexecname(), MAXPATHLEN);
6113 cmd = cmdbuf;
6114 }
6115
6116 if (pid == -1)
6117 fatal(1, "fork failed");
6118
6119 if (pid == 0) { /* child */
6120 char *emptyargv[2] = { cmd, NULL };
6121 char fd_data_str[12];
6122
6123 struct rlimit rl = { 1024, 1024 };
6124 (void) setrlimit(RLIMIT_NOFILE, &rl);
6125
6126 (void) close(ztest_fd_rand);
6127 VERIFY(11 >= snprintf(fd_data_str, 12, "%d", ztest_fd_data));
6128 VERIFY(0 == setenv("ZTEST_FD_DATA", fd_data_str, 1));
6129
6130 (void) enable_extended_FILE_stdio(-1, -1);
6131 if (libpath != NULL)
6132 VERIFY(0 == setenv("LD_LIBRARY_PATH", libpath, 1));
6133 (void) execv(cmd, emptyargv);
6134 ztest_dump_core = B_FALSE;
6135 fatal(B_TRUE, "exec failed: %s", cmd);
6136 }
6137
6138 if (cmdbuf != NULL) {
6139 umem_free(cmdbuf, MAXPATHLEN);
6140 cmd = NULL;
6141 }
6142
6143 while (waitpid(pid, &status, 0) != pid)
6144 continue;
6145 if (statusp != NULL)
6146 *statusp = status;
6147
6148 if (WIFEXITED(status)) {
6149 if (WEXITSTATUS(status) != 0) {
6150 (void) fprintf(stderr, "child exited with code %d\n",
6151 WEXITSTATUS(status));
6152 exit(2);
6153 }
6154 return (B_FALSE);
6155 } else if (WIFSIGNALED(status)) {
6156 if (!ignorekill || WTERMSIG(status) != SIGKILL) {
6157 (void) fprintf(stderr, "child died with signal %d\n",
6158 WTERMSIG(status));
6159 exit(3);
6160 }
6161 return (B_TRUE);
6162 } else {
6163 (void) fprintf(stderr, "something strange happened to child\n");
6164 exit(4);
6165 /* NOTREACHED */
6166 }
6167 }
6168
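/*
 * Create and initialize the storage pool zo_init times, zeroing the
 * shared state before each pass.
 */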
6169 static void
6170 ztest_run_init(void)
6171 {
6172 int i;
6173
6174 ztest_shared_t *zs = ztest_shared;
6175
6176 ASSERT(ztest_opts.zo_init != 0);
6177
6178 /*
6179 * Blow away any existing copy of zpool.cache
6180 */
6181 (void) remove(spa_config_path);
6182
6183 /*
6184 * Create and initialize our storage pool.
6185 */
6186 for (i = 1; i <= ztest_opts.zo_init; i++) {
6187 bzero(zs, sizeof (ztest_shared_t));
6188 if (ztest_opts.zo_verbose >= 3 &&
6189 ztest_opts.zo_init != 1) {
6190 (void) printf("ztest_init(), pass %d\n", i);
6191 }
6192 ztest_init(zs);
6193 }
6194 }
6195
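/*
 * Driver. Without ZTEST_FD_DATA in the environment we are the parent:
 * process options, set up the shared data file, and repeatedly fork
 * children (sometimes an older ztest binary) until the run time expires.
 * With ZTEST_FD_DATA set we are a child: attach to the shared data and
 * either initialize the pool or run the workload.
 */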
6196 int
6197 main(int argc, char **argv)
6198 {
6199 int kills = 0;
6200 int iters = 0;
6201 int older = 0;
6202 int newer = 0;
6203 ztest_shared_t *zs;
6204 ztest_info_t *zi;
6205 ztest_shared_callstate_t *zc;
6206 char timebuf[100];
6207 char numbuf[6];
6208 spa_t *spa;
6209 char *cmd;
6210 boolean_t hasalt;
6211 int f;
6212 char *fd_data_str = getenv("ZTEST_FD_DATA");
6213
6214 (void) setvbuf(stdout, NULL, _IOLBF, 0);
6215
6216 dprintf_setup(&argc, argv);
6217
6218 ztest_fd_rand = open("/dev/urandom", O_RDONLY);
6219 ASSERT3S(ztest_fd_rand, >=, 0);
6220
6221 if (!fd_data_str) {
6222 process_options(argc, argv);
6223
6224 setup_data_fd();
6225 setup_hdr();
6226 setup_data();
6227 bcopy(&ztest_opts, ztest_shared_opts,
6228 sizeof (*ztest_shared_opts));
6229 } else {
6230 ztest_fd_data = atoi(fd_data_str);
6231 setup_data();
6232 bcopy(ztest_shared_opts, &ztest_opts, sizeof (ztest_opts));
6233 }
6234 ASSERT3U(ztest_opts.zo_datasets, ==, ztest_shared_hdr->zh_ds_count);
6235
6236 /* Override location of zpool.cache */
6237 VERIFY(asprintf((char **)&spa_config_path, "%s/zpool.cache",
6238 ztest_opts.zo_dir) != -1);
6239
6240 ztest_ds = umem_alloc(ztest_opts.zo_datasets * sizeof (ztest_ds_t),
6241 UMEM_NOFAIL);
6242 zs = ztest_shared;
6243
6244 if (fd_data_str) {
6245 metaslab_gang_bang = ztest_opts.zo_metaslab_gang_bang;
6246 metaslab_df_alloc_threshold =
6247 zs->zs_metaslab_df_alloc_threshold;
6248
6249 if (zs->zs_do_init)
6250 ztest_run_init();
6251 else
6252 ztest_run(zs);
6253 exit(0);
6254 }
6255
6256 hasalt = (strlen(ztest_opts.zo_alt_ztest) != 0);
6257
6258 if (ztest_opts.zo_verbose >= 1) {
6259 (void) printf("%llu vdevs, %d datasets, %d threads,"
6260 " %llu seconds...\n",
6261 (u_longlong_t)ztest_opts.zo_vdevs,
6262 ztest_opts.zo_datasets,
6263 ztest_opts.zo_threads,
6264 (u_longlong_t)ztest_opts.zo_time);
6265 }
6266
6267 cmd = umem_alloc(MAXNAMELEN, UMEM_NOFAIL);
6268 (void) strlcpy(cmd, getexecname(), MAXNAMELEN);
6269
6270 zs->zs_do_init = B_TRUE;
6271 if (strlen(ztest_opts.zo_alt_ztest) != 0) {
6272 if (ztest_opts.zo_verbose >= 1) {
6273 (void) printf("Executing older ztest for "
6274 "initialization: %s\n", ztest_opts.zo_alt_ztest);
6275 }
6276 VERIFY(!exec_child(ztest_opts.zo_alt_ztest,
6277 ztest_opts.zo_alt_libpath, B_FALSE, NULL));
6278 } else {
6279 VERIFY(!exec_child(NULL, NULL, B_FALSE, NULL));
6280 }
6281 zs->zs_do_init = B_FALSE;
6282
6283 zs->zs_proc_start = gethrtime();
6284 zs->zs_proc_stop = zs->zs_proc_start + ztest_opts.zo_time * NANOSEC;
6285
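/*
 * Pick an initial, randomized deadline for each test function based on
 * its configured call interval; functions whose interval exceeds the
 * total run time are never scheduled.
 */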
6286 for (f = 0; f < ZTEST_FUNCS; f++) {
6287 zi = &ztest_info[f];
6288 zc = ZTEST_GET_SHARED_CALLSTATE(f);
6289 if (zs->zs_proc_start + zi->zi_interval[0] > zs->zs_proc_stop)
6290 zc->zc_next = UINT64_MAX;
6291 else
6292 zc->zc_next = zs->zs_proc_start +
6293 ztest_random(2 * zi->zi_interval[0] + 1);
6294 }
6295
6296 /*
6297 * Run the tests in a loop. These tests include fault injection
6298 * to verify that self-healing data works, and forced crashes
6299 * to verify that we never lose on-disk consistency.
6300 */
6301 while (gethrtime() < zs->zs_proc_stop) {
6302 int status;
6303 boolean_t killed;
6304
6305 /*
6306 * Initialize the workload counters for each function.
6307 */
6308 for (f = 0; f < ZTEST_FUNCS; f++) {
6309 zc = ZTEST_GET_SHARED_CALLSTATE(f);
6310 zc->zc_count = 0;
6311 zc->zc_time = 0;
6312 }
6313
6314 /* Set the allocation switch size (metaslab_df_alloc_threshold) */
6315 zs->zs_metaslab_df_alloc_threshold =
6316 ztest_random(zs->zs_metaslab_sz / 4) + 1;
6317
6318 if (!hasalt || ztest_random(2) == 0) {
6319 if (hasalt && ztest_opts.zo_verbose >= 1) {
6320 (void) printf("Executing newer ztest: %s\n",
6321 cmd);
6322 }
6323 newer++;
6324 killed = exec_child(cmd, NULL, B_TRUE, &status);
6325 } else {
6326 if (hasalt && ztest_opts.zo_verbose >= 1) {
6327 (void) printf("Executing older ztest: %s\n",
6328 ztest_opts.zo_alt_ztest);
6329 }
6330 older++;
6331 killed = exec_child(ztest_opts.zo_alt_ztest,
6332 ztest_opts.zo_alt_libpath, B_TRUE, &status);
6333 }
6334
6335 if (killed)
6336 kills++;
6337 iters++;
6338
6339 if (ztest_opts.zo_verbose >= 1) {
6340 hrtime_t now = gethrtime();
6341
6342 now = MIN(now, zs->zs_proc_stop);
6343 print_time(zs->zs_proc_stop - now, timebuf);
6344 nicenum(zs->zs_space, numbuf);
6345
6346 (void) printf("Pass %3d, %8s, %3llu ENOSPC, "
6347 "%4.1f%% of %5s used, %3.0f%% done, %8s to go\n",
6348 iters,
6349 WIFEXITED(status) ? "Complete" : "SIGKILL",
6350 (u_longlong_t)zs->zs_enospc_count,
6351 100.0 * zs->zs_alloc / zs->zs_space,
6352 numbuf,
6353 100.0 * (now - zs->zs_proc_start) /
6354 (ztest_opts.zo_time * NANOSEC), timebuf);
6355 }
6356
6357 if (ztest_opts.zo_verbose >= 2) {
6358 (void) printf("\nWorkload summary:\n\n");
6359 (void) printf("%7s %9s %s\n",
6360 "Calls", "Time", "Function");
6361 (void) printf("%7s %9s %s\n",
6362 "-----", "----", "--------");
6363 for (f = 0; f < ZTEST_FUNCS; f++) {
6364 Dl_info dli;
6365
6366 zi = &ztest_info[f];
6367 zc = ZTEST_GET_SHARED_CALLSTATE(f);
6368 print_time(zc->zc_time, timebuf);
6369 (void) dladdr((void *)zi->zi_func, &dli);
6370 (void) printf("%7llu %9s %s\n",
6371 (u_longlong_t)zc->zc_count, timebuf,
6372 dli.dli_sname);
6373 }
6374 (void) printf("\n");
6375 }
6376
6377 /*
6378 * It's possible that we killed a child during a rename test,
6379 * in which case we'll have a 'ztest_tmp' pool lying around
6380 * instead of 'ztest'. Do a blind rename in case this happened.
6381 */
6382 kernel_init(FREAD);
6383 if (spa_open(ztest_opts.zo_pool, &spa, FTAG) == 0) {
6384 spa_close(spa, FTAG);
6385 } else {
6386 char tmpname[MAXNAMELEN];
6387 kernel_fini();
6388 kernel_init(FREAD | FWRITE);
6389 (void) snprintf(tmpname, sizeof (tmpname), "%s_tmp",
6390 ztest_opts.zo_pool);
6391 (void) spa_rename(tmpname, ztest_opts.zo_pool);
6392 }
6393 kernel_fini();
6394
6395 ztest_run_zdb(ztest_opts.zo_pool);
6396 }
6397
6398 if (ztest_opts.zo_verbose >= 1) {
6399 if (hasalt) {
6400 (void) printf("%d runs of older ztest: %s\n", older,
6401 ztest_opts.zo_alt_ztest);
6402 (void) printf("%d runs of newer ztest: %s\n", newer,
6403 cmd);
6404 }
6405 (void) printf("%d killed, %d completed, %.0f%% kill rate\n",
6406 kills, iters - kills, (100.0 * kills) / MAX(1, iters));
6407 }
6408
6409 umem_free(cmd, MAXNAMELEN);
6410
6411 return (0);
6412 }