/*
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012 by Delphix. All rights reserved.
 */
#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_scan.h>
#include <sys/dnode.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/zfs_context.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_znode.h>
#include <sys/spa_impl.h>
#include <sys/dsl_deadlist.h>
#include <sys/bptree.h>
#include <sys/zfeature.h>
#include <sys/zil_impl.h>
int zfs_no_write_throttle = 0;
int zfs_write_limit_shift = 3;			/* 1/8th of physical memory */
int zfs_txg_synctime_ms = 1000;			/* target millisecs to sync a txg */
int zfs_txg_history = 60;			/* statistics for the last N txgs */

unsigned long zfs_write_limit_min = 32 << 20;	/* min write limit is 32MB */
unsigned long zfs_write_limit_max = 0;		/* max data payload per txg */
unsigned long zfs_write_limit_inflated = 0;
unsigned long zfs_write_limit_override = 0;

kmutex_t zfs_write_limit_lock;

static pgcnt_t old_physmem = 0;
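/*
 * For illustration: with the defaults above on a machine with 4 GiB of
 * physical memory, dsl_pool_sync() derives zfs_write_limit_max as
 * ptob(physmem) >> zfs_write_limit_shift, i.e. 4 GiB / 8 = 512 MiB, and
 * zfs_write_limit_inflated further scales that by the worst-case
 * replication factor via spa_get_asize().  The 4 GiB figure is only an
 * example, not a recommendation.
 */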
static void
dsl_pool_tx_assign_init(dsl_pool_t *dp, unsigned int ndata)
{
	kstat_named_t *ks;
	char name[KSTAT_STRLEN];
	int i, data_size = ndata * sizeof (kstat_named_t);

	(void) snprintf(name, KSTAT_STRLEN, "dmu_tx_assign-%s",
	    spa_name(dp->dp_spa));

	dp->dp_tx_assign_size = ndata;

	if (data_size)
		dp->dp_tx_assign_buckets = kmem_alloc(data_size, KM_SLEEP);
	else
		dp->dp_tx_assign_buckets = NULL;

	for (i = 0; i < dp->dp_tx_assign_size; i++) {
		ks = &dp->dp_tx_assign_buckets[i];
		ks->data_type = KSTAT_DATA_UINT64;
		ks->value.ui64 = 0;
		(void) snprintf(ks->name, KSTAT_STRLEN, "%u us", 1 << i);
	}

	dp->dp_tx_assign_kstat = kstat_create("zfs", 0, name, "misc",
	    KSTAT_TYPE_NAMED, 0, KSTAT_FLAG_VIRTUAL);

	if (dp->dp_tx_assign_kstat) {
		dp->dp_tx_assign_kstat->ks_data = dp->dp_tx_assign_buckets;
		dp->dp_tx_assign_kstat->ks_ndata = dp->dp_tx_assign_size;
		dp->dp_tx_assign_kstat->ks_data_size = data_size;
		kstat_install(dp->dp_tx_assign_kstat);
	}
}
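/*
 * The kstat created above is a latency histogram: bucket i counts
 * dmu_tx_assign() calls that completed in roughly 2^i microseconds
 * (see dsl_pool_tx_assign_add_usecs() below).  With the SPL kstat
 * implementation this is typically readable from userspace as
 * /proc/spl/kstat/zfs/dmu_tx_assign-<pool>; the exact path depends
 * on the platform's kstat support.
 */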
static void
dsl_pool_tx_assign_destroy(dsl_pool_t *dp)
{
	if (dp->dp_tx_assign_buckets)
		kmem_free(dp->dp_tx_assign_buckets,
		    dp->dp_tx_assign_size * sizeof (kstat_named_t));

	if (dp->dp_tx_assign_kstat)
		kstat_delete(dp->dp_tx_assign_kstat);
}
void
dsl_pool_tx_assign_add_usecs(dsl_pool_t *dp, uint64_t usecs)
{
	int idx = 0;

	while (((1 << idx) < usecs) && (idx < dp->dp_tx_assign_size - 1))
		idx++;

	atomic_inc_64(&dp->dp_tx_assign_buckets[idx].value.ui64);
}
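/*
 * Worked example: a dmu_tx_assign() that took 150 us walks idx up
 * until 1 << idx >= 150, i.e. idx = 8, and increments the "256 us"
 * bucket.  Times beyond the last bucket (idx = dp_tx_assign_size - 1)
 * all land in that final bucket, so the histogram never overflows.
 */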
static int
dsl_pool_txg_history_update(kstat_t *ksp, int rw)
{
	dsl_pool_t *dp = ksp->ks_private;
	txg_history_t *th;
	int i = 0;

	if (rw == KSTAT_WRITE)
		return (EACCES);

	if (ksp->ks_data)
		kmem_free(ksp->ks_data, ksp->ks_data_size);

	mutex_enter(&dp->dp_lock);

	ksp->ks_ndata = dp->dp_txg_history_size;
	ksp->ks_data_size = dp->dp_txg_history_size * sizeof (kstat_txg_t);
	if (ksp->ks_data_size > 0)
		ksp->ks_data = kmem_alloc(ksp->ks_data_size, KM_PUSHPAGE);

	/* Traversed oldest to youngest for the most readable kstat output */
	for (th = list_tail(&dp->dp_txg_history); th != NULL;
	    th = list_prev(&dp->dp_txg_history, th)) {
		mutex_enter(&th->th_lock);
		ASSERT3S(i + sizeof (kstat_txg_t), <=, ksp->ks_data_size);
		memcpy(ksp->ks_data + i, &th->th_kstat, sizeof (kstat_txg_t));
		i += sizeof (kstat_txg_t);
		mutex_exit(&th->th_lock);
	}

	mutex_exit(&dp->dp_lock);

	return (0);
}
static void
dsl_pool_txg_history_init(dsl_pool_t *dp, uint64_t txg)
{
	char name[KSTAT_STRLEN];

	list_create(&dp->dp_txg_history, sizeof (txg_history_t),
	    offsetof(txg_history_t, th_link));
	dsl_pool_txg_history_add(dp, txg);

	(void) snprintf(name, KSTAT_STRLEN, "txgs-%s", spa_name(dp->dp_spa));
	dp->dp_txg_kstat = kstat_create("zfs", 0, name, "misc",
	    KSTAT_TYPE_TXG, 0, KSTAT_FLAG_VIRTUAL);
	if (dp->dp_txg_kstat) {
		dp->dp_txg_kstat->ks_data = NULL;
		dp->dp_txg_kstat->ks_private = dp;
		dp->dp_txg_kstat->ks_update = dsl_pool_txg_history_update;
		kstat_install(dp->dp_txg_kstat);
	}
}
static void
dsl_pool_txg_history_destroy(dsl_pool_t *dp)
{
	txg_history_t *th;

	if (dp->dp_txg_kstat) {
		if (dp->dp_txg_kstat->ks_data)
			kmem_free(dp->dp_txg_kstat->ks_data,
			    dp->dp_txg_kstat->ks_data_size);

		kstat_delete(dp->dp_txg_kstat);
	}

	mutex_enter(&dp->dp_lock);
	while ((th = list_remove_head(&dp->dp_txg_history))) {
		dp->dp_txg_history_size--;
		mutex_destroy(&th->th_lock);
		kmem_free(th, sizeof (txg_history_t));
	}

	ASSERT3U(dp->dp_txg_history_size, ==, 0);
	list_destroy(&dp->dp_txg_history);
	mutex_exit(&dp->dp_lock);
}
txg_history_t *
dsl_pool_txg_history_add(dsl_pool_t *dp, uint64_t txg)
{
	txg_history_t *th, *rm;

	th = kmem_zalloc(sizeof (txg_history_t), KM_PUSHPAGE);
	mutex_init(&th->th_lock, NULL, MUTEX_DEFAULT, NULL);
	th->th_kstat.txg = txg;
	th->th_kstat.state = TXG_STATE_OPEN;
	th->th_kstat.birth = gethrtime();

	mutex_enter(&dp->dp_lock);

	list_insert_head(&dp->dp_txg_history, th);
	dp->dp_txg_history_size++;

	/* Prune the oldest entries so at most zfs_txg_history remain. */
	while (dp->dp_txg_history_size > zfs_txg_history) {
		dp->dp_txg_history_size--;
		rm = list_remove_tail(&dp->dp_txg_history);
		mutex_destroy(&rm->th_lock);
		kmem_free(rm, sizeof (txg_history_t));
	}

	mutex_exit(&dp->dp_lock);

	return (th);
}
/*
 * Traversed youngest to oldest because lookups are only done for open
 * or syncing txgs which are guaranteed to be at the head of the list.
 * The txg_history_t structure will be returned locked.
 */
txg_history_t *
dsl_pool_txg_history_get(dsl_pool_t *dp, uint64_t txg)
{
	txg_history_t *th;

	mutex_enter(&dp->dp_lock);
	for (th = list_head(&dp->dp_txg_history); th != NULL;
	    th = list_next(&dp->dp_txg_history, th)) {
		if (th->th_kstat.txg == txg) {
			mutex_enter(&th->th_lock);
			break;
		}
	}
	mutex_exit(&dp->dp_lock);

	return (th);
}

void
dsl_pool_txg_history_put(txg_history_t *th)
{
	mutex_exit(&th->th_lock);
}
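/*
 * Typical usage pattern, sketched for illustration; the caller must
 * balance every successful get with a put to release th_lock:
 *
 *	txg_history_t *th = dsl_pool_txg_history_get(dp, txg);
 *	if (th != NULL) {
 *		th->th_kstat.state = TXG_STATE_SYNCING;
 *		dsl_pool_txg_history_put(th);
 *	}
 *
 * The state update shown is only an example of a field a caller might
 * modify while holding the entry locked.
 */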
static int
dsl_pool_open_special_dir(dsl_pool_t *dp, const char *name, dsl_dir_t **ddp)
{
	uint64_t obj;
	int err;

	err = zap_lookup(dp->dp_meta_objset,
	    dp->dp_root_dir->dd_phys->dd_child_dir_zapobj,
	    name, sizeof (obj), 1, &obj);
	if (err)
		return (err);

	return (dsl_dir_open_obj(dp, obj, name, dp, ddp));
}
static dsl_pool_t *
dsl_pool_open_impl(spa_t *spa, uint64_t txg)
{
	dsl_pool_t *dp;
	blkptr_t *bp = spa_get_rootblkptr(spa);

	dp = kmem_zalloc(sizeof (dsl_pool_t), KM_SLEEP);
	dp->dp_spa = spa;
	dp->dp_meta_rootbp = *bp;
	rw_init(&dp->dp_config_rwlock, NULL, RW_DEFAULT, NULL);
	dp->dp_write_limit = zfs_write_limit_min;
	txg_init(dp, txg);

	txg_list_create(&dp->dp_dirty_datasets,
	    offsetof(dsl_dataset_t, ds_dirty_link));
	txg_list_create(&dp->dp_dirty_zilogs,
	    offsetof(zilog_t, zl_dirty_link));
	txg_list_create(&dp->dp_dirty_dirs,
	    offsetof(dsl_dir_t, dd_dirty_link));
	txg_list_create(&dp->dp_sync_tasks,
	    offsetof(dsl_sync_task_group_t, dstg_node));

	mutex_init(&dp->dp_lock, NULL, MUTEX_DEFAULT, NULL);

	dp->dp_iput_taskq = taskq_create("zfs_iput_taskq", 1, minclsyspri,
	    1, 4, 0);

	dsl_pool_txg_history_init(dp, txg);
	dsl_pool_tx_assign_init(dp, 32);

	return (dp);
}
int
dsl_pool_init(spa_t *spa, uint64_t txg, dsl_pool_t **dpp)
{
	int err;
	dsl_pool_t *dp = dsl_pool_open_impl(spa, txg);

	err = dmu_objset_open_impl(spa, NULL, &dp->dp_meta_rootbp,
	    &dp->dp_meta_objset);
	if (err != 0)
		dsl_pool_close(dp);
	else
		*dpp = dp;

	return (err);
}
int
dsl_pool_open(dsl_pool_t *dp)
{
	int err;
	dsl_dir_t *dd;
	dsl_dataset_t *ds;
	uint64_t obj;

	rw_enter(&dp->dp_config_rwlock, RW_WRITER);
	err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_ROOT_DATASET, sizeof (uint64_t), 1,
	    &dp->dp_root_dir_obj);
	if (err)
		goto out;

	err = dsl_dir_open_obj(dp, dp->dp_root_dir_obj,
	    NULL, dp, &dp->dp_root_dir);
	if (err)
		goto out;

	err = dsl_pool_open_special_dir(dp, MOS_DIR_NAME, &dp->dp_mos_dir);
	if (err)
		goto out;

	if (spa_version(dp->dp_spa) >= SPA_VERSION_ORIGIN) {
		err = dsl_pool_open_special_dir(dp, ORIGIN_DIR_NAME, &dd);
		if (err)
			goto out;
		err = dsl_dataset_hold_obj(dp, dd->dd_phys->dd_head_dataset_obj,
		    FTAG, &ds);
		if (err == 0) {
			err = dsl_dataset_hold_obj(dp,
			    ds->ds_phys->ds_prev_snap_obj, dp,
			    &dp->dp_origin_snap);
			dsl_dataset_rele(ds, FTAG);
		}
		dsl_dir_close(dd, dp);
		if (err)
			goto out;
	}

	if (spa_version(dp->dp_spa) >= SPA_VERSION_DEADLISTS) {
		err = dsl_pool_open_special_dir(dp, FREE_DIR_NAME,
		    &dp->dp_free_dir);
		if (err)
			goto out;

		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_FREE_BPOBJ, sizeof (uint64_t), 1, &obj);
		if (err)
			goto out;
		VERIFY3U(0, ==, bpobj_open(&dp->dp_free_bpobj,
		    dp->dp_meta_objset, obj));
	}

	if (spa_feature_is_active(dp->dp_spa,
	    &spa_feature_table[SPA_FEATURE_ASYNC_DESTROY])) {
		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_BPTREE_OBJ, sizeof (uint64_t), 1,
		    &dp->dp_bptree_obj);
		if (err != 0)
			goto out;
	}

	if (spa_feature_is_active(dp->dp_spa,
	    &spa_feature_table[SPA_FEATURE_EMPTY_BPOBJ])) {
		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_EMPTY_BPOBJ, sizeof (uint64_t), 1,
		    &dp->dp_empty_bpobj);
		if (err != 0)
			goto out;
	}

	err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_TMP_USERREFS, sizeof (uint64_t), 1,
	    &dp->dp_tmp_userrefs_obj);
	if (err == ENOENT)
		err = 0;
	if (err)
		goto out;

	err = dsl_scan_init(dp, dp->dp_tx.tx_open_txg);

out:
	rw_exit(&dp->dp_config_rwlock);
	return (err);
}
void
dsl_pool_close(dsl_pool_t *dp)
{
	/* drop our references from dsl_pool_open() */

	/*
	 * Since we held the origin_snap from "syncing" context (which
	 * includes pool-opening context), it actually only got a "ref"
	 * and not a hold, so just drop that here.
	 */
	if (dp->dp_origin_snap)
		dsl_dataset_drop_ref(dp->dp_origin_snap, dp);
	if (dp->dp_mos_dir)
		dsl_dir_close(dp->dp_mos_dir, dp);
	if (dp->dp_free_dir)
		dsl_dir_close(dp->dp_free_dir, dp);
	if (dp->dp_root_dir)
		dsl_dir_close(dp->dp_root_dir, dp);

	bpobj_close(&dp->dp_free_bpobj);

	/* undo the dmu_objset_open_impl(mos) from dsl_pool_open() */
	if (dp->dp_meta_objset)
		dmu_objset_evict(dp->dp_meta_objset);

	txg_list_destroy(&dp->dp_dirty_datasets);
	txg_list_destroy(&dp->dp_dirty_zilogs);
	txg_list_destroy(&dp->dp_sync_tasks);
	txg_list_destroy(&dp->dp_dirty_dirs);

	arc_flush(dp->dp_spa);

	txg_fini(dp);
	dsl_pool_tx_assign_destroy(dp);
	dsl_pool_txg_history_destroy(dp);
	rw_destroy(&dp->dp_config_rwlock);
	mutex_destroy(&dp->dp_lock);
	taskq_destroy(dp->dp_iput_taskq);
	if (dp->dp_blkstats)
		kmem_free(dp->dp_blkstats, sizeof (zfs_all_blkstats_t));
	kmem_free(dp, sizeof (dsl_pool_t));
}
dsl_pool_t *
dsl_pool_create(spa_t *spa, nvlist_t *zplprops, uint64_t txg)
{
	int err;
	dsl_pool_t *dp = dsl_pool_open_impl(spa, txg);
	dmu_tx_t *tx = dmu_tx_create_assigned(dp, txg);
	objset_t *os;
	dsl_dataset_t *ds;
	uint64_t obj;

	/* create and open the MOS (meta-objset) */
	dp->dp_meta_objset = dmu_objset_create_impl(spa,
	    NULL, &dp->dp_meta_rootbp, DMU_OST_META, tx);

	/* create the pool directory */
	err = zap_create_claim(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_OT_OBJECT_DIRECTORY, DMU_OT_NONE, 0, tx);
	ASSERT3U(err, ==, 0);

	/* Initialize scan structures */
	VERIFY3U(0, ==, dsl_scan_init(dp, txg));

	/* create and open the root dir */
	dp->dp_root_dir_obj = dsl_dir_create_sync(dp, NULL, NULL, tx);
	VERIFY(0 == dsl_dir_open_obj(dp, dp->dp_root_dir_obj,
	    NULL, dp, &dp->dp_root_dir));

	/* create and open the meta-objset dir */
	(void) dsl_dir_create_sync(dp, dp->dp_root_dir, MOS_DIR_NAME, tx);
	VERIFY(0 == dsl_pool_open_special_dir(dp,
	    MOS_DIR_NAME, &dp->dp_mos_dir));

	if (spa_version(spa) >= SPA_VERSION_DEADLISTS) {
		/* create and open the free dir */
		(void) dsl_dir_create_sync(dp, dp->dp_root_dir,
		    FREE_DIR_NAME, tx);
		VERIFY(0 == dsl_pool_open_special_dir(dp,
		    FREE_DIR_NAME, &dp->dp_free_dir));

		/* create and open the free_bplist */
		obj = bpobj_alloc(dp->dp_meta_objset, SPA_MAXBLOCKSIZE, tx);
		VERIFY(zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_FREE_BPOBJ, sizeof (uint64_t), 1, &obj, tx) == 0);
		VERIFY3U(0, ==, bpobj_open(&dp->dp_free_bpobj,
		    dp->dp_meta_objset, obj));
	}

	if (spa_version(spa) >= SPA_VERSION_DSL_SCRUB)
		dsl_pool_create_origin(dp, tx);

	/* create the root dataset */
	obj = dsl_dataset_create_sync_dd(dp->dp_root_dir, NULL, 0, tx);

	/* create the root objset */
	VERIFY(0 == dsl_dataset_hold_obj(dp, obj, FTAG, &ds));
	VERIFY(NULL != (os = dmu_objset_create_impl(dp->dp_spa, ds,
	    dsl_dataset_get_blkptr(ds), DMU_OST_ZFS, tx)));
#ifdef _KERNEL
	zfs_create_fs(os, kcred, zplprops, tx);
#endif
	dsl_dataset_rele(ds, FTAG);

	dmu_tx_commit(tx);

	return (dp);
}
/*
 * Account for the meta-objset space in its placeholder dsl_dir.
 */
void
dsl_pool_mos_diduse_space(dsl_pool_t *dp,
    int64_t used, int64_t comp, int64_t uncomp)
{
	ASSERT3U(comp, ==, uncomp); /* it's all metadata */
	mutex_enter(&dp->dp_lock);
	dp->dp_mos_used_delta += used;
	dp->dp_mos_compressed_delta += comp;
	dp->dp_mos_uncompressed_delta += uncomp;
	mutex_exit(&dp->dp_lock);
}
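/*
 * These deltas deliberately stay in memory rather than dirtying the
 * MOS immediately: dsl_pool_sync() folds them into pool/$MOS via
 * dsl_dir_diduse_space() once it is safe to modify the MOS again.
 * See the dp_mos_used_delta handling in dsl_pool_sync() below.
 */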
static int
deadlist_enqueue_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
	dsl_deadlist_t *dl = arg;
	dsl_pool_t *dp = dmu_objset_pool(dl->dl_os);

	rw_enter(&dp->dp_config_rwlock, RW_READER);
	dsl_deadlist_insert(dl, bp, tx);
	rw_exit(&dp->dp_config_rwlock);

	return (0);
}
void
dsl_pool_sync(dsl_pool_t *dp, uint64_t txg)
{
	zio_t *zio;
	dmu_tx_t *tx;
	dsl_dir_t *dd;
	dsl_dataset_t *ds;
	objset_t *mos = dp->dp_meta_objset;
	hrtime_t start, write_time;
	uint64_t data_written;
	int err;
	list_t synced_datasets;

	list_create(&synced_datasets, sizeof (dsl_dataset_t),
	    offsetof(dsl_dataset_t, ds_synced_link));

	/*
	 * We need to copy dp_space_towrite() before doing
	 * dsl_sync_task_group_sync(), because
	 * dsl_dataset_snapshot_reserve_space() will increase
	 * dp_space_towrite but not actually write anything.
	 */
	data_written = dp->dp_space_towrite[txg & TXG_MASK];

	tx = dmu_tx_create_assigned(dp, txg);

	dp->dp_read_overhead = 0;
	start = gethrtime();

	zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
	while ((ds = txg_list_remove(&dp->dp_dirty_datasets, txg))) {
		/*
		 * We must not sync any non-MOS datasets twice, because
		 * we may have taken a snapshot of them.  However, we
		 * may sync newly-created datasets on pass 2.
		 */
		ASSERT(!list_link_active(&ds->ds_synced_link));
		list_insert_tail(&synced_datasets, ds);
		dsl_dataset_sync(ds, zio, tx);
	}
	DTRACE_PROBE(pool_sync__1setup);
	err = zio_wait(zio);

	write_time = gethrtime() - start;
	ASSERT(err == 0);
	DTRACE_PROBE(pool_sync__2rootzio);

	/*
	 * After the data blocks have been written (ensured by the zio_wait()
	 * above), update the user/group space accounting.
	 */
	for (ds = list_head(&synced_datasets); ds;
	    ds = list_next(&synced_datasets, ds))
		dmu_objset_do_userquota_updates(ds->ds_objset, tx);

	/*
	 * Sync the datasets again to push out the changes due to
	 * userspace updates.  This must be done before we process the
	 * sync tasks, so that any snapshots will have the correct
	 * user accounting information (and we won't get confused
	 * about which blocks are part of the snapshot).
	 */
	zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
	while ((ds = txg_list_remove(&dp->dp_dirty_datasets, txg))) {
		ASSERT(list_link_active(&ds->ds_synced_link));
		dmu_buf_rele(ds->ds_dbuf, ds);
		dsl_dataset_sync(ds, zio, tx);
	}
	err = zio_wait(zio);

	/*
	 * Now that the datasets have been completely synced, we can
	 * clean up our in-memory structures accumulated while syncing:
	 *
	 *  - move dead blocks from the pending deadlist to the on-disk deadlist
	 *  - clean up zil records
	 *  - release hold from dsl_dataset_dirty()
	 */
	while ((ds = list_remove_head(&synced_datasets))) {
		ASSERTV(objset_t *os = ds->ds_objset);
		bplist_iterate(&ds->ds_pending_deadlist,
		    deadlist_enqueue_cb, &ds->ds_deadlist, tx);
		ASSERT(!dmu_objset_is_dirty(os, txg));
		dmu_buf_rele(ds->ds_dbuf, ds);
	}

	start = gethrtime();
	while ((dd = txg_list_remove(&dp->dp_dirty_dirs, txg)))
		dsl_dir_sync(dd, tx);
	write_time += gethrtime() - start;

	/*
	 * The MOS's space is accounted for in the pool/$MOS
	 * (dp_mos_dir).  We can't modify the mos while we're syncing
	 * it, so we remember the deltas and apply them here.
	 */
	if (dp->dp_mos_used_delta != 0 || dp->dp_mos_compressed_delta != 0 ||
	    dp->dp_mos_uncompressed_delta != 0) {
		dsl_dir_diduse_space(dp->dp_mos_dir, DD_USED_HEAD,
		    dp->dp_mos_used_delta,
		    dp->dp_mos_compressed_delta,
		    dp->dp_mos_uncompressed_delta, tx);
		dp->dp_mos_used_delta = 0;
		dp->dp_mos_compressed_delta = 0;
		dp->dp_mos_uncompressed_delta = 0;
	}

	start = gethrtime();
	if (list_head(&mos->os_dirty_dnodes[txg & TXG_MASK]) != NULL ||
	    list_head(&mos->os_free_dnodes[txg & TXG_MASK]) != NULL) {
		zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
		dmu_objset_sync(mos, zio, tx);
		err = zio_wait(zio);
		ASSERT(err == 0);
		dprintf_bp(&dp->dp_meta_rootbp, "meta objset rootbp is %s", "");
		spa_set_rootblkptr(dp->dp_spa, &dp->dp_meta_rootbp);
	}
	write_time += gethrtime() - start;
	DTRACE_PROBE2(pool_sync__4io, hrtime_t, write_time,
	    hrtime_t, dp->dp_read_overhead);
	write_time -= dp->dp_read_overhead;

	/*
	 * If we modify a dataset in the same txg that we want to destroy it,
	 * its dsl_dir's dd_dbuf will be dirty, and thus have a hold on it.
	 * dsl_dir_destroy_check() will fail if there are unexpected holds.
	 * Therefore, we want to sync the MOS (thus syncing the dd_dbuf
	 * and clearing the hold on it) before we process the sync_tasks.
	 * The MOS data dirtied by the sync_tasks will be synced on the next
	 * pass.
	 */
	DTRACE_PROBE(pool_sync__3task);
	if (!txg_list_empty(&dp->dp_sync_tasks, txg)) {
		dsl_sync_task_group_t *dstg;
		/*
		 * No more sync tasks should have been added while we
		 * were syncing.
		 */
		ASSERT(spa_sync_pass(dp->dp_spa) == 1);
		while ((dstg = txg_list_remove(&dp->dp_sync_tasks, txg)))
			dsl_sync_task_group_sync(dstg, tx);
	}

	dmu_tx_commit(tx);

	dp->dp_space_towrite[txg & TXG_MASK] = 0;
	ASSERT(dp->dp_tempreserved[txg & TXG_MASK] == 0);

	/*
	 * If the write limit max has not been explicitly set, set it
	 * to a fraction of available physical memory (default 1/8th).
	 * Note that we must inflate the limit because the spa
	 * inflates write sizes to account for data replication.
	 * Check this each sync phase to catch changing memory size.
	 */
	if (physmem != old_physmem && zfs_write_limit_shift) {
		mutex_enter(&zfs_write_limit_lock);
		old_physmem = physmem;
		zfs_write_limit_max = ptob(physmem) >> zfs_write_limit_shift;
		zfs_write_limit_inflated = MAX(zfs_write_limit_min,
		    spa_get_asize(dp->dp_spa, zfs_write_limit_max));
		mutex_exit(&zfs_write_limit_lock);
	}

	/*
	 * Attempt to keep the sync time consistent by adjusting the
	 * amount of write traffic allowed into each transaction group.
	 * Weight the throughput calculation towards the current value:
	 *	thru = 3/4 old_thru + 1/4 new_thru
	 *
	 * Note: write_time is in nanosecs, so write_time/MICROSEC
	 * yields millisecs
	 */
	ASSERT(zfs_write_limit_min > 0);
	if (data_written > zfs_write_limit_min / 8 && write_time > MICROSEC) {
		uint64_t throughput = data_written / (write_time / MICROSEC);

		if (dp->dp_throughput)
			dp->dp_throughput = throughput / 4 +
			    3 * dp->dp_throughput / 4;
		else
			dp->dp_throughput = throughput;
		dp->dp_write_limit = MIN(zfs_write_limit_inflated,
		    MAX(zfs_write_limit_min,
		    dp->dp_throughput * zfs_txg_synctime_ms));
	}
}
void
dsl_pool_sync_done(dsl_pool_t *dp, uint64_t txg)
{
	zilog_t *zilog;
	dsl_dataset_t *ds;

	while ((zilog = txg_list_remove(&dp->dp_dirty_zilogs, txg))) {
		ds = dmu_objset_ds(zilog->zl_os);
		zil_clean(zilog, txg);
		ASSERT(!dmu_objset_is_dirty(zilog->zl_os, txg));
		dmu_buf_rele(ds->ds_dbuf, zilog);
	}
	ASSERT(!dmu_objset_is_dirty(dp->dp_meta_objset, txg));
}
/*
 * TRUE if the current thread is the tx_sync_thread or if we
 * are being called from SPA context during pool initialization.
 */
int
dsl_pool_sync_context(dsl_pool_t *dp)
{
	return (curthread == dp->dp_tx.tx_sync_thread ||
	    spa_is_initializing(dp->dp_spa));
}
uint64_t
dsl_pool_adjustedsize(dsl_pool_t *dp, boolean_t netfree)
{
	uint64_t space, resv;

	/*
	 * Reserve about 1.6% (1/64), or at least 32MB, for allocation
	 * efficiency.
	 * XXX The intent log is not accounted for, so it must fit
	 * within this slop.
	 *
	 * If we're trying to assess whether it's OK to do a free,
	 * cut the reservation in half to allow forward progress
	 * (e.g. make it possible to rm(1) files from a full pool).
	 */
	space = spa_get_dspace(dp->dp_spa);
	resv = MAX(space >> 6, SPA_MINDEVSIZE >> 1);
	if (netfree)
		resv >>= 1;

	return (space - resv);
}
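/*
 * Example with round numbers: on a pool with 1 TiB of dspace,
 * space >> 6 is 16 GiB, which exceeds SPA_MINDEVSIZE >> 1 (32 MiB),
 * so ~16 GiB is held back; a netfree caller would see only ~8 GiB
 * held back, leaving more headroom to complete frees.
 */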
int
dsl_pool_tempreserve_space(dsl_pool_t *dp, uint64_t space, dmu_tx_t *tx)
{
	uint64_t reserved = 0;
	uint64_t write_limit = (zfs_write_limit_override ?
	    zfs_write_limit_override : dp->dp_write_limit);

	if (zfs_no_write_throttle) {
		atomic_add_64(&dp->dp_tempreserved[tx->tx_txg & TXG_MASK],
		    space);
		return (0);
	}

	/*
	 * Check to see if we have exceeded the maximum allowed IO for
	 * this transaction group.  We can do this without locks since
	 * a little slop here is ok.  Note that we do the reserved check
	 * with only half the requested reserve: this is because the
	 * reserve requests are worst-case, and we really don't want to
	 * throttle based off of worst-case estimates.
	 */
	if (write_limit > 0) {
		reserved = dp->dp_space_towrite[tx->tx_txg & TXG_MASK]
		    + dp->dp_tempreserved[tx->tx_txg & TXG_MASK] / 2;

		if (reserved && reserved > write_limit) {
			DMU_TX_STAT_BUMP(dmu_tx_write_limit);
			return (ERESTART);
		}
	}

	atomic_add_64(&dp->dp_tempreserved[tx->tx_txg & TXG_MASK], space);

	/*
	 * If this transaction group is over 7/8ths capacity, delay
	 * the caller 1 clock tick.  This will slow down the "fill"
	 * rate until the sync process can catch up with us.
	 */
	if (reserved && reserved > (write_limit - (write_limit >> 3)))
		txg_delay(dp, tx->tx_txg, 1);

	return (0);
}
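/*
 * Example of the 7/8 threshold: with a 128 MiB write limit,
 * write_limit - (write_limit >> 3) is 112 MiB, so once the pending
 * writes plus half the temporary reserves pass 112 MiB the caller is
 * delayed one tick; past 128 MiB the reservation fails with ERESTART
 * so the caller can back off and retry against a later txg.
 */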
void
dsl_pool_tempreserve_clear(dsl_pool_t *dp, int64_t space, dmu_tx_t *tx)
{
	ASSERT(dp->dp_tempreserved[tx->tx_txg & TXG_MASK] >= space);
	atomic_add_64(&dp->dp_tempreserved[tx->tx_txg & TXG_MASK], -space);
}
void
dsl_pool_memory_pressure(dsl_pool_t *dp)
{
	uint64_t space_inuse = 0;
	int i;

	if (dp->dp_write_limit == zfs_write_limit_min)
		return;

	for (i = 0; i < TXG_SIZE; i++) {
		space_inuse += dp->dp_space_towrite[i];
		space_inuse += dp->dp_tempreserved[i];
	}
	dp->dp_write_limit = MAX(zfs_write_limit_min,
	    MIN(dp->dp_write_limit, space_inuse / 4));
}
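/*
 * Example: if 400 MiB is currently reserved or pending across all
 * txgs, the limit is pulled down toward 100 MiB (space_inuse / 4),
 * but never below zfs_write_limit_min and never above the limit
 * already in effect.
 */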
void
dsl_pool_willuse_space(dsl_pool_t *dp, int64_t space, dmu_tx_t *tx)
{
	if (space > 0) {
		mutex_enter(&dp->dp_lock);
		dp->dp_space_towrite[tx->tx_txg & TXG_MASK] += space;
		mutex_exit(&dp->dp_lock);
	}
}
/* ARGSUSED */
static int
upgrade_clones_cb(spa_t *spa, uint64_t dsobj, const char *dsname, void *arg)
{
	dmu_tx_t *tx = arg;
	dsl_dataset_t *ds, *prev = NULL;
	int err;
	dsl_pool_t *dp = spa_get_dsl(spa);

	err = dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds);
	if (err)
		return (err);

	while (ds->ds_phys->ds_prev_snap_obj != 0) {
		err = dsl_dataset_hold_obj(dp, ds->ds_phys->ds_prev_snap_obj,
		    FTAG, &prev);
		if (err) {
			dsl_dataset_rele(ds, FTAG);
			return (err);
		}

		if (prev->ds_phys->ds_next_snap_obj != ds->ds_object)
			break;
		dsl_dataset_rele(ds, FTAG);
		ds = prev;
		prev = NULL;
	}

	if (prev == NULL) {
		prev = dp->dp_origin_snap;

		/*
		 * The $ORIGIN can't have any data, or the accounting
		 * will be wrong.
		 */
		ASSERT(prev->ds_phys->ds_bp.blk_birth == 0);

		/* The origin doesn't get attached to itself */
		if (ds->ds_object == prev->ds_object) {
			dsl_dataset_rele(ds, FTAG);
			return (0);
		}

		dmu_buf_will_dirty(ds->ds_dbuf, tx);
		ds->ds_phys->ds_prev_snap_obj = prev->ds_object;
		ds->ds_phys->ds_prev_snap_txg = prev->ds_phys->ds_creation_txg;

		dmu_buf_will_dirty(ds->ds_dir->dd_dbuf, tx);
		ds->ds_dir->dd_phys->dd_origin_obj = prev->ds_object;

		dmu_buf_will_dirty(prev->ds_dbuf, tx);
		prev->ds_phys->ds_num_children++;

		if (ds->ds_phys->ds_next_snap_obj == 0) {
			ASSERT(ds->ds_prev == NULL);
			VERIFY(0 == dsl_dataset_hold_obj(dp,
			    ds->ds_phys->ds_prev_snap_obj, ds, &ds->ds_prev));
		}
	}

	ASSERT(ds->ds_dir->dd_phys->dd_origin_obj == prev->ds_object);
	ASSERT(ds->ds_phys->ds_prev_snap_obj == prev->ds_object);

	if (prev->ds_phys->ds_next_clones_obj == 0) {
		dmu_buf_will_dirty(prev->ds_dbuf, tx);
		prev->ds_phys->ds_next_clones_obj =
		    zap_create(dp->dp_meta_objset,
		    DMU_OT_NEXT_CLONES, DMU_OT_NONE, 0, tx);
	}
	VERIFY(0 == zap_add_int(dp->dp_meta_objset,
	    prev->ds_phys->ds_next_clones_obj, ds->ds_object, tx));

	dsl_dataset_rele(ds, FTAG);
	if (prev != dp->dp_origin_snap)
		dsl_dataset_rele(prev, FTAG);
	return (0);
}
void
dsl_pool_upgrade_clones(dsl_pool_t *dp, dmu_tx_t *tx)
{
	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(dp->dp_origin_snap != NULL);

	VERIFY3U(0, ==, dmu_objset_find_spa(dp->dp_spa, NULL, upgrade_clones_cb,
	    tx, DS_FIND_CHILDREN));
}
/* ARGSUSED */
static int
upgrade_dir_clones_cb(spa_t *spa, uint64_t dsobj, const char *dsname, void *arg)
{
	dmu_tx_t *tx = arg;
	dsl_dataset_t *ds;
	dsl_pool_t *dp = spa_get_dsl(spa);
	objset_t *mos = dp->dp_meta_objset;

	VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));

	if (ds->ds_dir->dd_phys->dd_origin_obj) {
		dsl_dataset_t *origin;

		VERIFY3U(0, ==, dsl_dataset_hold_obj(dp,
		    ds->ds_dir->dd_phys->dd_origin_obj, FTAG, &origin));

		if (origin->ds_dir->dd_phys->dd_clones == 0) {
			dmu_buf_will_dirty(origin->ds_dir->dd_dbuf, tx);
			origin->ds_dir->dd_phys->dd_clones = zap_create(mos,
			    DMU_OT_DSL_CLONES, DMU_OT_NONE, 0, tx);
		}

		VERIFY3U(0, ==, zap_add_int(dp->dp_meta_objset,
		    origin->ds_dir->dd_phys->dd_clones, dsobj, tx));

		dsl_dataset_rele(origin, FTAG);
	}

	dsl_dataset_rele(ds, FTAG);
	return (0);
}
void
dsl_pool_upgrade_dir_clones(dsl_pool_t *dp, dmu_tx_t *tx)
{
	uint64_t obj;

	ASSERT(dmu_tx_is_syncing(tx));

	(void) dsl_dir_create_sync(dp, dp->dp_root_dir, FREE_DIR_NAME, tx);
	VERIFY(0 == dsl_pool_open_special_dir(dp,
	    FREE_DIR_NAME, &dp->dp_free_dir));

	/*
	 * We can't use bpobj_alloc(), because spa_version() still
	 * returns the old version, and we need a new-version bpobj with
	 * subobj support.  So call dmu_object_alloc() directly.
	 */
	obj = dmu_object_alloc(dp->dp_meta_objset, DMU_OT_BPOBJ,
	    SPA_MAXBLOCKSIZE, DMU_OT_BPOBJ_HDR, sizeof (bpobj_phys_t), tx);
	VERIFY3U(0, ==, zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_FREE_BPOBJ, sizeof (uint64_t), 1, &obj, tx));
	VERIFY3U(0, ==, bpobj_open(&dp->dp_free_bpobj,
	    dp->dp_meta_objset, obj));

	VERIFY3U(0, ==, dmu_objset_find_spa(dp->dp_spa, NULL,
	    upgrade_dir_clones_cb, tx, DS_FIND_CHILDREN));
}
void
dsl_pool_create_origin(dsl_pool_t *dp, dmu_tx_t *tx)
{
	uint64_t dsobj;
	dsl_dataset_t *ds;

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(dp->dp_origin_snap == NULL);

	/* create the origin dir, ds, & snap-ds */
	rw_enter(&dp->dp_config_rwlock, RW_WRITER);
	dsobj = dsl_dataset_create_sync(dp->dp_root_dir, ORIGIN_DIR_NAME,
	    NULL, 0, kcred, tx);
	VERIFY(0 == dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
	dsl_dataset_snapshot_sync(ds, ORIGIN_DIR_NAME, tx);
	VERIFY(0 == dsl_dataset_hold_obj(dp, ds->ds_phys->ds_prev_snap_obj,
	    dp, &dp->dp_origin_snap));
	dsl_dataset_rele(ds, FTAG);
	rw_exit(&dp->dp_config_rwlock);
}
taskq_t *
dsl_pool_iput_taskq(dsl_pool_t *dp)
{
	return (dp->dp_iput_taskq);
}
/*
 * Walk through the pool-wide zap object of temporary snapshot user holds
 * and release them.
 */
void
dsl_pool_clean_tmp_userrefs(dsl_pool_t *dp)
{
	zap_attribute_t za;
	zap_cursor_t zc;
	objset_t *mos = dp->dp_meta_objset;
	uint64_t zapobj = dp->dp_tmp_userrefs_obj;

	if (zapobj == 0)
		return;

	ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);

	for (zap_cursor_init(&zc, mos, zapobj);
	    zap_cursor_retrieve(&zc, &za) == 0;
	    zap_cursor_advance(&zc)) {
		char *htag;
		uint64_t dsobj;

		htag = strchr(za.za_name, '-');
		*htag = '\0';
		++htag;
		dsobj = strtonum(za.za_name, NULL);
		(void) dsl_dataset_user_release_tmp(dp, dsobj, htag, B_FALSE);
	}
	zap_cursor_fini(&zc);
}
/*
 * Create the pool-wide zap object for storing temporary snapshot holds.
 */
static void
dsl_pool_user_hold_create_obj(dsl_pool_t *dp, dmu_tx_t *tx)
{
	objset_t *mos = dp->dp_meta_objset;

	ASSERT(dp->dp_tmp_userrefs_obj == 0);
	ASSERT(dmu_tx_is_syncing(tx));

	dp->dp_tmp_userrefs_obj = zap_create_link(mos, DMU_OT_USERREFS,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_TMP_USERREFS, tx);
}
static int
dsl_pool_user_hold_rele_impl(dsl_pool_t *dp, uint64_t dsobj,
    const char *tag, uint64_t *now, dmu_tx_t *tx, boolean_t holding)
{
	objset_t *mos = dp->dp_meta_objset;
	uint64_t zapobj = dp->dp_tmp_userrefs_obj;
	char *name;
	int error;

	ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);
	ASSERT(dmu_tx_is_syncing(tx));

	/*
	 * If the pool was created prior to SPA_VERSION_USERREFS, the
	 * zap object for temporary holds might not exist yet.
	 */
	if (zapobj == 0) {
		if (holding) {
			dsl_pool_user_hold_create_obj(dp, tx);
			zapobj = dp->dp_tmp_userrefs_obj;
		} else {
			return (ENOENT);
		}
	}

	name = kmem_asprintf("%llx-%s", (u_longlong_t)dsobj, tag);
	if (holding)
		error = zap_add(mos, zapobj, name, 8, 1, now, tx);
	else
		error = zap_remove(mos, zapobj, name, tx);
	strfree(name);

	return (error);
}
/*
 * Add a temporary hold for the given dataset object and tag.
 */
int
dsl_pool_user_hold(dsl_pool_t *dp, uint64_t dsobj, const char *tag,
    uint64_t *now, dmu_tx_t *tx)
{
	return (dsl_pool_user_hold_rele_impl(dp, dsobj, tag, now, tx, B_TRUE));
}
/*
 * Release a temporary hold for the given dataset object and tag.
 */
int
dsl_pool_user_release(dsl_pool_t *dp, uint64_t dsobj, const char *tag,
    dmu_tx_t *tx)
{
	return (dsl_pool_user_hold_rele_impl(dp, dsobj, tag, NULL,
	    tx, B_FALSE));
}
#if defined(_KERNEL) && defined(HAVE_SPL)
module_param(zfs_no_write_throttle, int, 0644);
MODULE_PARM_DESC(zfs_no_write_throttle, "Disable write throttling");

module_param(zfs_write_limit_shift, int, 0444);
MODULE_PARM_DESC(zfs_write_limit_shift, "log2(fraction of memory) per txg");

module_param(zfs_txg_synctime_ms, int, 0644);
MODULE_PARM_DESC(zfs_txg_synctime_ms, "Target milliseconds between txg sync");

module_param(zfs_txg_history, int, 0644);
MODULE_PARM_DESC(zfs_txg_history, "Historic statistics for the last N txgs");

module_param(zfs_write_limit_min, ulong, 0444);
MODULE_PARM_DESC(zfs_write_limit_min, "Min txg write limit");

module_param(zfs_write_limit_max, ulong, 0444);
MODULE_PARM_DESC(zfs_write_limit_max, "Max txg write limit");

module_param(zfs_write_limit_inflated, ulong, 0444);
MODULE_PARM_DESC(zfs_write_limit_inflated, "Inflated txg write limit");

module_param(zfs_write_limit_override, ulong, 0444);
MODULE_PARM_DESC(zfs_write_limit_override, "Override txg write limit");
#endif