/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012 by Delphix. All rights reserved.
 */

#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_scan.h>
#include <sys/dnode.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/arc.h>
#include <sys/zap.h>
#include <sys/zio.h>
#include <sys/zfs_context.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_znode.h>
#include <sys/spa_impl.h>
#include <sys/dsl_deadlist.h>
#include <sys/bptree.h>
#include <sys/zfeature.h>
#include <sys/zil_impl.h>

int zfs_no_write_throttle = 0;
int zfs_write_limit_shift = 3;			/* 1/8th of physical memory */
int zfs_txg_synctime_ms = 1000;		/* target millisecs to sync a txg */
int zfs_txg_history = 60;		/* statistics for the last N txgs */

unsigned long zfs_write_limit_min = 32 << 20;	/* min write limit is 32MB */
unsigned long zfs_write_limit_max = 0;		/* max data payload per txg */
unsigned long zfs_write_limit_inflated = 0;
unsigned long zfs_write_limit_override = 0;

kmutex_t zfs_write_limit_lock;

static pgcnt_t old_physmem = 0;

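/*
 * Create the "dmu_tx_assign-<pool>" kstat: a virtual named kstat whose
 * ndata buckets form a power-of-two histogram ("1 us", "2 us", "4 us",
 * ...) of transaction-assign wait times.
 */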
static void
dsl_pool_tx_assign_init(dsl_pool_t *dp, unsigned int ndata)
{
	kstat_named_t *ks;
	char name[KSTAT_STRLEN];
	int i, data_size = ndata * sizeof(kstat_named_t);

	(void) snprintf(name, KSTAT_STRLEN, "dmu_tx_assign-%s",
	    spa_name(dp->dp_spa));

	dp->dp_tx_assign_size = ndata;

	if (data_size)
		dp->dp_tx_assign_buckets = kmem_alloc(data_size, KM_SLEEP);
	else
		dp->dp_tx_assign_buckets = NULL;

	for (i = 0; i < dp->dp_tx_assign_size; i++) {
		ks = &dp->dp_tx_assign_buckets[i];
		ks->data_type = KSTAT_DATA_UINT64;
		ks->value.ui64 = 0;
		(void) snprintf(ks->name, KSTAT_STRLEN, "%u us", 1 << i);
	}

	dp->dp_tx_assign_kstat = kstat_create("zfs", 0, name, "misc",
	    KSTAT_TYPE_NAMED, 0, KSTAT_FLAG_VIRTUAL);

	if (dp->dp_tx_assign_kstat) {
		dp->dp_tx_assign_kstat->ks_data = dp->dp_tx_assign_buckets;
		dp->dp_tx_assign_kstat->ks_ndata = dp->dp_tx_assign_size;
		dp->dp_tx_assign_kstat->ks_data_size = data_size;
		kstat_install(dp->dp_tx_assign_kstat);
	}
}

static void
dsl_pool_tx_assign_destroy(dsl_pool_t *dp)
{
	if (dp->dp_tx_assign_buckets)
		kmem_free(dp->dp_tx_assign_buckets,
		    dp->dp_tx_assign_size * sizeof(kstat_named_t));

	if (dp->dp_tx_assign_kstat)
		kstat_delete(dp->dp_tx_assign_kstat);
}

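/*
 * Record a transaction-assign wait time in the histogram.  The bucket
 * chosen is the smallest power of two that is >= usecs: a 100 us wait,
 * for example, lands in the "128 us" bucket (idx 7); waits beyond the
 * largest bucket are clamped into it.
 */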
void
dsl_pool_tx_assign_add_usecs(dsl_pool_t *dp, uint64_t usecs)
{
	uint64_t idx = 0;

	while (((1 << idx) < usecs) && (idx < dp->dp_tx_assign_size - 1))
		idx++;

	atomic_inc_64(&dp->dp_tx_assign_buckets[idx].value.ui64);
}

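/*
 * kstat ks_update callback: snapshot the pool's txg history list into a
 * freshly allocated ks_data buffer so userspace reads a consistent copy.
 */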
static int
dsl_pool_txg_history_update(kstat_t *ksp, int rw)
{
	dsl_pool_t *dp = ksp->ks_private;
	txg_history_t *th;
	int i = 0;

	if (rw == KSTAT_WRITE)
		return (EACCES);

	if (ksp->ks_data)
		kmem_free(ksp->ks_data, ksp->ks_data_size);

	mutex_enter(&dp->dp_lock);

	ksp->ks_ndata = dp->dp_txg_history_size;
	ksp->ks_data_size = dp->dp_txg_history_size * sizeof(kstat_txg_t);
	if (ksp->ks_data_size > 0)
		ksp->ks_data = kmem_alloc(ksp->ks_data_size, KM_PUSHPAGE);

	/* Traverse from oldest to youngest for the most readable output */
	for (th = list_tail(&dp->dp_txg_history); th != NULL;
	    th = list_prev(&dp->dp_txg_history, th)) {
		mutex_enter(&th->th_lock);
		ASSERT3S(i + sizeof(kstat_txg_t), <=, ksp->ks_data_size);
		memcpy(ksp->ks_data + i, &th->th_kstat, sizeof(kstat_txg_t));
		i += sizeof(kstat_txg_t);
		mutex_exit(&th->th_lock);
	}

	mutex_exit(&dp->dp_lock);

	return (0);
}

static void
dsl_pool_txg_history_init(dsl_pool_t *dp, uint64_t txg)
{
	char name[KSTAT_STRLEN];

	list_create(&dp->dp_txg_history, sizeof (txg_history_t),
	    offsetof(txg_history_t, th_link));
	dsl_pool_txg_history_add(dp, txg);

	(void) snprintf(name, KSTAT_STRLEN, "txgs-%s", spa_name(dp->dp_spa));
	dp->dp_txg_kstat = kstat_create("zfs", 0, name, "misc",
	    KSTAT_TYPE_TXG, 0, KSTAT_FLAG_VIRTUAL);
	if (dp->dp_txg_kstat) {
		dp->dp_txg_kstat->ks_data = NULL;
		dp->dp_txg_kstat->ks_private = dp;
		dp->dp_txg_kstat->ks_update = dsl_pool_txg_history_update;
		kstat_install(dp->dp_txg_kstat);
	}
}

static void
dsl_pool_txg_history_destroy(dsl_pool_t *dp)
{
	txg_history_t *th;

	if (dp->dp_txg_kstat) {
		if (dp->dp_txg_kstat->ks_data)
			kmem_free(dp->dp_txg_kstat->ks_data,
			    dp->dp_txg_kstat->ks_data_size);

		kstat_delete(dp->dp_txg_kstat);
	}

	mutex_enter(&dp->dp_lock);
	while ((th = list_remove_head(&dp->dp_txg_history))) {
		dp->dp_txg_history_size--;
		mutex_destroy(&th->th_lock);
		kmem_free(th, sizeof(txg_history_t));
	}

	ASSERT3U(dp->dp_txg_history_size, ==, 0);
	list_destroy(&dp->dp_txg_history);
	mutex_exit(&dp->dp_lock);
}

txg_history_t *
dsl_pool_txg_history_add(dsl_pool_t *dp, uint64_t txg)
{
	txg_history_t *th, *rm;

	th = kmem_zalloc(sizeof(txg_history_t), KM_PUSHPAGE);
	mutex_init(&th->th_lock, NULL, MUTEX_DEFAULT, NULL);
	th->th_kstat.txg = txg;
	th->th_kstat.state = TXG_STATE_OPEN;
	th->th_kstat.birth = gethrtime();

	mutex_enter(&dp->dp_lock);

	list_insert_head(&dp->dp_txg_history, th);
	dp->dp_txg_history_size++;

	while (dp->dp_txg_history_size > zfs_txg_history) {
		dp->dp_txg_history_size--;
		rm = list_remove_tail(&dp->dp_txg_history);
		mutex_destroy(&rm->th_lock);
		kmem_free(rm, sizeof(txg_history_t));
	}

	mutex_exit(&dp->dp_lock);

	return (th);
}

/*
 * Traverse from youngest to oldest, because lookups are only done for
 * open or syncing txgs, which are guaranteed to be at the head of the
 * list.  The txg_history_t structure is returned locked.
 */
txg_history_t *
dsl_pool_txg_history_get(dsl_pool_t *dp, uint64_t txg)
{
	txg_history_t *th;

	mutex_enter(&dp->dp_lock);
	for (th = list_head(&dp->dp_txg_history); th != NULL;
	    th = list_next(&dp->dp_txg_history, th)) {
		if (th->th_kstat.txg == txg) {
			mutex_enter(&th->th_lock);
			break;
		}
	}
	mutex_exit(&dp->dp_lock);

	return (th);
}

void
dsl_pool_txg_history_put(txg_history_t *th)
{
	mutex_exit(&th->th_lock);
}
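
/*
 * Example usage (a sketch): mark a txg as syncing, then drop the entry
 * lock via the put:
 *
 *	txg_history_t *th = dsl_pool_txg_history_get(dp, txg);
 *	if (th != NULL) {
 *		th->th_kstat.state = TXG_STATE_SYNCING;
 *		dsl_pool_txg_history_put(th);
 *	}
 */
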
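/*
 * Look up one of the well-known children of the root dsl_dir by name
 * (e.g. MOS_DIR_NAME, FREE_DIR_NAME, ORIGIN_DIR_NAME) and open it.
 */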
int
dsl_pool_open_special_dir(dsl_pool_t *dp, const char *name, dsl_dir_t **ddp)
{
	uint64_t obj;
	int err;

	err = zap_lookup(dp->dp_meta_objset,
	    dp->dp_root_dir->dd_phys->dd_child_dir_zapobj,
	    name, sizeof (obj), 1, &obj);
	if (err)
		return (err);

	return (dsl_dir_open_obj(dp, obj, name, dp, ddp));
}

static dsl_pool_t *
dsl_pool_open_impl(spa_t *spa, uint64_t txg)
{
	dsl_pool_t *dp;
	blkptr_t *bp = spa_get_rootblkptr(spa);

	dp = kmem_zalloc(sizeof (dsl_pool_t), KM_SLEEP);
	dp->dp_spa = spa;
	dp->dp_meta_rootbp = *bp;
	rw_init(&dp->dp_config_rwlock, NULL, RW_DEFAULT, NULL);
	dp->dp_write_limit = zfs_write_limit_min;
	txg_init(dp, txg);

	txg_list_create(&dp->dp_dirty_datasets,
	    offsetof(dsl_dataset_t, ds_dirty_link));
	txg_list_create(&dp->dp_dirty_zilogs,
	    offsetof(zilog_t, zl_dirty_link));
	txg_list_create(&dp->dp_dirty_dirs,
	    offsetof(dsl_dir_t, dd_dirty_link));
	txg_list_create(&dp->dp_sync_tasks,
	    offsetof(dsl_sync_task_group_t, dstg_node));

	mutex_init(&dp->dp_lock, NULL, MUTEX_DEFAULT, NULL);

	dp->dp_iput_taskq = taskq_create("zfs_iput_taskq", 1, minclsyspri,
	    1, 4, 0);

	dsl_pool_txg_history_init(dp, txg);
	dsl_pool_tx_assign_init(dp, 32);

	return (dp);
}

int
dsl_pool_init(spa_t *spa, uint64_t txg, dsl_pool_t **dpp)
{
	int err;
	dsl_pool_t *dp = dsl_pool_open_impl(spa, txg);

	err = dmu_objset_open_impl(spa, NULL, &dp->dp_meta_rootbp,
	    &dp->dp_meta_objset);
	if (err != 0)
		dsl_pool_close(dp);
	else
		*dpp = dp;

	return (err);
}

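/*
 * Open an existing pool: look up the root dataset directory, the
 * special directories ($MOS, $FREE, $ORIGIN), the feature-dependent
 * objects (async-destroy bptree, empty bpobj), and the temporary
 * user-hold zap, then initialize any in-progress scan state.
 */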
int
dsl_pool_open(dsl_pool_t *dp)
{
	int err;
	dsl_dir_t *dd;
	dsl_dataset_t *ds;
	uint64_t obj;

	rw_enter(&dp->dp_config_rwlock, RW_WRITER);
	err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_ROOT_DATASET, sizeof (uint64_t), 1,
	    &dp->dp_root_dir_obj);
	if (err)
		goto out;

	err = dsl_dir_open_obj(dp, dp->dp_root_dir_obj,
	    NULL, dp, &dp->dp_root_dir);
	if (err)
		goto out;

	err = dsl_pool_open_special_dir(dp, MOS_DIR_NAME, &dp->dp_mos_dir);
	if (err)
		goto out;

	if (spa_version(dp->dp_spa) >= SPA_VERSION_ORIGIN) {
		err = dsl_pool_open_special_dir(dp, ORIGIN_DIR_NAME, &dd);
		if (err)
			goto out;
		err = dsl_dataset_hold_obj(dp, dd->dd_phys->dd_head_dataset_obj,
		    FTAG, &ds);
		if (err == 0) {
			err = dsl_dataset_hold_obj(dp,
			    ds->ds_phys->ds_prev_snap_obj, dp,
			    &dp->dp_origin_snap);
			dsl_dataset_rele(ds, FTAG);
		}
		dsl_dir_close(dd, dp);
		if (err)
			goto out;
	}

	if (spa_version(dp->dp_spa) >= SPA_VERSION_DEADLISTS) {
		err = dsl_pool_open_special_dir(dp, FREE_DIR_NAME,
		    &dp->dp_free_dir);
		if (err)
			goto out;

		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_FREE_BPOBJ, sizeof (uint64_t), 1, &obj);
		if (err)
			goto out;
		VERIFY3U(0, ==, bpobj_open(&dp->dp_free_bpobj,
		    dp->dp_meta_objset, obj));
	}

	if (spa_feature_is_active(dp->dp_spa,
	    &spa_feature_table[SPA_FEATURE_ASYNC_DESTROY])) {
		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_BPTREE_OBJ, sizeof (uint64_t), 1,
		    &dp->dp_bptree_obj);
		if (err != 0)
			goto out;
	}

	if (spa_feature_is_active(dp->dp_spa,
	    &spa_feature_table[SPA_FEATURE_EMPTY_BPOBJ])) {
		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_EMPTY_BPOBJ, sizeof (uint64_t), 1,
		    &dp->dp_empty_bpobj);
		if (err != 0)
			goto out;
	}

	err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_TMP_USERREFS, sizeof (uint64_t), 1,
	    &dp->dp_tmp_userrefs_obj);
	if (err == ENOENT)
		err = 0;
	if (err)
		goto out;

	err = dsl_scan_init(dp, dp->dp_tx.tx_open_txg);

out:
	rw_exit(&dp->dp_config_rwlock);
	return (err);
}

void
dsl_pool_close(dsl_pool_t *dp)
{
	/* drop our references from dsl_pool_open() */

	/*
	 * Since we held the origin_snap from "syncing" context (which
	 * includes pool-opening context), it actually only got a "ref"
	 * and not a hold, so just drop that here.
	 */
	if (dp->dp_origin_snap)
		dsl_dataset_drop_ref(dp->dp_origin_snap, dp);
	if (dp->dp_mos_dir)
		dsl_dir_close(dp->dp_mos_dir, dp);
	if (dp->dp_free_dir)
		dsl_dir_close(dp->dp_free_dir, dp);
	if (dp->dp_root_dir)
		dsl_dir_close(dp->dp_root_dir, dp);

	bpobj_close(&dp->dp_free_bpobj);

	/* undo the dmu_objset_open_impl(mos) from dsl_pool_open() */
	if (dp->dp_meta_objset)
		dmu_objset_evict(dp->dp_meta_objset);

	txg_list_destroy(&dp->dp_dirty_datasets);
	txg_list_destroy(&dp->dp_dirty_zilogs);
	txg_list_destroy(&dp->dp_sync_tasks);
	txg_list_destroy(&dp->dp_dirty_dirs);

	arc_flush(dp->dp_spa);
	txg_fini(dp);
	dsl_scan_fini(dp);
	dsl_pool_tx_assign_destroy(dp);
	dsl_pool_txg_history_destroy(dp);
	rw_destroy(&dp->dp_config_rwlock);
	mutex_destroy(&dp->dp_lock);
	taskq_destroy(dp->dp_iput_taskq);
	if (dp->dp_blkstats)
		kmem_free(dp->dp_blkstats, sizeof (zfs_all_blkstats_t));
	kmem_free(dp, sizeof (dsl_pool_t));
}

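/*
 * Create a brand-new pool: the MOS and its object directory, the root
 * and special dsl_dirs, the $ORIGIN snapshot (when the SPA version
 * supports it), and the root dataset/objset, all in one assigned
 * transaction.
 */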
dsl_pool_t *
dsl_pool_create(spa_t *spa, nvlist_t *zplprops, uint64_t txg)
{
	int err;
	dsl_pool_t *dp = dsl_pool_open_impl(spa, txg);
	dmu_tx_t *tx = dmu_tx_create_assigned(dp, txg);
	objset_t *os;
	dsl_dataset_t *ds;
	uint64_t obj;

	/* create and open the MOS (meta-objset) */
	dp->dp_meta_objset = dmu_objset_create_impl(spa,
	    NULL, &dp->dp_meta_rootbp, DMU_OST_META, tx);

	/* create the pool directory */
	err = zap_create_claim(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_OT_OBJECT_DIRECTORY, DMU_OT_NONE, 0, tx);
	ASSERT0(err);

	/* Initialize scan structures */
	VERIFY3U(0, ==, dsl_scan_init(dp, txg));

	/* create and open the root dir */
	dp->dp_root_dir_obj = dsl_dir_create_sync(dp, NULL, NULL, tx);
	VERIFY(0 == dsl_dir_open_obj(dp, dp->dp_root_dir_obj,
	    NULL, dp, &dp->dp_root_dir));

	/* create and open the meta-objset dir */
	(void) dsl_dir_create_sync(dp, dp->dp_root_dir, MOS_DIR_NAME, tx);
	VERIFY(0 == dsl_pool_open_special_dir(dp,
	    MOS_DIR_NAME, &dp->dp_mos_dir));

	if (spa_version(spa) >= SPA_VERSION_DEADLISTS) {
		/* create and open the free dir */
		(void) dsl_dir_create_sync(dp, dp->dp_root_dir,
		    FREE_DIR_NAME, tx);
		VERIFY(0 == dsl_pool_open_special_dir(dp,
		    FREE_DIR_NAME, &dp->dp_free_dir));

		/* create and open the free_bplist */
		obj = bpobj_alloc(dp->dp_meta_objset, SPA_MAXBLOCKSIZE, tx);
		VERIFY(zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_FREE_BPOBJ, sizeof (uint64_t), 1, &obj, tx) == 0);
		VERIFY3U(0, ==, bpobj_open(&dp->dp_free_bpobj,
		    dp->dp_meta_objset, obj));
	}

	if (spa_version(spa) >= SPA_VERSION_DSL_SCRUB)
		dsl_pool_create_origin(dp, tx);

	/* create the root dataset */
	obj = dsl_dataset_create_sync_dd(dp->dp_root_dir, NULL, 0, tx);

	/* create the root objset */
	VERIFY(0 == dsl_dataset_hold_obj(dp, obj, FTAG, &ds));
	VERIFY(NULL != (os = dmu_objset_create_impl(dp->dp_spa, ds,
	    dsl_dataset_get_blkptr(ds), DMU_OST_ZFS, tx)));
#ifdef _KERNEL
	zfs_create_fs(os, kcred, zplprops, tx);
#endif
	dsl_dataset_rele(ds, FTAG);

	dmu_tx_commit(tx);

	return (dp);
}

/*
 * Account for the meta-objset space in its placeholder dsl_dir.
 */
void
dsl_pool_mos_diduse_space(dsl_pool_t *dp,
    int64_t used, int64_t comp, int64_t uncomp)
{
	ASSERT3U(comp, ==, uncomp);	/* it's all metadata */
	mutex_enter(&dp->dp_lock);
	dp->dp_mos_used_delta += used;
	dp->dp_mos_compressed_delta += comp;
	dp->dp_mos_uncompressed_delta += uncomp;
	mutex_exit(&dp->dp_lock);
}

static int
deadlist_enqueue_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
	dsl_deadlist_t *dl = arg;
	dsl_pool_t *dp = dmu_objset_pool(dl->dl_os);
	rw_enter(&dp->dp_config_rwlock, RW_READER);
	dsl_deadlist_insert(dl, bp, tx);
	rw_exit(&dp->dp_config_rwlock);
	return (0);
}

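/*
 * Sync out all dirty state for the given txg: the dirty datasets (twice,
 * to pick up user-quota updates), the dirty dsl_dirs, the MOS itself,
 * and any queued sync tasks; afterwards, retune the pool's write limit
 * from the observed throughput.
 */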
void
dsl_pool_sync(dsl_pool_t *dp, uint64_t txg)
{
	zio_t *zio;
	dmu_tx_t *tx;
	dsl_dir_t *dd;
	dsl_dataset_t *ds;
	objset_t *mos = dp->dp_meta_objset;
	hrtime_t start, write_time;
	uint64_t data_written;
	int err;
	list_t synced_datasets;

	list_create(&synced_datasets, sizeof (dsl_dataset_t),
	    offsetof(dsl_dataset_t, ds_synced_link));

	/*
	 * We need to copy dp_space_towrite[] before doing
	 * dsl_sync_task_group_sync(), because
	 * dsl_dataset_snapshot_reserve_space() will increase
	 * dp_space_towrite but not actually write anything.
	 */
	data_written = dp->dp_space_towrite[txg & TXG_MASK];

	tx = dmu_tx_create_assigned(dp, txg);

	dp->dp_read_overhead = 0;
	start = gethrtime();

	zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
	while ((ds = txg_list_remove(&dp->dp_dirty_datasets, txg))) {
		/*
		 * We must not sync any non-MOS datasets twice, because
		 * we may have taken a snapshot of them.  However, we
		 * may sync newly-created datasets on pass 2.
		 */
		ASSERT(!list_link_active(&ds->ds_synced_link));
		list_insert_tail(&synced_datasets, ds);
		dsl_dataset_sync(ds, zio, tx);
	}
	DTRACE_PROBE(pool_sync__1setup);
	err = zio_wait(zio);

	write_time = gethrtime() - start;
	ASSERT(err == 0);
	DTRACE_PROBE(pool_sync__2rootzio);

	/*
	 * After the data blocks have been written (ensured by the zio_wait()
	 * above), update the user/group space accounting.
	 */
	for (ds = list_head(&synced_datasets); ds;
	    ds = list_next(&synced_datasets, ds))
		dmu_objset_do_userquota_updates(ds->ds_objset, tx);

	/*
	 * Sync the datasets again to push out the changes due to
	 * userspace updates.  This must be done before we process the
	 * sync tasks, so that any snapshots will have the correct
	 * user accounting information (and we won't get confused
	 * about which blocks are part of the snapshot).
	 */
	zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
	while ((ds = txg_list_remove(&dp->dp_dirty_datasets, txg))) {
		ASSERT(list_link_active(&ds->ds_synced_link));
		dmu_buf_rele(ds->ds_dbuf, ds);
		dsl_dataset_sync(ds, zio, tx);
	}
	err = zio_wait(zio);

	/*
	 * Now that the datasets have been completely synced, we can
	 * clean up our in-memory structures accumulated while syncing:
	 *
	 *  - move dead blocks from the pending deadlist to the on-disk
	 *    deadlist
	 *  - clean up zil records
	 *  - release hold from dsl_dataset_dirty()
	 */
	while ((ds = list_remove_head(&synced_datasets))) {
		ASSERTV(objset_t *os = ds->ds_objset);
		bplist_iterate(&ds->ds_pending_deadlist,
		    deadlist_enqueue_cb, &ds->ds_deadlist, tx);
		ASSERT(!dmu_objset_is_dirty(os, txg));
		dmu_buf_rele(ds->ds_dbuf, ds);
	}

	start = gethrtime();
	while ((dd = txg_list_remove(&dp->dp_dirty_dirs, txg)))
		dsl_dir_sync(dd, tx);
	write_time += gethrtime() - start;

	/*
	 * The MOS's space is accounted for in the pool/$MOS
	 * (dp_mos_dir).  We can't modify the mos while we're syncing
	 * it, so we remember the deltas and apply them here.
	 */
	if (dp->dp_mos_used_delta != 0 || dp->dp_mos_compressed_delta != 0 ||
	    dp->dp_mos_uncompressed_delta != 0) {
		dsl_dir_diduse_space(dp->dp_mos_dir, DD_USED_HEAD,
		    dp->dp_mos_used_delta,
		    dp->dp_mos_compressed_delta,
		    dp->dp_mos_uncompressed_delta, tx);
		dp->dp_mos_used_delta = 0;
		dp->dp_mos_compressed_delta = 0;
		dp->dp_mos_uncompressed_delta = 0;
	}

	start = gethrtime();
	if (list_head(&mos->os_dirty_dnodes[txg & TXG_MASK]) != NULL ||
	    list_head(&mos->os_free_dnodes[txg & TXG_MASK]) != NULL) {
		zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
		dmu_objset_sync(mos, zio, tx);
		err = zio_wait(zio);
		ASSERT(err == 0);
		dprintf_bp(&dp->dp_meta_rootbp, "meta objset rootbp is %s", "");
		spa_set_rootblkptr(dp->dp_spa, &dp->dp_meta_rootbp);
	}
	write_time += gethrtime() - start;
	DTRACE_PROBE2(pool_sync__4io, hrtime_t, write_time,
	    hrtime_t, dp->dp_read_overhead);
	write_time -= dp->dp_read_overhead;

	/*
	 * If we modify a dataset in the same txg that we want to destroy it,
	 * its dsl_dir's dd_dbuf will be dirty, and thus have a hold on it.
	 * dsl_dir_destroy_check() will fail if there are unexpected holds.
	 * Therefore, we want to sync the MOS (thus syncing the dd_dbuf
	 * and clearing the hold on it) before we process the sync_tasks.
	 * The MOS data dirtied by the sync_tasks will be synced on the next
	 * pass.
	 */
	DTRACE_PROBE(pool_sync__3task);
	if (!txg_list_empty(&dp->dp_sync_tasks, txg)) {
		dsl_sync_task_group_t *dstg;
		/*
		 * No more sync tasks should have been added while we
		 * were syncing.
		 */
		ASSERT(spa_sync_pass(dp->dp_spa) == 1);
		while ((dstg = txg_list_remove(&dp->dp_sync_tasks, txg)))
			dsl_sync_task_group_sync(dstg, tx);
	}

	dmu_tx_commit(tx);

	dp->dp_space_towrite[txg & TXG_MASK] = 0;
	ASSERT(dp->dp_tempreserved[txg & TXG_MASK] == 0);

	/*
	 * If the write limit max has not been explicitly set, set it
	 * to a fraction of available physical memory (default 1/8th).
	 * Note that we must inflate the limit because the spa
	 * inflates write sizes to account for data replication.
	 * Check this each sync phase to catch changing memory size.
	 */
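	/*
	 * For example (a sketch, assuming the default shift of 3): with
	 * 16 GB of physical memory the uninflated limit is 2 GB, which
	 * spa_get_asize() then inflates further to account for
	 * replication.
	 */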
	if (physmem != old_physmem && zfs_write_limit_shift) {
		mutex_enter(&zfs_write_limit_lock);
		old_physmem = physmem;
		zfs_write_limit_max = ptob(physmem) >> zfs_write_limit_shift;
		zfs_write_limit_inflated = MAX(zfs_write_limit_min,
		    spa_get_asize(dp->dp_spa, zfs_write_limit_max));
		mutex_exit(&zfs_write_limit_lock);
	}

	/*
	 * Attempt to keep the sync time consistent by adjusting the
	 * amount of write traffic allowed into each transaction group.
	 * Weight the throughput calculation towards the current value:
	 * thru = 3/4 old_thru + 1/4 new_thru
	 *
	 * Note: write_time is in nanosecs, so write_time/MICROSEC
	 * yields millisecs
	 */
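	/*
	 * Worked example (a sketch): syncing 512 MB in 2000 ms gives a
	 * new_thru of ~256 KB/ms; with zfs_txg_synctime_ms == 1000 the
	 * write limit then converges toward ~256 MB per txg, subject to
	 * the min/inflated clamps below.
	 */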
	ASSERT(zfs_write_limit_min > 0);
	if (data_written > zfs_write_limit_min / 8 && write_time > MICROSEC) {
		uint64_t throughput = data_written / (write_time / MICROSEC);

		if (dp->dp_throughput)
			dp->dp_throughput = throughput / 4 +
			    3 * dp->dp_throughput / 4;
		else
			dp->dp_throughput = throughput;
		dp->dp_write_limit = MIN(zfs_write_limit_inflated,
		    MAX(zfs_write_limit_min,
		    dp->dp_throughput * zfs_txg_synctime_ms));
	}
}

void
dsl_pool_sync_done(dsl_pool_t *dp, uint64_t txg)
{
	zilog_t *zilog;
	dsl_dataset_t *ds;

	while ((zilog = txg_list_remove(&dp->dp_dirty_zilogs, txg))) {
		ds = dmu_objset_ds(zilog->zl_os);
		zil_clean(zilog, txg);
		ASSERT(!dmu_objset_is_dirty(zilog->zl_os, txg));
		dmu_buf_rele(ds->ds_dbuf, zilog);
	}
	ASSERT(!dmu_objset_is_dirty(dp->dp_meta_objset, txg));
}

/*
 * TRUE if the current thread is the tx_sync_thread or if we
 * are being called from SPA context during pool initialization.
 */
int
dsl_pool_sync_context(dsl_pool_t *dp)
{
	return (curthread == dp->dp_tx.tx_sync_thread ||
	    spa_is_initializing(dp->dp_spa));
}

uint64_t
dsl_pool_adjustedsize(dsl_pool_t *dp, boolean_t netfree)
{
	uint64_t space, resv;

	/*
	 * Reserve about 1.6% (1/64), or at least 32MB, for allocation
	 * efficiency.
	 * XXX The intent log is not accounted for, so it must fit
	 * within this slop.
	 *
	 * If we're trying to assess whether it's OK to do a free,
	 * cut the reservation in half to allow forward progress
	 * (e.g. make it possible to rm(1) files from a full pool).
	 */
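	/*
	 * For example: with 1 TB of dspace the reservation is
	 * 1 TB / 64 = 16 GB (halved to 8 GB when netfree is set), so
	 * roughly 1008 GB is reported as usable.
	 */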
	space = spa_get_dspace(dp->dp_spa);
	resv = MAX(space >> 6, SPA_MINDEVSIZE >> 1);
	if (netfree)
		resv >>= 1;

	return (space - resv);
}

int
dsl_pool_tempreserve_space(dsl_pool_t *dp, uint64_t space, dmu_tx_t *tx)
{
	uint64_t reserved = 0;
	uint64_t write_limit = (zfs_write_limit_override ?
	    zfs_write_limit_override : dp->dp_write_limit);

	if (zfs_no_write_throttle) {
		atomic_add_64(&dp->dp_tempreserved[tx->tx_txg & TXG_MASK],
		    space);
		return (0);
	}

	/*
	 * Check to see if we have exceeded the maximum allowed IO for
	 * this transaction group.  We can do this without locks since
	 * a little slop here is ok.  Note that we do the reserved check
	 * with only half the requested reserve: this is because the
	 * reserve requests are worst-case, and we really don't want to
	 * throttle based off of worst-case estimates.
	 */
	if (write_limit > 0) {
		reserved = dp->dp_space_towrite[tx->tx_txg & TXG_MASK]
		    + dp->dp_tempreserved[tx->tx_txg & TXG_MASK] / 2;

		if (reserved && reserved > write_limit) {
			DMU_TX_STAT_BUMP(dmu_tx_write_limit);
			return (ERESTART);
		}
	}

	atomic_add_64(&dp->dp_tempreserved[tx->tx_txg & TXG_MASK], space);

	/*
	 * If this transaction group is over 7/8ths capacity, delay
	 * the caller 1 clock tick.  This will slow down the "fill"
	 * rate until the sync process can catch up with us.
	 */
	if (reserved && reserved > (write_limit - (write_limit >> 3)))
		txg_delay(dp, tx->tx_txg, 1);

	return (0);
}

void
dsl_pool_tempreserve_clear(dsl_pool_t *dp, int64_t space, dmu_tx_t *tx)
{
	ASSERT(dp->dp_tempreserved[tx->tx_txg & TXG_MASK] >= space);
	atomic_add_64(&dp->dp_tempreserved[tx->tx_txg & TXG_MASK], -space);
}

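/*
 * Called in response to memory pressure: clamp the pool's write limit
 * down to one quarter of the dirty and reserved space currently in
 * flight.  The limit is never raised here, and never drops below
 * zfs_write_limit_min.
 */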
void
dsl_pool_memory_pressure(dsl_pool_t *dp)
{
	uint64_t space_inuse = 0;
	int i;

	if (dp->dp_write_limit == zfs_write_limit_min)
		return;

	for (i = 0; i < TXG_SIZE; i++) {
		space_inuse += dp->dp_space_towrite[i];
		space_inuse += dp->dp_tempreserved[i];
	}
	dp->dp_write_limit = MAX(zfs_write_limit_min,
	    MIN(dp->dp_write_limit, space_inuse / 4));
}

void
dsl_pool_willuse_space(dsl_pool_t *dp, int64_t space, dmu_tx_t *tx)
{
	if (space > 0) {
		mutex_enter(&dp->dp_lock);
		dp->dp_space_towrite[tx->tx_txg & TXG_MASK] += space;
		mutex_exit(&dp->dp_lock);
	}
}

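/*
 * Upgrade callback: walk back through this dataset's snapshot chain to
 * find its oldest snapshot; if that snapshot has no origin, graft the
 * chain onto the pool-wide $ORIGIN snapshot, then record the dataset in
 * its origin's next-clones zap.
 */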
/* ARGSUSED */
static int
upgrade_clones_cb(spa_t *spa, uint64_t dsobj, const char *dsname, void *arg)
{
	dmu_tx_t *tx = arg;
	dsl_dataset_t *ds, *prev = NULL;
	int err;
	dsl_pool_t *dp = spa_get_dsl(spa);

	err = dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds);
	if (err)
		return (err);

	while (ds->ds_phys->ds_prev_snap_obj != 0) {
		err = dsl_dataset_hold_obj(dp, ds->ds_phys->ds_prev_snap_obj,
		    FTAG, &prev);
		if (err) {
			dsl_dataset_rele(ds, FTAG);
			return (err);
		}

		if (prev->ds_phys->ds_next_snap_obj != ds->ds_object)
			break;
		dsl_dataset_rele(ds, FTAG);
		ds = prev;
		prev = NULL;
	}

	if (prev == NULL) {
		prev = dp->dp_origin_snap;

		/*
		 * The $ORIGIN can't have any data, or the accounting
		 * will be wrong.
		 */
		ASSERT(prev->ds_phys->ds_bp.blk_birth == 0);

		/* The origin doesn't get attached to itself */
		if (ds->ds_object == prev->ds_object) {
			dsl_dataset_rele(ds, FTAG);
			return (0);
		}

		dmu_buf_will_dirty(ds->ds_dbuf, tx);
		ds->ds_phys->ds_prev_snap_obj = prev->ds_object;
		ds->ds_phys->ds_prev_snap_txg = prev->ds_phys->ds_creation_txg;

		dmu_buf_will_dirty(ds->ds_dir->dd_dbuf, tx);
		ds->ds_dir->dd_phys->dd_origin_obj = prev->ds_object;

		dmu_buf_will_dirty(prev->ds_dbuf, tx);
		prev->ds_phys->ds_num_children++;

		if (ds->ds_phys->ds_next_snap_obj == 0) {
			ASSERT(ds->ds_prev == NULL);
			VERIFY(0 == dsl_dataset_hold_obj(dp,
			    ds->ds_phys->ds_prev_snap_obj, ds, &ds->ds_prev));
		}
	}

	ASSERT(ds->ds_dir->dd_phys->dd_origin_obj == prev->ds_object);
	ASSERT(ds->ds_phys->ds_prev_snap_obj == prev->ds_object);

	if (prev->ds_phys->ds_next_clones_obj == 0) {
		dmu_buf_will_dirty(prev->ds_dbuf, tx);
		prev->ds_phys->ds_next_clones_obj =
		    zap_create(dp->dp_meta_objset,
		    DMU_OT_NEXT_CLONES, DMU_OT_NONE, 0, tx);
	}
	VERIFY(0 == zap_add_int(dp->dp_meta_objset,
	    prev->ds_phys->ds_next_clones_obj, ds->ds_object, tx));

	dsl_dataset_rele(ds, FTAG);
	if (prev != dp->dp_origin_snap)
		dsl_dataset_rele(prev, FTAG);
	return (0);
}

void
dsl_pool_upgrade_clones(dsl_pool_t *dp, dmu_tx_t *tx)
{
	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(dp->dp_origin_snap != NULL);

	VERIFY3U(0, ==, dmu_objset_find_spa(dp->dp_spa, NULL, upgrade_clones_cb,
	    tx, DS_FIND_CHILDREN));
}

/* ARGSUSED */
static int
upgrade_dir_clones_cb(spa_t *spa, uint64_t dsobj, const char *dsname, void *arg)
{
	dmu_tx_t *tx = arg;
	dsl_dataset_t *ds;
	dsl_pool_t *dp = spa_get_dsl(spa);
	objset_t *mos = dp->dp_meta_objset;

	VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));

	if (ds->ds_dir->dd_phys->dd_origin_obj) {
		dsl_dataset_t *origin;

		VERIFY3U(0, ==, dsl_dataset_hold_obj(dp,
		    ds->ds_dir->dd_phys->dd_origin_obj, FTAG, &origin));

		if (origin->ds_dir->dd_phys->dd_clones == 0) {
			dmu_buf_will_dirty(origin->ds_dir->dd_dbuf, tx);
			origin->ds_dir->dd_phys->dd_clones = zap_create(mos,
			    DMU_OT_DSL_CLONES, DMU_OT_NONE, 0, tx);
		}

		VERIFY3U(0, ==, zap_add_int(dp->dp_meta_objset,
		    origin->ds_dir->dd_phys->dd_clones, dsobj, tx));

		dsl_dataset_rele(origin, FTAG);
	}

	dsl_dataset_rele(ds, FTAG);
	return (0);
}

void
dsl_pool_upgrade_dir_clones(dsl_pool_t *dp, dmu_tx_t *tx)
{
	uint64_t obj;

	ASSERT(dmu_tx_is_syncing(tx));

	(void) dsl_dir_create_sync(dp, dp->dp_root_dir, FREE_DIR_NAME, tx);
	VERIFY(0 == dsl_pool_open_special_dir(dp,
	    FREE_DIR_NAME, &dp->dp_free_dir));

	/*
	 * We can't use bpobj_alloc(), because spa_version() still
	 * returns the old version, and we need a new-version bpobj with
	 * subobj support.  So call dmu_object_alloc() directly.
	 */
	obj = dmu_object_alloc(dp->dp_meta_objset, DMU_OT_BPOBJ,
	    SPA_MAXBLOCKSIZE, DMU_OT_BPOBJ_HDR, sizeof (bpobj_phys_t), tx);
	VERIFY3U(0, ==, zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_FREE_BPOBJ, sizeof (uint64_t), 1, &obj, tx));
	VERIFY3U(0, ==, bpobj_open(&dp->dp_free_bpobj,
	    dp->dp_meta_objset, obj));

	VERIFY3U(0, ==, dmu_objset_find_spa(dp->dp_spa, NULL,
	    upgrade_dir_clones_cb, tx, DS_FIND_CHILDREN));
}

void
dsl_pool_create_origin(dsl_pool_t *dp, dmu_tx_t *tx)
{
	uint64_t dsobj;
	dsl_dataset_t *ds;

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(dp->dp_origin_snap == NULL);

	/* create the origin dir, ds, & snap-ds */
	rw_enter(&dp->dp_config_rwlock, RW_WRITER);
	dsobj = dsl_dataset_create_sync(dp->dp_root_dir, ORIGIN_DIR_NAME,
	    NULL, 0, kcred, tx);
	VERIFY(0 == dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
	dsl_dataset_snapshot_sync(ds, ORIGIN_DIR_NAME, tx);
	VERIFY(0 == dsl_dataset_hold_obj(dp, ds->ds_phys->ds_prev_snap_obj,
	    dp, &dp->dp_origin_snap));
	dsl_dataset_rele(ds, FTAG);
	rw_exit(&dp->dp_config_rwlock);
}

taskq_t *
dsl_pool_iput_taskq(dsl_pool_t *dp)
{
	return (dp->dp_iput_taskq);
}

/*
 * Walk through the pool-wide zap object of temporary snapshot user holds
 * and release them.
 */
void
dsl_pool_clean_tmp_userrefs(dsl_pool_t *dp)
{
	zap_attribute_t za;
	zap_cursor_t zc;
	objset_t *mos = dp->dp_meta_objset;
	uint64_t zapobj = dp->dp_tmp_userrefs_obj;

	if (zapobj == 0)
		return;
	ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);

	for (zap_cursor_init(&zc, mos, zapobj);
	    zap_cursor_retrieve(&zc, &za) == 0;
	    zap_cursor_advance(&zc)) {
		char *htag;
		uint64_t dsobj;

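		/*
		 * Entries are named "<dsobj in hex>-<tag>" (see
		 * dsl_pool_user_hold_rele_impl() below); split the name
		 * back into the dataset object number and the hold tag.
		 */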
		htag = strchr(za.za_name, '-');
		*htag = '\0';
		++htag;
		dsobj = strtonum(za.za_name, NULL);
		(void) dsl_dataset_user_release_tmp(dp, dsobj, htag, B_FALSE);
	}
	zap_cursor_fini(&zc);
}

/*
 * Create the pool-wide zap object for storing temporary snapshot holds.
 */
void
dsl_pool_user_hold_create_obj(dsl_pool_t *dp, dmu_tx_t *tx)
{
	objset_t *mos = dp->dp_meta_objset;

	ASSERT(dp->dp_tmp_userrefs_obj == 0);
	ASSERT(dmu_tx_is_syncing(tx));

	dp->dp_tmp_userrefs_obj = zap_create_link(mos, DMU_OT_USERREFS,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_TMP_USERREFS, tx);
}

static int
dsl_pool_user_hold_rele_impl(dsl_pool_t *dp, uint64_t dsobj,
    const char *tag, uint64_t *now, dmu_tx_t *tx, boolean_t holding)
{
	objset_t *mos = dp->dp_meta_objset;
	uint64_t zapobj = dp->dp_tmp_userrefs_obj;
	char *name;
	int error;

	ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);
	ASSERT(dmu_tx_is_syncing(tx));

	/*
	 * If the pool was created prior to SPA_VERSION_USERREFS, the
	 * zap object for temporary holds might not exist yet.
	 */
	if (zapobj == 0) {
		if (holding) {
			dsl_pool_user_hold_create_obj(dp, tx);
			zapobj = dp->dp_tmp_userrefs_obj;
		} else {
			return (ENOENT);
		}
	}

	name = kmem_asprintf("%llx-%s", (u_longlong_t)dsobj, tag);
	if (holding)
		error = zap_add(mos, zapobj, name, 8, 1, now, tx);
	else
		error = zap_remove(mos, zapobj, name, tx);
	strfree(name);

	return (error);
}

/*
 * Add a temporary hold for the given dataset object and tag.
 */
int
dsl_pool_user_hold(dsl_pool_t *dp, uint64_t dsobj, const char *tag,
    uint64_t *now, dmu_tx_t *tx)
{
	return (dsl_pool_user_hold_rele_impl(dp, dsobj, tag, now, tx, B_TRUE));
}

/*
 * Release a temporary hold for the given dataset object and tag.
 */
int
dsl_pool_user_release(dsl_pool_t *dp, uint64_t dsobj, const char *tag,
    dmu_tx_t *tx)
{
	return (dsl_pool_user_hold_rele_impl(dp, dsobj, tag, NULL,
	    tx, B_FALSE));
}

#if defined(_KERNEL) && defined(HAVE_SPL)
module_param(zfs_no_write_throttle, int, 0644);
MODULE_PARM_DESC(zfs_no_write_throttle, "Disable write throttling");

module_param(zfs_write_limit_shift, int, 0444);
MODULE_PARM_DESC(zfs_write_limit_shift, "log2(fraction of memory) per txg");

module_param(zfs_txg_synctime_ms, int, 0644);
MODULE_PARM_DESC(zfs_txg_synctime_ms, "Target milliseconds between txg sync");

module_param(zfs_txg_history, int, 0644);
MODULE_PARM_DESC(zfs_txg_history, "Historic statistics for the last N txgs");

module_param(zfs_write_limit_min, ulong, 0444);
MODULE_PARM_DESC(zfs_write_limit_min, "Min txg write limit");

module_param(zfs_write_limit_max, ulong, 0444);
MODULE_PARM_DESC(zfs_write_limit_max, "Max txg write limit");

module_param(zfs_write_limit_inflated, ulong, 0444);
MODULE_PARM_DESC(zfs_write_limit_inflated, "Inflated txg write limit");

module_param(zfs_write_limit_override, ulong, 0444);
MODULE_PARM_DESC(zfs_write_limit_override, "Override txg write limit");
#endif