/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2017 by Delphix. All rights reserved.
 * Copyright (c) 2013 Steven Hartland. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright 2016 Nexenta Systems, Inc. All rights reserved.
 */

#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_scan.h>
#include <sys/dnode.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/arc.h>
#include <sys/zap.h>
#include <sys/zio.h>
#include <sys/zfs_context.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_znode.h>
#include <sys/spa_impl.h>
#include <sys/dsl_deadlist.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab_impl.h>
#include <sys/bptree.h>
#include <sys/zfeature.h>
#include <sys/zil_impl.h>
#include <sys/dsl_userhold.h>
#include <sys/trace_txg.h>
#include <sys/mmp.h>

/*
 * ZFS Write Throttle
 * ------------------
 *
 * ZFS must limit the rate of incoming writes to the rate at which it is able
 * to sync data modifications to the backend storage. Throttling by too much
 * creates an artificial limit; throttling by too little can only be sustained
 * for short periods and would lead to highly lumpy performance. On a per-pool
 * basis, ZFS tracks the amount of modified (dirty) data. As operations change
 * data, the amount of dirty data increases; as ZFS syncs out data, the amount
 * of dirty data decreases. When the amount of dirty data exceeds a
 * predetermined threshold, further modifications are blocked until the amount
 * of dirty data decreases (as data is synced out).
 *
 * The limit on dirty data is tunable, and should be adjusted according to
 * both the IO capacity and available memory of the system. The larger the
 * window, the more ZFS is able to aggregate and amortize metadata (and data)
 * changes. However, memory is a limited resource, and allowing for more dirty
 * data comes at the cost of keeping other useful data in memory (for example,
 * ZFS data cached by the ARC).
 *
 * Implementation
 *
 * As buffers are modified, dsl_pool_dirty_space() increments both the per-
 * txg (dp_dirty_pertxg[]) and poolwide (dp_dirty_total) accounting of
 * dirty space used; dsl_pool_undirty_space() decrements those values as data
 * is synced out from dsl_pool_sync(). While only the poolwide value is
 * relevant, the per-txg value is useful for debugging. The tunable
 * zfs_dirty_data_max determines the dirty space limit. Once that value is
 * exceeded, new writes are halted until space frees up.
 *
 * The zfs_dirty_data_sync_percent tunable dictates the threshold at which we
 * ensure that there is a txg syncing (see the comment in txg.c for a full
 * description of transaction group stages).
 *
 * The IO scheduler uses both the dirty space limit and current amount of
 * dirty data as inputs. Those values affect the number of concurrent IOs ZFS
 * issues. See the comment in vdev_queue.c for details of the IO scheduler.
 *
 * The delay is also calculated based on the amount of dirty data.  See the
 * comment above dmu_tx_delay() for details.
 */

/*
 * zfs_dirty_data_max will be set to zfs_dirty_data_max_percent% of all memory,
 * capped at zfs_dirty_data_max_max.  It can also be overridden with a module
 * parameter.
 */
unsigned long zfs_dirty_data_max = 0;
unsigned long zfs_dirty_data_max_max = 0;
int zfs_dirty_data_max_percent = 10;
int zfs_dirty_data_max_max_percent = 25;
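
/*
 * Worked example (editor's illustration, using the defaults above): on a
 * system with 32 GiB of memory, zfs_dirty_data_max starts at 10% of RAM
 * (~3.2 GiB) and is bounded above by the 25% cap (~8 GiB), unless an
 * administrator overrides it via the module parameter.
 */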

/*
 * If there's at least this much dirty data (as a percentage of
 * zfs_dirty_data_max), push out a txg.  This should be less than
 * zfs_vdev_async_write_active_min_dirty_percent.
 */
int zfs_dirty_data_sync_percent = 20;

/*
 * Once there is this amount of dirty data, dmu_tx_delay() will kick in
 * and delay each transaction.
 * This value should be >= zfs_vdev_async_write_active_max_dirty_percent.
 */
int zfs_delay_min_dirty_percent = 60;

/*
 * This controls how quickly the delay approaches infinity.
 * Larger values cause it to delay more for a given amount of dirty data.
 * Therefore larger values will cause there to be less dirty data for a
 * given throughput.
 *
 * For the smoothest delay, this value should be about 1 billion divided
 * by the maximum number of operations per second.  This will smoothly
 * handle between 10x and 1/10th this number.
 *
 * Note: zfs_delay_scale * zfs_dirty_data_max must be < 2^64, due to the
 * multiply in dmu_tx_delay().
 */
unsigned long zfs_delay_scale = 1000 * 1000 * 1000 / 2000;
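
/*
 * Worked example (editor's illustration): the default of
 * 1,000,000,000 / 2000 = 500,000 targets a backend sustaining roughly
 * 2,000 operations per second and, per the note above, degrades
 * gracefully between ~200 and ~20,000 ops/s. A pool expected to sustain
 * 10,000 ops/s would instead use 1,000,000,000 / 10000 = 100,000.
 */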

/*
 * This determines the number of threads used by the dp_sync_taskq.
 */
int zfs_sync_taskq_batch_pct = 75;

/*
 * These tunables determine the behavior of how zil_itxg_clean() is
 * called via zil_clean() in the context of spa_sync(). When an itxg
 * list needs to be cleaned, TQ_NOSLEEP will be used when dispatching.
 * If the dispatch fails, the call to zil_itxg_clean() will occur
 * synchronously in the context of spa_sync(), which can negatively
 * impact the performance of spa_sync() (e.g. in the case of the itxg
 * list having a large number of itxs that need to be cleaned).
 *
 * Thus, these tunables can be used to manipulate the behavior of the
 * taskq used by zil_clean(); they determine the number of taskq entries
 * that are pre-populated when the taskq is first created (via the
 * "zfs_zil_clean_taskq_minalloc" tunable) and the maximum number of
 * taskq entries that are cached after an on-demand allocation (via the
 * "zfs_zil_clean_taskq_maxalloc" tunable).
 *
 * The idea is that we want to try reasonably hard to ensure there will
 * already be a taskq entry pre-allocated by the time that it is needed
 * by zil_clean(). This way, we can avoid the possibility of an
 * on-demand allocation of a new taskq entry from failing, which would
 * result in zil_itxg_clean() being called synchronously from zil_clean()
 * (which can adversely affect performance of spa_sync()).
 *
 * Additionally, the number of threads used by the taskq can be
 * configured via the "zfs_zil_clean_taskq_nthr_pct" tunable.
 */
int zfs_zil_clean_taskq_nthr_pct = 100;
int zfs_zil_clean_taskq_minalloc = 1024;
int zfs_zil_clean_taskq_maxalloc = 1024 * 1024;
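
/*
 * Editor's sketch (not part of the original source): the dispatch-with-
 * synchronous-fallback pattern described above lives in zil_clean(), in
 * zil.c, and looks roughly like:
 *
 *	if (taskq_dispatch(dp->dp_zil_clean_taskq,
 *	    (task_func_t *)zil_itxg_clean, clean_me, TQ_NOSLEEP) ==
 *	    TASKQID_INVALID)
 *		zil_itxg_clean(clean_me);	(synchronous fallback)
 */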

int
dsl_pool_open_special_dir(dsl_pool_t *dp, const char *name, dsl_dir_t **ddp)
{
	uint64_t obj;
	int err;

	err = zap_lookup(dp->dp_meta_objset,
	    dsl_dir_phys(dp->dp_root_dir)->dd_child_dir_zapobj,
	    name, sizeof (obj), 1, &obj);
	if (err)
		return (err);

	return (dsl_dir_hold_obj(dp, obj, name, dp, ddp));
}

static dsl_pool_t *
dsl_pool_open_impl(spa_t *spa, uint64_t txg)
{
	dsl_pool_t *dp;
	blkptr_t *bp = spa_get_rootblkptr(spa);

	dp = kmem_zalloc(sizeof (dsl_pool_t), KM_SLEEP);
	dp->dp_spa = spa;
	dp->dp_meta_rootbp = *bp;
	rrw_init(&dp->dp_config_rwlock, B_TRUE);
	txg_init(dp, txg);
	mmp_init(spa);

	txg_list_create(&dp->dp_dirty_datasets, spa,
	    offsetof(dsl_dataset_t, ds_dirty_link));
	txg_list_create(&dp->dp_dirty_zilogs, spa,
	    offsetof(zilog_t, zl_dirty_link));
	txg_list_create(&dp->dp_dirty_dirs, spa,
	    offsetof(dsl_dir_t, dd_dirty_link));
	txg_list_create(&dp->dp_sync_tasks, spa,
	    offsetof(dsl_sync_task_t, dst_node));
	txg_list_create(&dp->dp_early_sync_tasks, spa,
	    offsetof(dsl_sync_task_t, dst_node));

	dp->dp_sync_taskq = taskq_create("dp_sync_taskq",
	    zfs_sync_taskq_batch_pct, minclsyspri, 1, INT_MAX,
	    TASKQ_THREADS_CPU_PCT);

	dp->dp_zil_clean_taskq = taskq_create("dp_zil_clean_taskq",
	    zfs_zil_clean_taskq_nthr_pct, minclsyspri,
	    zfs_zil_clean_taskq_minalloc,
	    zfs_zil_clean_taskq_maxalloc,
	    TASKQ_PREPOPULATE | TASKQ_THREADS_CPU_PCT);

	mutex_init(&dp->dp_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&dp->dp_spaceavail_cv, NULL, CV_DEFAULT, NULL);

	dp->dp_iput_taskq = taskq_create("z_iput", max_ncpus, defclsyspri,
	    max_ncpus * 8, INT_MAX, TASKQ_PREPOPULATE | TASKQ_DYNAMIC);

	return (dp);
}

int
dsl_pool_init(spa_t *spa, uint64_t txg, dsl_pool_t **dpp)
{
	int err;
	dsl_pool_t *dp = dsl_pool_open_impl(spa, txg);

	/*
	 * Initialize the caller's dsl_pool_t structure before we actually open
	 * the meta objset.  This is done because a self-healing write zio may
	 * be issued as part of dmu_objset_open_impl() and the spa needs its
	 * dsl_pool_t initialized in order to handle the write.
	 */
	*dpp = dp;

	err = dmu_objset_open_impl(spa, NULL, &dp->dp_meta_rootbp,
	    &dp->dp_meta_objset);
	if (err != 0) {
		dsl_pool_close(dp);
		*dpp = NULL;
	}

	return (err);
}

int
dsl_pool_open(dsl_pool_t *dp)
{
	int err;
	dsl_dir_t *dd;
	dsl_dataset_t *ds;
	uint64_t obj;

	rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);
	err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_ROOT_DATASET, sizeof (uint64_t), 1,
	    &dp->dp_root_dir_obj);
	if (err)
		goto out;

	err = dsl_dir_hold_obj(dp, dp->dp_root_dir_obj,
	    NULL, dp, &dp->dp_root_dir);
	if (err)
		goto out;

	err = dsl_pool_open_special_dir(dp, MOS_DIR_NAME, &dp->dp_mos_dir);
	if (err)
		goto out;

	if (spa_version(dp->dp_spa) >= SPA_VERSION_ORIGIN) {
		err = dsl_pool_open_special_dir(dp, ORIGIN_DIR_NAME, &dd);
		if (err)
			goto out;
		err = dsl_dataset_hold_obj(dp,
		    dsl_dir_phys(dd)->dd_head_dataset_obj, FTAG, &ds);
		if (err == 0) {
			err = dsl_dataset_hold_obj(dp,
			    dsl_dataset_phys(ds)->ds_prev_snap_obj, dp,
			    &dp->dp_origin_snap);
			dsl_dataset_rele(ds, FTAG);
		}
		dsl_dir_rele(dd, dp);
		if (err)
			goto out;
	}

	if (spa_version(dp->dp_spa) >= SPA_VERSION_DEADLISTS) {
		err = dsl_pool_open_special_dir(dp, FREE_DIR_NAME,
		    &dp->dp_free_dir);
		if (err)
			goto out;

		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_FREE_BPOBJ, sizeof (uint64_t), 1, &obj);
		if (err)
			goto out;
		VERIFY0(bpobj_open(&dp->dp_free_bpobj,
		    dp->dp_meta_objset, obj));
	}

	if (spa_feature_is_active(dp->dp_spa, SPA_FEATURE_OBSOLETE_COUNTS)) {
		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_OBSOLETE_BPOBJ, sizeof (uint64_t), 1, &obj);
		if (err == 0) {
			VERIFY0(bpobj_open(&dp->dp_obsolete_bpobj,
			    dp->dp_meta_objset, obj));
		} else if (err == ENOENT) {
			/*
			 * We might not have created the remap bpobj yet.
			 */
			err = 0;
		} else {
			goto out;
		}
	}

	/*
	 * Note: errors ignored, because these special dirs, used for
	 * space accounting, are only created on demand.
	 */
	(void) dsl_pool_open_special_dir(dp, LEAK_DIR_NAME,
	    &dp->dp_leak_dir);

	if (spa_feature_is_active(dp->dp_spa, SPA_FEATURE_ASYNC_DESTROY)) {
		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_BPTREE_OBJ, sizeof (uint64_t), 1,
		    &dp->dp_bptree_obj);
		if (err != 0)
			goto out;
	}

	if (spa_feature_is_active(dp->dp_spa, SPA_FEATURE_EMPTY_BPOBJ)) {
		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_EMPTY_BPOBJ, sizeof (uint64_t), 1,
		    &dp->dp_empty_bpobj);
		if (err != 0)
			goto out;
	}

	err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_TMP_USERREFS, sizeof (uint64_t), 1,
	    &dp->dp_tmp_userrefs_obj);
	if (err == ENOENT)
		err = 0;
	if (err)
		goto out;

	err = dsl_scan_init(dp, dp->dp_tx.tx_open_txg);

out:
	rrw_exit(&dp->dp_config_rwlock, FTAG);
	return (err);
}

void
dsl_pool_close(dsl_pool_t *dp)
{
	/*
	 * Drop our references from dsl_pool_open().
	 *
	 * Since we held the origin_snap from "syncing" context (which
	 * includes pool-opening context), it actually only got a "ref"
	 * and not a hold, so just drop that here.
	 */
	if (dp->dp_origin_snap != NULL)
		dsl_dataset_rele(dp->dp_origin_snap, dp);
	if (dp->dp_mos_dir != NULL)
		dsl_dir_rele(dp->dp_mos_dir, dp);
	if (dp->dp_free_dir != NULL)
		dsl_dir_rele(dp->dp_free_dir, dp);
	if (dp->dp_leak_dir != NULL)
		dsl_dir_rele(dp->dp_leak_dir, dp);
	if (dp->dp_root_dir != NULL)
		dsl_dir_rele(dp->dp_root_dir, dp);

	bpobj_close(&dp->dp_free_bpobj);
	bpobj_close(&dp->dp_obsolete_bpobj);

	/* undo the dmu_objset_open_impl(mos) from dsl_pool_open() */
	if (dp->dp_meta_objset != NULL)
		dmu_objset_evict(dp->dp_meta_objset);

	txg_list_destroy(&dp->dp_dirty_datasets);
	txg_list_destroy(&dp->dp_dirty_zilogs);
	txg_list_destroy(&dp->dp_sync_tasks);
	txg_list_destroy(&dp->dp_early_sync_tasks);
	txg_list_destroy(&dp->dp_dirty_dirs);

	taskq_destroy(dp->dp_zil_clean_taskq);
	taskq_destroy(dp->dp_sync_taskq);

	/*
	 * We can't set retry to TRUE since we're explicitly specifying
	 * a spa to flush. This is good enough; any missed buffers for
	 * this spa won't cause trouble, and they'll eventually fall
	 * out of the ARC just like any other unused buffer.
	 */
	arc_flush(dp->dp_spa, FALSE);

	mmp_fini(dp->dp_spa);
	txg_fini(dp);
	dsl_scan_fini(dp);
	dmu_buf_user_evict_wait();

	rrw_destroy(&dp->dp_config_rwlock);
	mutex_destroy(&dp->dp_lock);
	cv_destroy(&dp->dp_spaceavail_cv);
	taskq_destroy(dp->dp_iput_taskq);
	if (dp->dp_blkstats != NULL) {
		mutex_destroy(&dp->dp_blkstats->zab_lock);
		vmem_free(dp->dp_blkstats, sizeof (zfs_all_blkstats_t));
	}
	kmem_free(dp, sizeof (dsl_pool_t));
}

void
dsl_pool_create_obsolete_bpobj(dsl_pool_t *dp, dmu_tx_t *tx)
{
	uint64_t obj;
	/*
	 * Currently, we only create the obsolete_bpobj where there are
	 * indirect vdevs with referenced mappings.
	 */
	ASSERT(spa_feature_is_active(dp->dp_spa, SPA_FEATURE_DEVICE_REMOVAL));
	/* create and open the obsolete_bpobj */
	obj = bpobj_alloc(dp->dp_meta_objset, SPA_OLD_MAXBLOCKSIZE, tx);
	VERIFY0(bpobj_open(&dp->dp_obsolete_bpobj, dp->dp_meta_objset, obj));
	VERIFY0(zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_OBSOLETE_BPOBJ, sizeof (uint64_t), 1, &obj, tx));
	spa_feature_incr(dp->dp_spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
}

void
dsl_pool_destroy_obsolete_bpobj(dsl_pool_t *dp, dmu_tx_t *tx)
{
	spa_feature_decr(dp->dp_spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
	VERIFY0(zap_remove(dp->dp_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_OBSOLETE_BPOBJ, tx));
	bpobj_free(dp->dp_meta_objset,
	    dp->dp_obsolete_bpobj.bpo_object, tx);
	bpobj_close(&dp->dp_obsolete_bpobj);
}

dsl_pool_t *
dsl_pool_create(spa_t *spa, nvlist_t *zplprops, dsl_crypto_params_t *dcp,
    uint64_t txg)
{
	int err;
	dsl_pool_t *dp = dsl_pool_open_impl(spa, txg);
	dmu_tx_t *tx = dmu_tx_create_assigned(dp, txg);
	dsl_dataset_t *ds;
	uint64_t obj;

	rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);

	/* create and open the MOS (meta-objset) */
	dp->dp_meta_objset = dmu_objset_create_impl(spa,
	    NULL, &dp->dp_meta_rootbp, DMU_OST_META, tx);
	spa->spa_meta_objset = dp->dp_meta_objset;

	/* create the pool directory */
	err = zap_create_claim(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_OT_OBJECT_DIRECTORY, DMU_OT_NONE, 0, tx);
	ASSERT0(err);

	/* Initialize scan structures */
	VERIFY0(dsl_scan_init(dp, txg));

	/* create and open the root dir */
	dp->dp_root_dir_obj = dsl_dir_create_sync(dp, NULL, NULL, tx);
	VERIFY0(dsl_dir_hold_obj(dp, dp->dp_root_dir_obj,
	    NULL, dp, &dp->dp_root_dir));

	/* create and open the meta-objset dir */
	(void) dsl_dir_create_sync(dp, dp->dp_root_dir, MOS_DIR_NAME, tx);
	VERIFY0(dsl_pool_open_special_dir(dp,
	    MOS_DIR_NAME, &dp->dp_mos_dir));

	if (spa_version(spa) >= SPA_VERSION_DEADLISTS) {
		/* create and open the free dir */
		(void) dsl_dir_create_sync(dp, dp->dp_root_dir,
		    FREE_DIR_NAME, tx);
		VERIFY0(dsl_pool_open_special_dir(dp,
		    FREE_DIR_NAME, &dp->dp_free_dir));

		/* create and open the free_bplist */
		obj = bpobj_alloc(dp->dp_meta_objset, SPA_OLD_MAXBLOCKSIZE, tx);
		VERIFY(zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_FREE_BPOBJ, sizeof (uint64_t), 1, &obj, tx) == 0);
		VERIFY0(bpobj_open(&dp->dp_free_bpobj,
		    dp->dp_meta_objset, obj));
	}

	if (spa_version(spa) >= SPA_VERSION_DSL_SCRUB)
		dsl_pool_create_origin(dp, tx);

	/*
	 * Some features may be needed when creating the root dataset, so we
	 * create the feature objects here.
	 */
	if (spa_version(spa) >= SPA_VERSION_FEATURES)
		spa_feature_create_zap_objects(spa, tx);

	if (dcp != NULL && dcp->cp_crypt != ZIO_CRYPT_OFF &&
	    dcp->cp_crypt != ZIO_CRYPT_INHERIT)
		spa_feature_enable(spa, SPA_FEATURE_ENCRYPTION, tx);

	/* create the root dataset */
	obj = dsl_dataset_create_sync_dd(dp->dp_root_dir, NULL, dcp, 0, tx);

	/* create the root objset */
	VERIFY0(dsl_dataset_hold_obj_flags(dp, obj,
	    DS_HOLD_FLAG_DECRYPT, FTAG, &ds));
#ifdef _KERNEL
	{
		objset_t *os;
		rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
		os = dmu_objset_create_impl(dp->dp_spa, ds,
		    dsl_dataset_get_blkptr(ds), DMU_OST_ZFS, tx);
		rrw_exit(&ds->ds_bp_rwlock, FTAG);
		zfs_create_fs(os, kcred, zplprops, tx);
	}
#endif
	dsl_dataset_rele_flags(ds, DS_HOLD_FLAG_DECRYPT, FTAG);

	dmu_tx_commit(tx);

	rrw_exit(&dp->dp_config_rwlock, FTAG);

	return (dp);
}

/*
 * Account for the meta-objset space in its placeholder dsl_dir.
 */
void
dsl_pool_mos_diduse_space(dsl_pool_t *dp,
    int64_t used, int64_t comp, int64_t uncomp)
{
	ASSERT3U(comp, ==, uncomp);	/* it's all metadata */
	mutex_enter(&dp->dp_lock);
	dp->dp_mos_used_delta += used;
	dp->dp_mos_compressed_delta += comp;
	dp->dp_mos_uncompressed_delta += uncomp;
	mutex_exit(&dp->dp_lock);
}

static void
dsl_pool_sync_mos(dsl_pool_t *dp, dmu_tx_t *tx)
{
	zio_t *zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
	dmu_objset_sync(dp->dp_meta_objset, zio, tx);
	VERIFY0(zio_wait(zio));
	dprintf_bp(&dp->dp_meta_rootbp, "meta objset rootbp is %s", "");
	spa_set_rootblkptr(dp->dp_spa, &dp->dp_meta_rootbp);
}

static void
dsl_pool_dirty_delta(dsl_pool_t *dp, int64_t delta)
{
	ASSERT(MUTEX_HELD(&dp->dp_lock));

	if (delta < 0)
		ASSERT3U(-delta, <=, dp->dp_dirty_total);

	dp->dp_dirty_total += delta;

	/*
	 * Note: we signal even when increasing dp_dirty_total.
	 * This ensures forward progress -- each thread wakes the next waiter.
	 */
	if (dp->dp_dirty_total < zfs_dirty_data_max)
		cv_signal(&dp->dp_spaceavail_cv);
}
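
/*
 * Editor's sketch (not part of the original source): the waiting side of
 * this handshake is in dmu_tx_wait() (dmu_tx.c), which under dp_lock does
 * roughly:
 *
 *	while (dp->dp_dirty_total >= zfs_dirty_data_max)
 *		cv_wait(&dp->dp_spaceavail_cv, &dp->dp_lock);
 *
 * so each cv_signal() above releases one blocked writer once dirty data
 * drops back under the limit, and that writer in turn wakes the next.
 */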

#ifdef ZFS_DEBUG
static boolean_t
dsl_early_sync_task_verify(dsl_pool_t *dp, uint64_t txg)
{
	spa_t *spa = dp->dp_spa;
	vdev_t *rvd = spa->spa_root_vdev;

	for (uint64_t c = 0; c < rvd->vdev_children; c++) {
		vdev_t *vd = rvd->vdev_child[c];
		txg_list_t *tl = &vd->vdev_ms_list;
		metaslab_t *ms;

		for (ms = txg_list_head(tl, TXG_CLEAN(txg)); ms;
		    ms = txg_list_next(tl, ms, TXG_CLEAN(txg))) {
			VERIFY(range_tree_is_empty(ms->ms_freeing));
			VERIFY(range_tree_is_empty(ms->ms_checkpointing));
		}
	}

	return (B_TRUE);
}
#endif

void
dsl_pool_sync(dsl_pool_t *dp, uint64_t txg)
{
	zio_t *zio;
	dmu_tx_t *tx;
	dsl_dir_t *dd;
	dsl_dataset_t *ds;
	objset_t *mos = dp->dp_meta_objset;
	list_t synced_datasets;

	list_create(&synced_datasets, sizeof (dsl_dataset_t),
	    offsetof(dsl_dataset_t, ds_synced_link));

	tx = dmu_tx_create_assigned(dp, txg);

	/*
	 * Run all early sync tasks before writing out any dirty blocks.
	 * For more info on early sync tasks see block comment in
	 * dsl_early_sync_task().
	 */
	if (!txg_list_empty(&dp->dp_early_sync_tasks, txg)) {
		dsl_sync_task_t *dst;

		ASSERT3U(spa_sync_pass(dp->dp_spa), ==, 1);
		while ((dst =
		    txg_list_remove(&dp->dp_early_sync_tasks, txg)) != NULL) {
			ASSERT(dsl_early_sync_task_verify(dp, txg));
			dsl_sync_task_sync(dst, tx);
		}
		ASSERT(dsl_early_sync_task_verify(dp, txg));
	}

	/*
	 * Write out all dirty blocks of dirty datasets.
	 */
	zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
	while ((ds = txg_list_remove(&dp->dp_dirty_datasets, txg)) != NULL) {
		/*
		 * We must not sync any non-MOS datasets twice, because
		 * we may have taken a snapshot of them.  However, we
		 * may sync newly-created datasets on pass 2.
		 */
		ASSERT(!list_link_active(&ds->ds_synced_link));
		list_insert_tail(&synced_datasets, ds);
		dsl_dataset_sync(ds, zio, tx);
	}
	VERIFY0(zio_wait(zio));

	/*
	 * We have written all of the accounted dirty data, so our
	 * dp_space_towrite should now be zero.  However, some seldom-used
	 * code paths do not adhere to this (e.g. dbuf_undirty(), and
	 * rounding error in dbuf_write_physdone()).
	 * Shore up the accounting of any dirtied space now.
	 */
	dsl_pool_undirty_space(dp, dp->dp_dirty_pertxg[txg & TXG_MASK], txg);

	/*
	 * Update the long range free counter after
	 * we're done syncing user data.
	 */
	mutex_enter(&dp->dp_lock);
	ASSERT(spa_sync_pass(dp->dp_spa) == 1 ||
	    dp->dp_long_free_dirty_pertxg[txg & TXG_MASK] == 0);
	dp->dp_long_free_dirty_pertxg[txg & TXG_MASK] = 0;
	mutex_exit(&dp->dp_lock);

	/*
	 * After the data blocks have been written (ensured by the zio_wait()
	 * above), update the user/group/project space accounting.  This
	 * happens in tasks dispatched to dp_sync_taskq, so wait for them
	 * before continuing.
	 */
	for (ds = list_head(&synced_datasets); ds != NULL;
	    ds = list_next(&synced_datasets, ds)) {
		dmu_objset_do_userquota_updates(ds->ds_objset, tx);
	}
	taskq_wait(dp->dp_sync_taskq);

	/*
	 * Sync the datasets again to push out the changes due to
	 * userspace updates.  This must be done before we process the
	 * sync tasks, so that any snapshots will have the correct
	 * user accounting information (and we won't get confused
	 * about which blocks are part of the snapshot).
	 */
	zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
	while ((ds = txg_list_remove(&dp->dp_dirty_datasets, txg)) != NULL) {
		objset_t *os = ds->ds_objset;

		ASSERT(list_link_active(&ds->ds_synced_link));
		dmu_buf_rele(ds->ds_dbuf, ds);
		dsl_dataset_sync(ds, zio, tx);

		/*
		 * Release any key mappings created by calls to
		 * dsl_dataset_dirty() from the userquota accounting
		 * code paths.
		 */
		if (os->os_encrypted && !os->os_raw_receive &&
		    !os->os_next_write_raw[txg & TXG_MASK]) {
			ASSERT3P(ds->ds_key_mapping, !=, NULL);
			key_mapping_rele(dp->dp_spa, ds->ds_key_mapping, ds);
		}
	}
	VERIFY0(zio_wait(zio));

	/*
	 * Now that the datasets have been completely synced, we can
	 * clean up our in-memory structures accumulated while syncing:
	 *
	 *  - move dead blocks from the pending deadlist to the on-disk
	 *    deadlist
	 *  - release hold from dsl_dataset_dirty()
	 *  - release key mapping hold from dsl_dataset_dirty()
	 */
	while ((ds = list_remove_head(&synced_datasets)) != NULL) {
		objset_t *os = ds->ds_objset;

		if (os->os_encrypted && !os->os_raw_receive &&
		    !os->os_next_write_raw[txg & TXG_MASK]) {
			ASSERT3P(ds->ds_key_mapping, !=, NULL);
			key_mapping_rele(dp->dp_spa, ds->ds_key_mapping, ds);
		}

		dsl_dataset_sync_done(ds, tx);
	}

	while ((dd = txg_list_remove(&dp->dp_dirty_dirs, txg)) != NULL) {
		dsl_dir_sync(dd, tx);
	}

	/*
	 * The MOS's space is accounted for in the pool/$MOS
	 * (dp_mos_dir).  We can't modify the mos while we're syncing
	 * it, so we remember the deltas and apply them here.
	 */
	if (dp->dp_mos_used_delta != 0 || dp->dp_mos_compressed_delta != 0 ||
	    dp->dp_mos_uncompressed_delta != 0) {
		dsl_dir_diduse_space(dp->dp_mos_dir, DD_USED_HEAD,
		    dp->dp_mos_used_delta,
		    dp->dp_mos_compressed_delta,
		    dp->dp_mos_uncompressed_delta, tx);
		dp->dp_mos_used_delta = 0;
		dp->dp_mos_compressed_delta = 0;
		dp->dp_mos_uncompressed_delta = 0;
	}

	if (!multilist_is_empty(mos->os_dirty_dnodes[txg & TXG_MASK])) {
		dsl_pool_sync_mos(dp, tx);
	}

	/*
	 * If we modify a dataset in the same txg that we want to destroy it,
	 * its dsl_dir's dd_dbuf will be dirty, and thus have a hold on it.
	 * dsl_dir_destroy_check() will fail if there are unexpected holds.
	 * Therefore, we want to sync the MOS (thus syncing the dd_dbuf
	 * and clearing the hold on it) before we process the sync_tasks.
	 * The MOS data dirtied by the sync_tasks will be synced on the next
	 * pass.
	 */
	if (!txg_list_empty(&dp->dp_sync_tasks, txg)) {
		dsl_sync_task_t *dst;
		/*
		 * No more sync tasks should have been added while we
		 * were syncing.
		 */
		ASSERT3U(spa_sync_pass(dp->dp_spa), ==, 1);
		while ((dst = txg_list_remove(&dp->dp_sync_tasks, txg)) != NULL)
			dsl_sync_task_sync(dst, tx);
	}

	dmu_tx_commit(tx);

	DTRACE_PROBE2(dsl_pool_sync__done, dsl_pool_t *dp, dp, uint64_t, txg);
}

void
dsl_pool_sync_done(dsl_pool_t *dp, uint64_t txg)
{
	zilog_t *zilog;

	while ((zilog = txg_list_head(&dp->dp_dirty_zilogs, txg))) {
		dsl_dataset_t *ds = dmu_objset_ds(zilog->zl_os);
		/*
		 * We don't remove the zilog from the dp_dirty_zilogs
		 * list until after we've cleaned it. This ensures that
		 * callers of zilog_is_dirty() receive an accurate
		 * answer when they are racing with the spa sync thread.
		 */
		zil_clean(zilog, txg);
		(void) txg_list_remove_this(&dp->dp_dirty_zilogs, zilog, txg);
		ASSERT(!dmu_objset_is_dirty(zilog->zl_os, txg));
		dmu_buf_rele(ds->ds_dbuf, zilog);
	}
	ASSERT(!dmu_objset_is_dirty(dp->dp_meta_objset, txg));
}

/*
 * TRUE if the current thread is the tx_sync_thread or if we
 * are being called from SPA context during pool initialization.
 */
int
dsl_pool_sync_context(dsl_pool_t *dp)
{
	return (curthread == dp->dp_tx.tx_sync_thread ||
	    spa_is_initializing(dp->dp_spa) ||
	    taskq_member(dp->dp_sync_taskq, curthread));
}

/*
 * This function returns the amount of allocatable space in the pool
 * minus whatever space is currently reserved by ZFS for specific
 * purposes. Specifically:
 *
 * 1] Any reserved SLOP space
 * 2] Any space used by the checkpoint
 * 3] Any space used for deferred frees
 *
 * The latter 2 are especially important because they are needed to
 * rectify the SPA's and DMU's different understanding of how much space
 * is used. Now the DMU is aware of that extra space tracked by the SPA
 * without having to maintain a separate special dir (e.g. similar to
 * $MOS, $FREEING, and $LEAKED).
 *
 * Note: By deferred frees here, we mean the frees that were deferred
 * in spa_sync() after sync pass 1 (spa_deferred_bpobj), and not the
 * segments placed in ms_defer trees during metaslab_sync_done().
 */
uint64_t
dsl_pool_adjustedsize(dsl_pool_t *dp, zfs_space_check_t slop_policy)
{
	spa_t *spa = dp->dp_spa;
	uint64_t space, resv, adjustedsize;
	uint64_t spa_deferred_frees =
	    spa->spa_deferred_bpobj.bpo_phys->bpo_bytes;

	space = spa_get_dspace(spa)
	    - spa_get_checkpoint_space(spa) - spa_deferred_frees;
	resv = spa_get_slop_space(spa);

	switch (slop_policy) {
	case ZFS_SPACE_CHECK_NORMAL:
		break;
	case ZFS_SPACE_CHECK_RESERVED:
		resv >>= 1;
		break;
	case ZFS_SPACE_CHECK_EXTRA_RESERVED:
		resv >>= 2;
		break;
	case ZFS_SPACE_CHECK_NONE:
		resv = 0;
		break;
	default:
		panic("invalid slop policy value: %d", slop_policy);
		break;
	}
	adjustedsize = (space >= resv) ? (space - resv) : 0;

	return (adjustedsize);
}
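
/*
 * Worked example (editor's illustration): with 100 GiB of dspace, 1 GiB
 * of checkpoint space, 1 GiB of deferred frees, and 3.2 GiB of slop,
 * space = 98 GiB. ZFS_SPACE_CHECK_NORMAL then yields 94.8 GiB,
 * _RESERVED halves the slop (96.4 GiB usable), _EXTRA_RESERVED quarters
 * it (97.2 GiB), and _NONE ignores slop entirely (98 GiB).
 */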

uint64_t
dsl_pool_unreserved_space(dsl_pool_t *dp, zfs_space_check_t slop_policy)
{
	uint64_t poolsize = dsl_pool_adjustedsize(dp, slop_policy);
	uint64_t deferred =
	    metaslab_class_get_deferred(spa_normal_class(dp->dp_spa));
	uint64_t quota = (poolsize >= deferred) ? (poolsize - deferred) : 0;
	return (quota);
}

boolean_t
dsl_pool_need_dirty_delay(dsl_pool_t *dp)
{
	uint64_t delay_min_bytes =
	    zfs_dirty_data_max * zfs_delay_min_dirty_percent / 100;
	uint64_t dirty_min_bytes =
	    zfs_dirty_data_max * zfs_dirty_data_sync_percent / 100;
	boolean_t rv;

	mutex_enter(&dp->dp_lock);
	if (dp->dp_dirty_total > dirty_min_bytes)
		txg_kick(dp);
	rv = (dp->dp_dirty_total > delay_min_bytes);
	mutex_exit(&dp->dp_lock);
	return (rv);
}
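
/*
 * Worked example (editor's illustration): with zfs_dirty_data_max set to
 * 4 GiB and the default percentages above, a txg is kicked once dirty
 * data exceeds 20% of that (~819 MiB), and dmu_tx_delay() starts
 * throttling transactions at 60% (~2.4 GiB).
 */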

void
dsl_pool_dirty_space(dsl_pool_t *dp, int64_t space, dmu_tx_t *tx)
{
	if (space > 0) {
		mutex_enter(&dp->dp_lock);
		dp->dp_dirty_pertxg[tx->tx_txg & TXG_MASK] += space;
		dsl_pool_dirty_delta(dp, space);
		mutex_exit(&dp->dp_lock);
	}
}

void
dsl_pool_undirty_space(dsl_pool_t *dp, int64_t space, uint64_t txg)
{
	ASSERT3S(space, >=, 0);
	if (space == 0)
		return;

	mutex_enter(&dp->dp_lock);
	if (dp->dp_dirty_pertxg[txg & TXG_MASK] < space) {
		/* XXX writing something we didn't dirty? */
		space = dp->dp_dirty_pertxg[txg & TXG_MASK];
	}
	ASSERT3U(dp->dp_dirty_pertxg[txg & TXG_MASK], >=, space);
	dp->dp_dirty_pertxg[txg & TXG_MASK] -= space;
	ASSERT3U(dp->dp_dirty_total, >=, space);
	dsl_pool_dirty_delta(dp, -space);
	mutex_exit(&dp->dp_lock);
}

/* ARGSUSED */
static int
upgrade_clones_cb(dsl_pool_t *dp, dsl_dataset_t *hds, void *arg)
{
	dmu_tx_t *tx = arg;
	dsl_dataset_t *ds, *prev = NULL;
	int err;

	err = dsl_dataset_hold_obj(dp, hds->ds_object, FTAG, &ds);
	if (err)
		return (err);

	while (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0) {
		err = dsl_dataset_hold_obj(dp,
		    dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &prev);
		if (err) {
			dsl_dataset_rele(ds, FTAG);
			return (err);
		}

		if (dsl_dataset_phys(prev)->ds_next_snap_obj != ds->ds_object)
			break;
		dsl_dataset_rele(ds, FTAG);
		ds = prev;
		prev = NULL;
	}

	if (prev == NULL) {
		prev = dp->dp_origin_snap;

		/*
		 * The $ORIGIN can't have any data, or the accounting
		 * will be wrong.
		 */
		rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
		ASSERT0(dsl_dataset_phys(prev)->ds_bp.blk_birth);
		rrw_exit(&ds->ds_bp_rwlock, FTAG);

		/* The origin doesn't get attached to itself */
		if (ds->ds_object == prev->ds_object) {
			dsl_dataset_rele(ds, FTAG);
			return (0);
		}

		dmu_buf_will_dirty(ds->ds_dbuf, tx);
		dsl_dataset_phys(ds)->ds_prev_snap_obj = prev->ds_object;
		dsl_dataset_phys(ds)->ds_prev_snap_txg =
		    dsl_dataset_phys(prev)->ds_creation_txg;

		dmu_buf_will_dirty(ds->ds_dir->dd_dbuf, tx);
		dsl_dir_phys(ds->ds_dir)->dd_origin_obj = prev->ds_object;

		dmu_buf_will_dirty(prev->ds_dbuf, tx);
		dsl_dataset_phys(prev)->ds_num_children++;

		if (dsl_dataset_phys(ds)->ds_next_snap_obj == 0) {
			ASSERT(ds->ds_prev == NULL);
			VERIFY0(dsl_dataset_hold_obj(dp,
			    dsl_dataset_phys(ds)->ds_prev_snap_obj,
			    ds, &ds->ds_prev));
		}
	}

	ASSERT3U(dsl_dir_phys(ds->ds_dir)->dd_origin_obj, ==, prev->ds_object);
	ASSERT3U(dsl_dataset_phys(ds)->ds_prev_snap_obj, ==, prev->ds_object);

	if (dsl_dataset_phys(prev)->ds_next_clones_obj == 0) {
		dmu_buf_will_dirty(prev->ds_dbuf, tx);
		dsl_dataset_phys(prev)->ds_next_clones_obj =
		    zap_create(dp->dp_meta_objset,
		    DMU_OT_NEXT_CLONES, DMU_OT_NONE, 0, tx);
	}
	VERIFY0(zap_add_int(dp->dp_meta_objset,
	    dsl_dataset_phys(prev)->ds_next_clones_obj, ds->ds_object, tx));

	dsl_dataset_rele(ds, FTAG);
	if (prev != dp->dp_origin_snap)
		dsl_dataset_rele(prev, FTAG);
	return (0);
}

void
dsl_pool_upgrade_clones(dsl_pool_t *dp, dmu_tx_t *tx)
{
	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(dp->dp_origin_snap != NULL);

	VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj, upgrade_clones_cb,
	    tx, DS_FIND_CHILDREN | DS_FIND_SERIALIZE));
}

/* ARGSUSED */
static int
upgrade_dir_clones_cb(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)
{
	dmu_tx_t *tx = arg;
	objset_t *mos = dp->dp_meta_objset;

	if (dsl_dir_phys(ds->ds_dir)->dd_origin_obj != 0) {
		dsl_dataset_t *origin;

		VERIFY0(dsl_dataset_hold_obj(dp,
		    dsl_dir_phys(ds->ds_dir)->dd_origin_obj, FTAG, &origin));

		if (dsl_dir_phys(origin->ds_dir)->dd_clones == 0) {
			dmu_buf_will_dirty(origin->ds_dir->dd_dbuf, tx);
			dsl_dir_phys(origin->ds_dir)->dd_clones =
			    zap_create(mos, DMU_OT_DSL_CLONES, DMU_OT_NONE,
			    0, tx);
		}

		VERIFY0(zap_add_int(dp->dp_meta_objset,
		    dsl_dir_phys(origin->ds_dir)->dd_clones,
		    ds->ds_object, tx));

		dsl_dataset_rele(origin, FTAG);
	}
	return (0);
}

void
dsl_pool_upgrade_dir_clones(dsl_pool_t *dp, dmu_tx_t *tx)
{
	uint64_t obj;

	ASSERT(dmu_tx_is_syncing(tx));

	(void) dsl_dir_create_sync(dp, dp->dp_root_dir, FREE_DIR_NAME, tx);
	VERIFY0(dsl_pool_open_special_dir(dp,
	    FREE_DIR_NAME, &dp->dp_free_dir));

	/*
	 * We can't use bpobj_alloc(), because spa_version() still
	 * returns the old version, and we need a new-version bpobj with
	 * subobj support.  So call dmu_object_alloc() directly.
	 */
	obj = dmu_object_alloc(dp->dp_meta_objset, DMU_OT_BPOBJ,
	    SPA_OLD_MAXBLOCKSIZE, DMU_OT_BPOBJ_HDR, sizeof (bpobj_phys_t), tx);
	VERIFY0(zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_FREE_BPOBJ, sizeof (uint64_t), 1, &obj, tx));
	VERIFY0(bpobj_open(&dp->dp_free_bpobj, dp->dp_meta_objset, obj));

	VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
	    upgrade_dir_clones_cb, tx, DS_FIND_CHILDREN | DS_FIND_SERIALIZE));
}

void
dsl_pool_create_origin(dsl_pool_t *dp, dmu_tx_t *tx)
{
	uint64_t dsobj;
	dsl_dataset_t *ds;

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(dp->dp_origin_snap == NULL);
	ASSERT(rrw_held(&dp->dp_config_rwlock, RW_WRITER));

	/* create the origin dir, ds, & snap-ds */
	dsobj = dsl_dataset_create_sync(dp->dp_root_dir, ORIGIN_DIR_NAME,
	    NULL, 0, kcred, NULL, tx);
	VERIFY0(dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
	dsl_dataset_snapshot_sync_impl(ds, ORIGIN_DIR_NAME, tx);
	VERIFY0(dsl_dataset_hold_obj(dp, dsl_dataset_phys(ds)->ds_prev_snap_obj,
	    dp, &dp->dp_origin_snap));
	dsl_dataset_rele(ds, FTAG);
}

taskq_t *
dsl_pool_iput_taskq(dsl_pool_t *dp)
{
	return (dp->dp_iput_taskq);
}

/*
 * Walk through the pool-wide zap object of temporary snapshot user holds
 * and release them.
 */
void
dsl_pool_clean_tmp_userrefs(dsl_pool_t *dp)
{
	zap_attribute_t za;
	zap_cursor_t zc;
	objset_t *mos = dp->dp_meta_objset;
	uint64_t zapobj = dp->dp_tmp_userrefs_obj;
	nvlist_t *holds;

	if (zapobj == 0)
		return;
	ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);

	holds = fnvlist_alloc();

	for (zap_cursor_init(&zc, mos, zapobj);
	    zap_cursor_retrieve(&zc, &za) == 0;
	    zap_cursor_advance(&zc)) {
		char *htag;
		nvlist_t *tags;

		htag = strchr(za.za_name, '-');
		*htag = '\0';
		++htag;
		if (nvlist_lookup_nvlist(holds, za.za_name, &tags) != 0) {
			tags = fnvlist_alloc();
			fnvlist_add_boolean(tags, htag);
			fnvlist_add_nvlist(holds, za.za_name, tags);
			fnvlist_free(tags);
		} else {
			fnvlist_add_boolean(tags, htag);
		}
	}
	dsl_dataset_user_release_tmp(dp, holds);
	fnvlist_free(holds);
	zap_cursor_fini(&zc);
}

/*
 * Create the pool-wide zap object for storing temporary snapshot holds.
 */
void
dsl_pool_user_hold_create_obj(dsl_pool_t *dp, dmu_tx_t *tx)
{
	objset_t *mos = dp->dp_meta_objset;

	ASSERT(dp->dp_tmp_userrefs_obj == 0);
	ASSERT(dmu_tx_is_syncing(tx));

	dp->dp_tmp_userrefs_obj = zap_create_link(mos, DMU_OT_USERREFS,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_TMP_USERREFS, tx);
}

static int
dsl_pool_user_hold_rele_impl(dsl_pool_t *dp, uint64_t dsobj,
    const char *tag, uint64_t now, dmu_tx_t *tx, boolean_t holding)
{
	objset_t *mos = dp->dp_meta_objset;
	uint64_t zapobj = dp->dp_tmp_userrefs_obj;
	char *name;
	int error;

	ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);
	ASSERT(dmu_tx_is_syncing(tx));

	/*
	 * If the pool was created prior to SPA_VERSION_USERREFS, the
	 * zap object for temporary holds might not exist yet.
	 */
	if (zapobj == 0) {
		if (holding) {
			dsl_pool_user_hold_create_obj(dp, tx);
			zapobj = dp->dp_tmp_userrefs_obj;
		} else {
			return (SET_ERROR(ENOENT));
		}
	}

	name = kmem_asprintf("%llx-%s", (u_longlong_t)dsobj, tag);
	if (holding)
		error = zap_add(mos, zapobj, name, 8, 1, &now, tx);
	else
		error = zap_remove(mos, zapobj, name, tx);
	strfree(name);

	return (error);
}
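
/*
 * Editor's note: for example, a temporary hold on dataset object 0x36
 * with tag "send" is stored under the ZAP name "36-send", with the hold's
 * creation time as the entry's value; dsl_pool_clean_tmp_userrefs() above
 * splits such names back apart at the '-' when releasing leftover holds.
 */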

/*
 * Add a temporary hold for the given dataset object and tag.
 */
int
dsl_pool_user_hold(dsl_pool_t *dp, uint64_t dsobj, const char *tag,
    uint64_t now, dmu_tx_t *tx)
{
	return (dsl_pool_user_hold_rele_impl(dp, dsobj, tag, now, tx, B_TRUE));
}

/*
 * Release a temporary hold for the given dataset object and tag.
 */
int
dsl_pool_user_release(dsl_pool_t *dp, uint64_t dsobj, const char *tag,
    dmu_tx_t *tx)
{
	return (dsl_pool_user_hold_rele_impl(dp, dsobj, tag, 0,
	    tx, B_FALSE));
}

/*
 * DSL Pool Configuration Lock
 *
 * The dp_config_rwlock protects against changes to DSL state (e.g. dataset
 * creation / destruction / rename / property setting).  It must be held for
 * read to hold a dataset or dsl_dir.  I.e. you must call
 * dsl_pool_config_enter() or dsl_pool_hold() before calling
 * dsl_{dataset,dir}_hold{_obj}.  In most circumstances, the dp_config_rwlock
 * must be held continuously until all datasets and dsl_dirs are released.
 *
 * The only exception to this rule is that if a "long hold" is placed on
 * a dataset, then the dp_config_rwlock may be dropped while the dataset
 * is still held.  The long hold will prevent the dataset from being
 * destroyed -- the destroy will fail with EBUSY.  A long hold can be
 * obtained by calling dsl_dataset_long_hold(), or by "owning" a dataset
 * (by calling dsl_{dataset,objset}_{try}own{_obj}).
 *
 * Legitimate long-holders (including owners) are long-running, cancelable
 * tasks for which it is acceptable that "zfs destroy" fails.  This includes
 * DMU consumers (i.e. a ZPL filesystem being mounted or a ZVOL being open),
 * "zfs send", and "zfs diff".  There are several other long-holders whose
 * uses are suboptimal (e.g. "zfs promote", and zil_suspend()).
 *
 * The usual formula for long-holding would be:
 * dsl_pool_hold()
 * dsl_dataset_hold()
 * ... perform checks ...
 * dsl_dataset_long_hold()
 * dsl_pool_rele()
 * ... perform long-running task ...
 * dsl_dataset_long_rele()
 * dsl_dataset_rele()
 *
 * Note that when the long hold is released, the dataset is still held but
 * the pool is not held.  The dataset may change arbitrarily during this time
 * (e.g. it could be destroyed).  Therefore you shouldn't do anything to the
 * dataset except release it.
 *
 * User-initiated operations (e.g. ioctls, zfs_ioc_*()) are either read-only
 * or modifying operations.
 *
 * Modifying operations should generally use dsl_sync_task().  The synctask
 * infrastructure enforces proper locking strategy with respect to the
 * dp_config_rwlock.  See the comment above dsl_sync_task() for details.
 *
 * Read-only operations will manually hold the pool, then the dataset, obtain
 * information from the dataset, then release the pool and dataset.
 * dmu_objset_{hold,rele}() are convenience routines that also do the pool
 * hold/rele.
 */
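
/*
 * Editor's sketch of the long-hold formula above as code (illustrative
 * only, compiled out; the function name and dataset name are placeholders,
 * and error handling is abbreviated):
 */
#if 0
static int
example_long_running_task(const char *dsname, void *tag)
{
	dsl_pool_t *dp;
	dsl_dataset_t *ds;
	int error;

	error = dsl_pool_hold(dsname, tag, &dp);
	if (error != 0)
		return (error);
	error = dsl_dataset_hold(dp, dsname, tag, &ds);
	if (error != 0) {
		dsl_pool_rele(dp, tag);
		return (error);
	}

	/* ... perform checks while the pool and dataset are held ... */

	dsl_dataset_long_hold(ds, tag);	/* "zfs destroy" now fails EBUSY */
	dsl_pool_rele(dp, tag);		/* drop the dp_config_rwlock */

	/*
	 * ... perform the long-running task; the dataset cannot be
	 * destroyed, but may otherwise change arbitrarily ...
	 */

	dsl_dataset_long_rele(ds, tag);
	dsl_dataset_rele(ds, tag);
	return (0);
}
#endif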

int
dsl_pool_hold(const char *name, void *tag, dsl_pool_t **dp)
{
	spa_t *spa;
	int error;

	error = spa_open(name, &spa, tag);
	if (error == 0) {
		*dp = spa_get_dsl(spa);
		dsl_pool_config_enter(*dp, tag);
	}
	return (error);
}

void
dsl_pool_rele(dsl_pool_t *dp, void *tag)
{
	dsl_pool_config_exit(dp, tag);
	spa_close(dp->dp_spa, tag);
}

void
dsl_pool_config_enter(dsl_pool_t *dp, void *tag)
{
	/*
	 * We use a "reentrant" reader-writer lock, but not reentrantly.
	 *
	 * The rrwlock can (with the track_all flag) track all reading threads,
	 * which is very useful for debugging which code path failed to release
	 * the lock, and for verifying that the *current* thread does hold
	 * the lock.
	 *
	 * (Unlike a rwlock, which knows that N threads hold it for
	 * read, but not *which* threads, so rw_held(RW_READER) returns TRUE
	 * if any thread holds it for read, even if this thread doesn't).
	 */
	ASSERT(!rrw_held(&dp->dp_config_rwlock, RW_READER));
	rrw_enter(&dp->dp_config_rwlock, RW_READER, tag);
}

void
dsl_pool_config_enter_prio(dsl_pool_t *dp, void *tag)
{
	ASSERT(!rrw_held(&dp->dp_config_rwlock, RW_READER));
	rrw_enter_read_prio(&dp->dp_config_rwlock, tag);
}

void
dsl_pool_config_exit(dsl_pool_t *dp, void *tag)
{
	rrw_exit(&dp->dp_config_rwlock, tag);
}

boolean_t
dsl_pool_config_held(dsl_pool_t *dp)
{
	return (RRW_LOCK_HELD(&dp->dp_config_rwlock));
}

boolean_t
dsl_pool_config_held_writer(dsl_pool_t *dp)
{
	return (RRW_WRITE_HELD(&dp->dp_config_rwlock));
}

#if defined(_KERNEL)
EXPORT_SYMBOL(dsl_pool_config_enter);
EXPORT_SYMBOL(dsl_pool_config_exit);

/* BEGIN CSTYLED */
/* zfs_dirty_data_max_percent only applied at module load in arc_init(). */
module_param(zfs_dirty_data_max_percent, int, 0444);
MODULE_PARM_DESC(zfs_dirty_data_max_percent, "percent of ram can be dirty");

/* zfs_dirty_data_max_max_percent only applied at module load in arc_init(). */
module_param(zfs_dirty_data_max_max_percent, int, 0444);
MODULE_PARM_DESC(zfs_dirty_data_max_max_percent,
	"zfs_dirty_data_max upper bound as % of RAM");

module_param(zfs_delay_min_dirty_percent, int, 0644);
MODULE_PARM_DESC(zfs_delay_min_dirty_percent, "transaction delay threshold");

module_param(zfs_dirty_data_max, ulong, 0644);
MODULE_PARM_DESC(zfs_dirty_data_max, "determines the dirty space limit");

/* zfs_dirty_data_max_max only applied at module load in arc_init(). */
module_param(zfs_dirty_data_max_max, ulong, 0444);
MODULE_PARM_DESC(zfs_dirty_data_max_max,
	"zfs_dirty_data_max upper bound in bytes");

module_param(zfs_dirty_data_sync_percent, int, 0644);
MODULE_PARM_DESC(zfs_dirty_data_sync_percent,
	"dirty data txg sync threshold as a percentage of zfs_dirty_data_max");

module_param(zfs_delay_scale, ulong, 0644);
MODULE_PARM_DESC(zfs_delay_scale, "how quickly delay approaches infinity");

module_param(zfs_sync_taskq_batch_pct, int, 0644);
MODULE_PARM_DESC(zfs_sync_taskq_batch_pct,
	"max percent of CPUs that are used to sync dirty data");

module_param(zfs_zil_clean_taskq_nthr_pct, int, 0644);
MODULE_PARM_DESC(zfs_zil_clean_taskq_nthr_pct,
	"max percent of CPUs that are used per dp_sync_taskq");

module_param(zfs_zil_clean_taskq_minalloc, int, 0644);
MODULE_PARM_DESC(zfs_zil_clean_taskq_minalloc,
	"number of taskq entries that are pre-populated");

module_param(zfs_zil_clean_taskq_maxalloc, int, 0644);
MODULE_PARM_DESC(zfs_zil_clean_taskq_maxalloc,
	"max number of taskq entries that are cached");

/* END CSTYLED */
#endif