/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2020 by Delphix. All rights reserved.
 * Copyright (c) 2013 Steven Hartland. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright 2016 Nexenta Systems, Inc. All rights reserved.
 */

#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_scan.h>
#include <sys/dnode.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/arc.h>
#include <sys/zap.h>
#include <sys/zio.h>
#include <sys/zfs_context.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_znode.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab_impl.h>
#include <sys/bptree.h>
#include <sys/zfeature.h>
#include <sys/zil_impl.h>
#include <sys/dsl_userhold.h>
#include <sys/trace_zfs.h>
#include <sys/mmp.h>

/*
 * ZFS Write Throttle
 * ------------------
 *
 * ZFS must limit the rate of incoming writes to the rate at which it is able
 * to sync data modifications to the backend storage. Throttling by too much
 * creates an artificial limit; throttling by too little can only be sustained
 * for short periods and would lead to highly lumpy performance. On a per-pool
 * basis, ZFS tracks the amount of modified (dirty) data. As operations change
 * data, the amount of dirty data increases; as ZFS syncs out data, the amount
 * of dirty data decreases. When the amount of dirty data exceeds a
 * predetermined threshold, further modifications are blocked until the amount
 * of dirty data decreases (as data is synced out).
 *
 * The limit on dirty data is tunable, and should be adjusted according to
 * both the IO capacity and available memory of the system. The larger the
 * window, the more ZFS is able to aggregate and amortize metadata (and data)
 * changes. However, memory is a limited resource, and allowing for more dirty
 * data comes at the cost of keeping other useful data in memory (for example
 * ZFS data cached by the ARC).
 *
 * Implementation
 *
 * As buffers are modified, dsl_pool_willuse_space() increments both the per-
 * txg (dp_dirty_pertxg[]) and poolwide (dp_dirty_total) accounting of
 * dirty space used; dsl_pool_dirty_space() decrements those values as data
 * is synced out from dsl_pool_sync(). While only the poolwide value is
 * relevant, the per-txg value is useful for debugging. The tunable
 * zfs_dirty_data_max determines the dirty space limit. Once that value is
 * exceeded, new writes are halted until space frees up.
 *
 * The zfs_dirty_data_sync_percent tunable dictates the threshold at which we
 * ensure that there is a txg syncing (see the comment in txg.c for a full
 * description of transaction group stages).
 *
 * The IO scheduler uses both the dirty space limit and current amount of
 * dirty data as inputs. Those values affect the number of concurrent IOs ZFS
 * issues. See the comment in vdev_queue.c for details of the IO scheduler.
 *
 * The delay is also calculated based on the amount of dirty data. See the
 * comment above dmu_tx_delay() for details.
 */
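
/*
 * Illustrative sketch, not part of the implementation: per the comment
 * above dmu_tx_delay() (in dmu_tx.c), once dirty data exceeds
 * zfs_delay_min_dirty_percent of zfs_dirty_data_max, each transaction
 * is delayed by roughly:
 *
 *	delay = zfs_delay_scale * (dirty - min) / (max - dirty)
 *
 * For example, assuming zfs_dirty_data_max = 4GB (so min = 60% = 2.4GB)
 * and the default zfs_delay_scale of 500,000, a pool with 3GB of dirty
 * data would see roughly 500000 * 0.6GB / 1GB ~= 300us of delay per
 * transaction.
 */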

/*
 * zfs_dirty_data_max will be set to zfs_dirty_data_max_percent% of all memory,
 * capped at zfs_dirty_data_max_max. It can also be overridden with a module
 * parameter.
 */
unsigned long zfs_dirty_data_max = 0;
unsigned long zfs_dirty_data_max_max = 0;
int zfs_dirty_data_max_percent = 10;
int zfs_dirty_data_max_max_percent = 25;
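
/*
 * A sketch of how the default is derived at module load (the exact
 * clamping lives in arc_init(), per the notes next to the
 * ZFS_MODULE_PARAM declarations at the bottom of this file, and may
 * differ in detail):
 *
 *	zfs_dirty_data_max = MIN(allmem * zfs_dirty_data_max_percent / 100,
 *	    zfs_dirty_data_max_max);
 */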

/*
 * zfs_wrlog_data_max, the upper limit of TX_WRITE log data.
 * Once it is reached, write operations are blocked until log data is
 * cleared out after a txg sync.
 * It only counts TX_WRITE log with WR_COPIED or WR_NEED_COPY.
 */
unsigned long zfs_wrlog_data_max = 0;

/*
 * If there's at least this much dirty data (as a percentage of
 * zfs_dirty_data_max), push out a txg. This should be less than
 * zfs_vdev_async_write_active_min_dirty_percent.
 */
static int zfs_dirty_data_sync_percent = 20;

/*
 * Once there is this amount of dirty data, dmu_tx_delay() will kick in
 * and delay each transaction.
 * This value should be >= zfs_vdev_async_write_active_max_dirty_percent.
 */
int zfs_delay_min_dirty_percent = 60;

/*
 * This controls how quickly the delay approaches infinity.
 * Larger values cause it to delay more for a given amount of dirty data.
 * Therefore larger values will cause there to be less dirty data for a
 * given throughput.
 *
 * For the smoothest delay, this value should be about 1 billion divided
 * by the maximum number of operations per second. This will smoothly
 * handle between 10x and 1/10th this number.
 *
 * Note: zfs_delay_scale * zfs_dirty_data_max must be < 2^64, due to the
 * multiply in dmu_tx_delay().
 */
unsigned long zfs_delay_scale = 1000 * 1000 * 1000 / 2000;

/*
 * This determines the number of threads used by the dp_sync_taskq.
 */
static int zfs_sync_taskq_batch_pct = 75;

/*
 * These tunables determine the behavior of how zil_itxg_clean() is
 * called via zil_clean() in the context of spa_sync(). When an itxg
 * list needs to be cleaned, TQ_NOSLEEP will be used when dispatching.
 * If the dispatch fails, the call to zil_itxg_clean() will occur
 * synchronously in the context of spa_sync(), which can negatively
 * impact the performance of spa_sync() (e.g. in the case of the itxg
 * list having a large number of itxs that need to be cleaned).
 *
 * Thus, these tunables can be used to manipulate the behavior of the
 * taskq used by zil_clean(); they determine the number of taskq entries
 * that are pre-populated when the taskq is first created (via the
 * "zfs_zil_clean_taskq_minalloc" tunable) and the maximum number of
 * taskq entries that are cached after an on-demand allocation (via the
 * "zfs_zil_clean_taskq_maxalloc" tunable).
 *
 * The idea being, we want to try reasonably hard to ensure there will
 * already be a taskq entry pre-allocated by the time that it is needed
 * by zil_clean(). This way, we can avoid the possibility of an
 * on-demand allocation of a new taskq entry from failing, which would
 * result in zil_itxg_clean() being called synchronously from zil_clean()
 * (which can adversely affect performance of spa_sync()).
 *
 * Additionally, the number of threads used by the taskq can be
 * configured via the "zfs_zil_clean_taskq_nthr_pct" tunable.
 */
static int zfs_zil_clean_taskq_nthr_pct = 100;
static int zfs_zil_clean_taskq_minalloc = 1024;
static int zfs_zil_clean_taskq_maxalloc = 1024 * 1024;

int
dsl_pool_open_special_dir(dsl_pool_t *dp, const char *name, dsl_dir_t **ddp)
{
	uint64_t obj;
	int err;

	err = zap_lookup(dp->dp_meta_objset,
	    dsl_dir_phys(dp->dp_root_dir)->dd_child_dir_zapobj,
	    name, sizeof (obj), 1, &obj);
	if (err)
		return (err);

	return (dsl_dir_hold_obj(dp, obj, name, dp, ddp));
}

static dsl_pool_t *
dsl_pool_open_impl(spa_t *spa, uint64_t txg)
{
	dsl_pool_t *dp;
	blkptr_t *bp = spa_get_rootblkptr(spa);

	dp = kmem_zalloc(sizeof (dsl_pool_t), KM_SLEEP);
	dp->dp_spa = spa;
	dp->dp_meta_rootbp = *bp;
	rrw_init(&dp->dp_config_rwlock, B_TRUE);
	txg_init(dp, txg);
	mmp_init(spa);

	txg_list_create(&dp->dp_dirty_datasets, spa,
	    offsetof(dsl_dataset_t, ds_dirty_link));
	txg_list_create(&dp->dp_dirty_zilogs, spa,
	    offsetof(zilog_t, zl_dirty_link));
	txg_list_create(&dp->dp_dirty_dirs, spa,
	    offsetof(dsl_dir_t, dd_dirty_link));
	txg_list_create(&dp->dp_sync_tasks, spa,
	    offsetof(dsl_sync_task_t, dst_node));
	txg_list_create(&dp->dp_early_sync_tasks, spa,
	    offsetof(dsl_sync_task_t, dst_node));

	dp->dp_sync_taskq = taskq_create("dp_sync_taskq",
	    zfs_sync_taskq_batch_pct, minclsyspri, 1, INT_MAX,
	    TASKQ_THREADS_CPU_PCT);

	dp->dp_zil_clean_taskq = taskq_create("dp_zil_clean_taskq",
	    zfs_zil_clean_taskq_nthr_pct, minclsyspri,
	    zfs_zil_clean_taskq_minalloc,
	    zfs_zil_clean_taskq_maxalloc,
	    TASKQ_PREPOPULATE | TASKQ_THREADS_CPU_PCT);

	mutex_init(&dp->dp_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&dp->dp_spaceavail_cv, NULL, CV_DEFAULT, NULL);

	aggsum_init(&dp->dp_wrlog_total, 0);
	for (int i = 0; i < TXG_SIZE; i++) {
		aggsum_init(&dp->dp_wrlog_pertxg[i], 0);
	}

	dp->dp_zrele_taskq = taskq_create("z_zrele", 100, defclsyspri,
	    boot_ncpus * 8, INT_MAX, TASKQ_PREPOPULATE | TASKQ_DYNAMIC |
	    TASKQ_THREADS_CPU_PCT);
	dp->dp_unlinked_drain_taskq = taskq_create("z_unlinked_drain",
	    100, defclsyspri, boot_ncpus, INT_MAX,
	    TASKQ_PREPOPULATE | TASKQ_DYNAMIC | TASKQ_THREADS_CPU_PCT);

	return (dp);
}

int
dsl_pool_init(spa_t *spa, uint64_t txg, dsl_pool_t **dpp)
{
	int err;
	dsl_pool_t *dp = dsl_pool_open_impl(spa, txg);

	/*
	 * Initialize the caller's dsl_pool_t structure before we actually open
	 * the meta objset. This is done because a self-healing write zio may
	 * be issued as part of dmu_objset_open_impl() and the spa needs its
	 * dsl_pool_t initialized in order to handle the write.
	 */
	*dpp = dp;

	err = dmu_objset_open_impl(spa, NULL, &dp->dp_meta_rootbp,
	    &dp->dp_meta_objset);
	if (err != 0) {
		dsl_pool_close(dp);
		*dpp = NULL;
	}

	return (err);
}

int
dsl_pool_open(dsl_pool_t *dp)
{
	int err;
	dsl_dir_t *dd;
	dsl_dataset_t *ds;
	uint64_t obj;

	rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);
	err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_ROOT_DATASET, sizeof (uint64_t), 1,
	    &dp->dp_root_dir_obj);
	if (err)
		goto out;

	err = dsl_dir_hold_obj(dp, dp->dp_root_dir_obj,
	    NULL, dp, &dp->dp_root_dir);
	if (err)
		goto out;

	err = dsl_pool_open_special_dir(dp, MOS_DIR_NAME, &dp->dp_mos_dir);
	if (err)
		goto out;

	if (spa_version(dp->dp_spa) >= SPA_VERSION_ORIGIN) {
		err = dsl_pool_open_special_dir(dp, ORIGIN_DIR_NAME, &dd);
		if (err)
			goto out;
		err = dsl_dataset_hold_obj(dp,
		    dsl_dir_phys(dd)->dd_head_dataset_obj, FTAG, &ds);
		if (err == 0) {
			err = dsl_dataset_hold_obj(dp,
			    dsl_dataset_phys(ds)->ds_prev_snap_obj, dp,
			    &dp->dp_origin_snap);
			dsl_dataset_rele(ds, FTAG);
		}
		dsl_dir_rele(dd, dp);
		if (err)
			goto out;
	}

	if (spa_version(dp->dp_spa) >= SPA_VERSION_DEADLISTS) {
		err = dsl_pool_open_special_dir(dp, FREE_DIR_NAME,
		    &dp->dp_free_dir);
		if (err)
			goto out;

		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_FREE_BPOBJ, sizeof (uint64_t), 1, &obj);
		if (err)
			goto out;
		VERIFY0(bpobj_open(&dp->dp_free_bpobj,
		    dp->dp_meta_objset, obj));
	}

	if (spa_feature_is_active(dp->dp_spa, SPA_FEATURE_OBSOLETE_COUNTS)) {
		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_OBSOLETE_BPOBJ, sizeof (uint64_t), 1, &obj);
		if (err == 0) {
			VERIFY0(bpobj_open(&dp->dp_obsolete_bpobj,
			    dp->dp_meta_objset, obj));
		} else if (err == ENOENT) {
			/*
			 * We might not have created the remap bpobj yet.
			 */
			err = 0;
		} else {
			goto out;
		}
	}

	/*
	 * Note: errors ignored, because these special dirs, used for
	 * space accounting, are only created on demand.
	 */
	(void) dsl_pool_open_special_dir(dp, LEAK_DIR_NAME,
	    &dp->dp_leak_dir);

	if (spa_feature_is_active(dp->dp_spa, SPA_FEATURE_ASYNC_DESTROY)) {
		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_BPTREE_OBJ, sizeof (uint64_t), 1,
		    &dp->dp_bptree_obj);
		if (err != 0)
			goto out;
	}

	if (spa_feature_is_active(dp->dp_spa, SPA_FEATURE_EMPTY_BPOBJ)) {
		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_EMPTY_BPOBJ, sizeof (uint64_t), 1,
		    &dp->dp_empty_bpobj);
		if (err != 0)
			goto out;
	}

	err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_TMP_USERREFS, sizeof (uint64_t), 1,
	    &dp->dp_tmp_userrefs_obj);
	if (err == ENOENT)
		err = 0;
	if (err)
		goto out;

	err = dsl_scan_init(dp, dp->dp_tx.tx_open_txg);

out:
	rrw_exit(&dp->dp_config_rwlock, FTAG);
	return (err);
}

void
dsl_pool_close(dsl_pool_t *dp)
{
	/*
	 * Drop our references from dsl_pool_open().
	 *
	 * Since we held the origin_snap from "syncing" context (which
	 * includes pool-opening context), it actually only got a "ref"
	 * and not a hold, so just drop that here.
	 */
	if (dp->dp_origin_snap != NULL)
		dsl_dataset_rele(dp->dp_origin_snap, dp);
	if (dp->dp_mos_dir != NULL)
		dsl_dir_rele(dp->dp_mos_dir, dp);
	if (dp->dp_free_dir != NULL)
		dsl_dir_rele(dp->dp_free_dir, dp);
	if (dp->dp_leak_dir != NULL)
		dsl_dir_rele(dp->dp_leak_dir, dp);
	if (dp->dp_root_dir != NULL)
		dsl_dir_rele(dp->dp_root_dir, dp);

	bpobj_close(&dp->dp_free_bpobj);
	bpobj_close(&dp->dp_obsolete_bpobj);

	/* undo the dmu_objset_open_impl(mos) from dsl_pool_open() */
	if (dp->dp_meta_objset != NULL)
		dmu_objset_evict(dp->dp_meta_objset);

	txg_list_destroy(&dp->dp_dirty_datasets);
	txg_list_destroy(&dp->dp_dirty_zilogs);
	txg_list_destroy(&dp->dp_sync_tasks);
	txg_list_destroy(&dp->dp_early_sync_tasks);
	txg_list_destroy(&dp->dp_dirty_dirs);

	taskq_destroy(dp->dp_zil_clean_taskq);
	taskq_destroy(dp->dp_sync_taskq);

	/*
	 * We can't set retry to TRUE since we're explicitly specifying
	 * a spa to flush. This is good enough; any missed buffers for
	 * this spa won't cause trouble, and they'll eventually fall
	 * out of the ARC just like any other unused buffer.
	 */
	arc_flush(dp->dp_spa, FALSE);

	mmp_fini(dp->dp_spa);
	txg_fini(dp);
	dsl_scan_fini(dp);
	dmu_buf_user_evict_wait();

	rrw_destroy(&dp->dp_config_rwlock);
	mutex_destroy(&dp->dp_lock);
	cv_destroy(&dp->dp_spaceavail_cv);

	ASSERT0(aggsum_value(&dp->dp_wrlog_total));
	aggsum_fini(&dp->dp_wrlog_total);
	for (int i = 0; i < TXG_SIZE; i++) {
		ASSERT0(aggsum_value(&dp->dp_wrlog_pertxg[i]));
		aggsum_fini(&dp->dp_wrlog_pertxg[i]);
	}

	taskq_destroy(dp->dp_unlinked_drain_taskq);
	taskq_destroy(dp->dp_zrele_taskq);
	if (dp->dp_blkstats != NULL) {
		mutex_destroy(&dp->dp_blkstats->zab_lock);
		vmem_free(dp->dp_blkstats, sizeof (zfs_all_blkstats_t));
	}
	kmem_free(dp, sizeof (dsl_pool_t));
}

void
dsl_pool_create_obsolete_bpobj(dsl_pool_t *dp, dmu_tx_t *tx)
{
	uint64_t obj;
	/*
	 * Currently, we only create the obsolete_bpobj where there are
	 * indirect vdevs with referenced mappings.
	 */
	ASSERT(spa_feature_is_active(dp->dp_spa, SPA_FEATURE_DEVICE_REMOVAL));
	/* create and open the obsolete_bpobj */
	obj = bpobj_alloc(dp->dp_meta_objset, SPA_OLD_MAXBLOCKSIZE, tx);
	VERIFY0(bpobj_open(&dp->dp_obsolete_bpobj, dp->dp_meta_objset, obj));
	VERIFY0(zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_OBSOLETE_BPOBJ, sizeof (uint64_t), 1, &obj, tx));
	spa_feature_incr(dp->dp_spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
}

void
dsl_pool_destroy_obsolete_bpobj(dsl_pool_t *dp, dmu_tx_t *tx)
{
	spa_feature_decr(dp->dp_spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
	VERIFY0(zap_remove(dp->dp_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_OBSOLETE_BPOBJ, tx));
	bpobj_free(dp->dp_meta_objset,
	    dp->dp_obsolete_bpobj.bpo_object, tx);
	bpobj_close(&dp->dp_obsolete_bpobj);
}

dsl_pool_t *
dsl_pool_create(spa_t *spa, nvlist_t *zplprops __attribute__((unused)),
    dsl_crypto_params_t *dcp, uint64_t txg)
{
	int err;
	dsl_pool_t *dp = dsl_pool_open_impl(spa, txg);
	dmu_tx_t *tx = dmu_tx_create_assigned(dp, txg);
#ifdef _KERNEL
	objset_t *os;
#else
	objset_t *os __attribute__((unused));
#endif
	dsl_dataset_t *ds;
	uint64_t obj;

	rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);

	/* create and open the MOS (meta-objset) */
	dp->dp_meta_objset = dmu_objset_create_impl(spa,
	    NULL, &dp->dp_meta_rootbp, DMU_OST_META, tx);
	spa->spa_meta_objset = dp->dp_meta_objset;

	/* create the pool directory */
	err = zap_create_claim(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_OT_OBJECT_DIRECTORY, DMU_OT_NONE, 0, tx);
	ASSERT0(err);

	/* Initialize scan structures */
	VERIFY0(dsl_scan_init(dp, txg));

	/* create and open the root dir */
	dp->dp_root_dir_obj = dsl_dir_create_sync(dp, NULL, NULL, tx);
	VERIFY0(dsl_dir_hold_obj(dp, dp->dp_root_dir_obj,
	    NULL, dp, &dp->dp_root_dir));

	/* create and open the meta-objset dir */
	(void) dsl_dir_create_sync(dp, dp->dp_root_dir, MOS_DIR_NAME, tx);
	VERIFY0(dsl_pool_open_special_dir(dp,
	    MOS_DIR_NAME, &dp->dp_mos_dir));

	if (spa_version(spa) >= SPA_VERSION_DEADLISTS) {
		/* create and open the free dir */
		(void) dsl_dir_create_sync(dp, dp->dp_root_dir,
		    FREE_DIR_NAME, tx);
		VERIFY0(dsl_pool_open_special_dir(dp,
		    FREE_DIR_NAME, &dp->dp_free_dir));

		/* create and open the free_bplist */
		obj = bpobj_alloc(dp->dp_meta_objset, SPA_OLD_MAXBLOCKSIZE, tx);
		VERIFY(zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_FREE_BPOBJ, sizeof (uint64_t), 1, &obj, tx) == 0);
		VERIFY0(bpobj_open(&dp->dp_free_bpobj,
		    dp->dp_meta_objset, obj));
	}

	if (spa_version(spa) >= SPA_VERSION_DSL_SCRUB)
		dsl_pool_create_origin(dp, tx);

	/*
	 * Some features may be needed when creating the root dataset, so we
	 * create the feature objects here.
	 */
	if (spa_version(spa) >= SPA_VERSION_FEATURES)
		spa_feature_create_zap_objects(spa, tx);

	if (dcp != NULL && dcp->cp_crypt != ZIO_CRYPT_OFF &&
	    dcp->cp_crypt != ZIO_CRYPT_INHERIT)
		spa_feature_enable(spa, SPA_FEATURE_ENCRYPTION, tx);

	/* create the root dataset */
	obj = dsl_dataset_create_sync_dd(dp->dp_root_dir, NULL, dcp, 0, tx);

	/* create the root objset */
	VERIFY0(dsl_dataset_hold_obj_flags(dp, obj,
	    DS_HOLD_FLAG_DECRYPT, FTAG, &ds));
	rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
	os = dmu_objset_create_impl(dp->dp_spa, ds,
	    dsl_dataset_get_blkptr(ds), DMU_OST_ZFS, tx);
	rrw_exit(&ds->ds_bp_rwlock, FTAG);
#ifdef _KERNEL
	zfs_create_fs(os, kcred, zplprops, tx);
#endif
	dsl_dataset_rele_flags(ds, DS_HOLD_FLAG_DECRYPT, FTAG);

	dmu_tx_commit(tx);

	rrw_exit(&dp->dp_config_rwlock, FTAG);

	return (dp);
}

/*
 * Account for the meta-objset space in its placeholder dsl_dir.
 */
void
dsl_pool_mos_diduse_space(dsl_pool_t *dp,
    int64_t used, int64_t comp, int64_t uncomp)
{
	ASSERT3U(comp, ==, uncomp); /* it's all metadata */
	mutex_enter(&dp->dp_lock);
	dp->dp_mos_used_delta += used;
	dp->dp_mos_compressed_delta += comp;
	dp->dp_mos_uncompressed_delta += uncomp;
	mutex_exit(&dp->dp_lock);
}

static void
dsl_pool_sync_mos(dsl_pool_t *dp, dmu_tx_t *tx)
{
	zio_t *zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
	dmu_objset_sync(dp->dp_meta_objset, zio, tx);
	VERIFY0(zio_wait(zio));
	dmu_objset_sync_done(dp->dp_meta_objset, tx);
	taskq_wait(dp->dp_sync_taskq);
	multilist_destroy(&dp->dp_meta_objset->os_synced_dnodes);

	dprintf_bp(&dp->dp_meta_rootbp, "meta objset rootbp is %s", "");
	spa_set_rootblkptr(dp->dp_spa, &dp->dp_meta_rootbp);
}

static void
dsl_pool_dirty_delta(dsl_pool_t *dp, int64_t delta)
{
	ASSERT(MUTEX_HELD(&dp->dp_lock));

	if (delta < 0)
		ASSERT3U(-delta, <=, dp->dp_dirty_total);

	dp->dp_dirty_total += delta;

	/*
	 * Note: we signal even when increasing dp_dirty_total.
	 * This ensures forward progress -- each thread wakes the next waiter.
	 */
	if (dp->dp_dirty_total < zfs_dirty_data_max)
		cv_signal(&dp->dp_spaceavail_cv);
}

void
dsl_pool_wrlog_count(dsl_pool_t *dp, int64_t size, uint64_t txg)
{
	ASSERT3S(size, >=, 0);

	aggsum_add(&dp->dp_wrlog_pertxg[txg & TXG_MASK], size);
	aggsum_add(&dp->dp_wrlog_total, size);

	/* Choose a value slightly bigger than min dirty sync bytes */
	uint64_t sync_min =
	    zfs_dirty_data_max * (zfs_dirty_data_sync_percent + 10) / 100;
	if (aggsum_compare(&dp->dp_wrlog_pertxg[txg & TXG_MASK], sync_min) > 0)
		txg_kick(dp, txg);
}

boolean_t
dsl_pool_wrlog_over_max(dsl_pool_t *dp)
{
	return (aggsum_compare(&dp->dp_wrlog_total, zfs_wrlog_data_max) > 0);
}

static void
dsl_pool_wrlog_clear(dsl_pool_t *dp, uint64_t txg)
{
	int64_t delta;
	delta = -(int64_t)aggsum_value(&dp->dp_wrlog_pertxg[txg & TXG_MASK]);
	aggsum_add(&dp->dp_wrlog_pertxg[txg & TXG_MASK], delta);
	aggsum_add(&dp->dp_wrlog_total, delta);
}

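/*
 * Worked example (a sketch using the defaults in this file): the kick
 * threshold computed in dsl_pool_wrlog_count() is
 * zfs_dirty_data_sync_percent + 10 = 30% of zfs_dirty_data_max, so with
 * zfs_dirty_data_max = 4GB a single txg accumulating more than ~1.2GB
 * of TX_WRITE log data triggers txg_kick().
 */
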
#ifdef ZFS_DEBUG
static boolean_t
dsl_early_sync_task_verify(dsl_pool_t *dp, uint64_t txg)
{
	spa_t *spa = dp->dp_spa;
	vdev_t *rvd = spa->spa_root_vdev;

	for (uint64_t c = 0; c < rvd->vdev_children; c++) {
		vdev_t *vd = rvd->vdev_child[c];
		txg_list_t *tl = &vd->vdev_ms_list;
		metaslab_t *ms;

		for (ms = txg_list_head(tl, TXG_CLEAN(txg)); ms;
		    ms = txg_list_next(tl, ms, TXG_CLEAN(txg))) {
			VERIFY(range_tree_is_empty(ms->ms_freeing));
			VERIFY(range_tree_is_empty(ms->ms_checkpointing));
		}
	}

	return (B_TRUE);
}
#else
#define	dsl_early_sync_task_verify(dp, txg) \
	((void) sizeof (dp), (void) sizeof (txg), B_TRUE)
#endif

void
dsl_pool_sync(dsl_pool_t *dp, uint64_t txg)
{
	zio_t *zio;
	dmu_tx_t *tx;
	dsl_dir_t *dd;
	dsl_dataset_t *ds;
	objset_t *mos = dp->dp_meta_objset;
	list_t synced_datasets;

	list_create(&synced_datasets, sizeof (dsl_dataset_t),
	    offsetof(dsl_dataset_t, ds_synced_link));

	tx = dmu_tx_create_assigned(dp, txg);

	/*
	 * Run all early sync tasks before writing out any dirty blocks.
	 * For more info on early sync tasks see block comment in
	 * dsl_early_sync_task().
	 */
	if (!txg_list_empty(&dp->dp_early_sync_tasks, txg)) {
		dsl_sync_task_t *dst;

		ASSERT3U(spa_sync_pass(dp->dp_spa), ==, 1);
		while ((dst =
		    txg_list_remove(&dp->dp_early_sync_tasks, txg)) != NULL) {
			ASSERT(dsl_early_sync_task_verify(dp, txg));
			dsl_sync_task_sync(dst, tx);
		}
		ASSERT(dsl_early_sync_task_verify(dp, txg));
	}

	/*
	 * Write out all dirty blocks of dirty datasets.
	 */
	zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
	while ((ds = txg_list_remove(&dp->dp_dirty_datasets, txg)) != NULL) {
		/*
		 * We must not sync any non-MOS datasets twice, because
		 * we may have taken a snapshot of them. However, we
		 * may sync newly-created datasets on pass 2.
		 */
		ASSERT(!list_link_active(&ds->ds_synced_link));
		list_insert_tail(&synced_datasets, ds);
		dsl_dataset_sync(ds, zio, tx);
	}
	VERIFY0(zio_wait(zio));

	/*
	 * Update the long range free counter after
	 * we're done syncing user data
	 */
	mutex_enter(&dp->dp_lock);
	ASSERT(spa_sync_pass(dp->dp_spa) == 1 ||
	    dp->dp_long_free_dirty_pertxg[txg & TXG_MASK] == 0);
	dp->dp_long_free_dirty_pertxg[txg & TXG_MASK] = 0;
	mutex_exit(&dp->dp_lock);

	/*
	 * After the data blocks have been written (ensured by the zio_wait()
	 * above), update the user/group/project space accounting. This happens
	 * in tasks dispatched to dp_sync_taskq, so wait for them before
	 * continuing.
	 */
	for (ds = list_head(&synced_datasets); ds != NULL;
	    ds = list_next(&synced_datasets, ds)) {
		dmu_objset_sync_done(ds->ds_objset, tx);
	}
	taskq_wait(dp->dp_sync_taskq);

	/*
	 * Sync the datasets again to push out the changes due to
	 * userspace updates. This must be done before we process the
	 * sync tasks, so that any snapshots will have the correct
	 * user accounting information (and we won't get confused
	 * about which blocks are part of the snapshot).
	 */
	zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
	while ((ds = txg_list_remove(&dp->dp_dirty_datasets, txg)) != NULL) {
		objset_t *os = ds->ds_objset;

		ASSERT(list_link_active(&ds->ds_synced_link));
		dmu_buf_rele(ds->ds_dbuf, ds);
		dsl_dataset_sync(ds, zio, tx);

		/*
		 * Release any key mappings created by calls to
		 * dsl_dataset_dirty() from the userquota accounting
		 * code paths.
		 */
		if (os->os_encrypted && !os->os_raw_receive &&
		    !os->os_next_write_raw[txg & TXG_MASK]) {
			ASSERT3P(ds->ds_key_mapping, !=, NULL);
			key_mapping_rele(dp->dp_spa, ds->ds_key_mapping, ds);
		}
	}
	VERIFY0(zio_wait(zio));

	/*
	 * Now that the datasets have been completely synced, we can
	 * clean up our in-memory structures accumulated while syncing:
	 *
	 *  - move dead blocks from the pending deadlist and livelists
	 *    to the on-disk versions
	 *  - release hold from dsl_dataset_dirty()
	 *  - release key mapping hold from dsl_dataset_dirty()
	 */
	while ((ds = list_remove_head(&synced_datasets)) != NULL) {
		objset_t *os = ds->ds_objset;

		if (os->os_encrypted && !os->os_raw_receive &&
		    !os->os_next_write_raw[txg & TXG_MASK]) {
			ASSERT3P(ds->ds_key_mapping, !=, NULL);
			key_mapping_rele(dp->dp_spa, ds->ds_key_mapping, ds);
		}

		dsl_dataset_sync_done(ds, tx);
	}

	while ((dd = txg_list_remove(&dp->dp_dirty_dirs, txg)) != NULL) {
		dsl_dir_sync(dd, tx);
	}

	/*
	 * The MOS's space is accounted for in the pool/$MOS
	 * (dp_mos_dir). We can't modify the mos while we're syncing
	 * it, so we remember the deltas and apply them here.
	 */
	if (dp->dp_mos_used_delta != 0 || dp->dp_mos_compressed_delta != 0 ||
	    dp->dp_mos_uncompressed_delta != 0) {
		dsl_dir_diduse_space(dp->dp_mos_dir, DD_USED_HEAD,
		    dp->dp_mos_used_delta,
		    dp->dp_mos_compressed_delta,
		    dp->dp_mos_uncompressed_delta, tx);
		dp->dp_mos_used_delta = 0;
		dp->dp_mos_compressed_delta = 0;
		dp->dp_mos_uncompressed_delta = 0;
	}

	if (dmu_objset_is_dirty(mos, txg)) {
		dsl_pool_sync_mos(dp, tx);
	}

	/*
	 * We have written all of the accounted dirty data, so our
	 * dp_space_towrite should now be zero. However, some seldom-used
	 * code paths do not adhere to this (e.g. dbuf_undirty()). Shore up
	 * the accounting of any dirtied space now.
	 *
	 * Note that, besides any dirty data from datasets, the amount of
	 * dirty data in the MOS is also accounted by the pool. Therefore,
	 * we want to do this cleanup after dsl_pool_sync_mos() so we don't
	 * attempt to update the accounting for the same dirty data twice.
	 * (i.e. at this point we only update the accounting for the space
	 * that we know that we "leaked").
	 */
	dsl_pool_undirty_space(dp, dp->dp_dirty_pertxg[txg & TXG_MASK], txg);

	/*
	 * If we modify a dataset in the same txg that we want to destroy it,
	 * its dsl_dir's dd_dbuf will be dirty, and thus have a hold on it.
	 * dsl_dir_destroy_check() will fail if there are unexpected holds.
	 * Therefore, we want to sync the MOS (thus syncing the dd_dbuf
	 * and clearing the hold on it) before we process the sync_tasks.
	 * The MOS data dirtied by the sync_tasks will be synced on the next
	 * pass.
	 */
	if (!txg_list_empty(&dp->dp_sync_tasks, txg)) {
		dsl_sync_task_t *dst;
		/*
		 * No more sync tasks should have been added while we
		 * were syncing.
		 */
		ASSERT3U(spa_sync_pass(dp->dp_spa), ==, 1);
		while ((dst = txg_list_remove(&dp->dp_sync_tasks, txg)) != NULL)
			dsl_sync_task_sync(dst, tx);
	}

	dmu_tx_commit(tx);

	DTRACE_PROBE2(dsl_pool_sync__done, dsl_pool_t *dp, dp, uint64_t, txg);
}

void
dsl_pool_sync_done(dsl_pool_t *dp, uint64_t txg)
{
	zilog_t *zilog;

	while ((zilog = txg_list_head(&dp->dp_dirty_zilogs, txg))) {
		dsl_dataset_t *ds = dmu_objset_ds(zilog->zl_os);
		/*
		 * We don't remove the zilog from the dp_dirty_zilogs
		 * list until after we've cleaned it. This ensures that
		 * callers of zilog_is_dirty() receive an accurate
		 * answer when they are racing with the spa sync thread.
		 */
		zil_clean(zilog, txg);
		(void) txg_list_remove_this(&dp->dp_dirty_zilogs, zilog, txg);
		ASSERT(!dmu_objset_is_dirty(zilog->zl_os, txg));
		dmu_buf_rele(ds->ds_dbuf, zilog);
	}

	dsl_pool_wrlog_clear(dp, txg);

	ASSERT(!dmu_objset_is_dirty(dp->dp_meta_objset, txg));
}

/*
 * TRUE if the current thread is the tx_sync_thread or if we
 * are being called from SPA context during pool initialization.
 */
int
dsl_pool_sync_context(dsl_pool_t *dp)
{
	return (curthread == dp->dp_tx.tx_sync_thread ||
	    spa_is_initializing(dp->dp_spa) ||
	    taskq_member(dp->dp_sync_taskq, curthread));
}

/*
 * This function returns the amount of allocatable space in the pool
 * minus whatever space is currently reserved by ZFS for specific
 * purposes. Specifically:
 *
 * 1] Any reserved SLOP space
 * 2] Any space used by the checkpoint
 * 3] Any space used for deferred frees
 *
 * The latter 2 are especially important because they are needed to
 * rectify the SPA's and DMU's different understanding of how much space
 * is used. Now the DMU is aware of that extra space tracked by the SPA
 * without having to maintain a separate special dir (e.g. similar to
 * $MOS, $FREEING, and $LEAKED).
 *
 * Note: By deferred frees here, we mean the frees that were deferred
 * in spa_sync() after sync pass 1 (spa_deferred_bpobj), and not the
 * segments placed in ms_defer trees during metaslab_sync_done().
 */
uint64_t
dsl_pool_adjustedsize(dsl_pool_t *dp, zfs_space_check_t slop_policy)
{
	spa_t *spa = dp->dp_spa;
	uint64_t space, resv, adjustedsize;
	uint64_t spa_deferred_frees =
	    spa->spa_deferred_bpobj.bpo_phys->bpo_bytes;

	space = spa_get_dspace(spa)
	    - spa_get_checkpoint_space(spa) - spa_deferred_frees;
	resv = spa_get_slop_space(spa);

	switch (slop_policy) {
	case ZFS_SPACE_CHECK_NORMAL:
		break;
	case ZFS_SPACE_CHECK_RESERVED:
		resv >>= 1;
		break;
	case ZFS_SPACE_CHECK_EXTRA_RESERVED:
		resv >>= 2;
		break;
	case ZFS_SPACE_CHECK_NONE:
		resv = 0;
		break;
	default:
		panic("invalid slop policy value: %d", slop_policy);
		break;
	}
	adjustedsize = (space >= resv) ? (space - resv) : 0;

	return (adjustedsize);
}
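
/*
 * Worked example (a sketch, assuming slop = 1/32 of the pool, i.e. the
 * default spa_slop_shift of 5, and ignoring its min/max bounds): on a
 * 100GB pool with no checkpoint and no deferred frees,
 * ZFS_SPACE_CHECK_NORMAL yields 100GB - 3.125GB = 96.875GB,
 * ZFS_SPACE_CHECK_RESERVED halves the reservation (~98.4GB usable), and
 * ZFS_SPACE_CHECK_EXTRA_RESERVED quarters it (~99.2GB usable).
 */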

uint64_t
dsl_pool_unreserved_space(dsl_pool_t *dp, zfs_space_check_t slop_policy)
{
	uint64_t poolsize = dsl_pool_adjustedsize(dp, slop_policy);
	uint64_t deferred =
	    metaslab_class_get_deferred(spa_normal_class(dp->dp_spa));
	uint64_t quota = (poolsize >= deferred) ? (poolsize - deferred) : 0;
	return (quota);
}

boolean_t
dsl_pool_need_dirty_delay(dsl_pool_t *dp)
{
	uint64_t delay_min_bytes =
	    zfs_dirty_data_max * zfs_delay_min_dirty_percent / 100;

	mutex_enter(&dp->dp_lock);
	uint64_t dirty = dp->dp_dirty_total;
	mutex_exit(&dp->dp_lock);

	return (dirty > delay_min_bytes);
}

static boolean_t
dsl_pool_need_dirty_sync(dsl_pool_t *dp, uint64_t txg)
{
	ASSERT(MUTEX_HELD(&dp->dp_lock));

	uint64_t dirty_min_bytes =
	    zfs_dirty_data_max * zfs_dirty_data_sync_percent / 100;
	uint64_t dirty = dp->dp_dirty_pertxg[txg & TXG_MASK];

	return (dirty > dirty_min_bytes);
}

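/*
 * Worked example (a sketch using the defaults above): with
 * zfs_dirty_data_max = 4GB, dsl_pool_need_dirty_sync() requests a txg
 * sync once a single txg holds more than 20% (~819MB) of dirty data,
 * while dsl_pool_need_dirty_delay() starts delaying writers once the
 * poolwide total exceeds 60% (~2.4GB).
 */
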
void
dsl_pool_dirty_space(dsl_pool_t *dp, int64_t space, dmu_tx_t *tx)
{
	if (space > 0) {
		mutex_enter(&dp->dp_lock);
		dp->dp_dirty_pertxg[tx->tx_txg & TXG_MASK] += space;
		dsl_pool_dirty_delta(dp, space);
		boolean_t needsync = !dmu_tx_is_syncing(tx) &&
		    dsl_pool_need_dirty_sync(dp, tx->tx_txg);
		mutex_exit(&dp->dp_lock);

		if (needsync)
			txg_kick(dp, tx->tx_txg);
	}
}

void
dsl_pool_undirty_space(dsl_pool_t *dp, int64_t space, uint64_t txg)
{
	ASSERT3S(space, >=, 0);
	if (space == 0)
		return;

	mutex_enter(&dp->dp_lock);
	if (dp->dp_dirty_pertxg[txg & TXG_MASK] < space) {
		/* XXX writing something we didn't dirty? */
		space = dp->dp_dirty_pertxg[txg & TXG_MASK];
	}
	ASSERT3U(dp->dp_dirty_pertxg[txg & TXG_MASK], >=, space);
	dp->dp_dirty_pertxg[txg & TXG_MASK] -= space;
	ASSERT3U(dp->dp_dirty_total, >=, space);
	dsl_pool_dirty_delta(dp, -space);
	mutex_exit(&dp->dp_lock);
}

/* ARGSUSED */
static int
upgrade_clones_cb(dsl_pool_t *dp, dsl_dataset_t *hds, void *arg)
{
	dmu_tx_t *tx = arg;
	dsl_dataset_t *ds, *prev = NULL;
	int err;

	err = dsl_dataset_hold_obj(dp, hds->ds_object, FTAG, &ds);
	if (err)
		return (err);

	while (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0) {
		err = dsl_dataset_hold_obj(dp,
		    dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &prev);
		if (err) {
			dsl_dataset_rele(ds, FTAG);
			return (err);
		}

		if (dsl_dataset_phys(prev)->ds_next_snap_obj != ds->ds_object)
			break;
		dsl_dataset_rele(ds, FTAG);
		ds = prev;
		prev = NULL;
	}

	if (prev == NULL) {
		prev = dp->dp_origin_snap;

		/*
		 * The $ORIGIN can't have any data, or the accounting
		 * will be wrong.
		 */
		rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
		ASSERT0(dsl_dataset_phys(prev)->ds_bp.blk_birth);
		rrw_exit(&ds->ds_bp_rwlock, FTAG);

		/* The origin doesn't get attached to itself */
		if (ds->ds_object == prev->ds_object) {
			dsl_dataset_rele(ds, FTAG);
			return (0);
		}

		dmu_buf_will_dirty(ds->ds_dbuf, tx);
		dsl_dataset_phys(ds)->ds_prev_snap_obj = prev->ds_object;
		dsl_dataset_phys(ds)->ds_prev_snap_txg =
		    dsl_dataset_phys(prev)->ds_creation_txg;

		dmu_buf_will_dirty(ds->ds_dir->dd_dbuf, tx);
		dsl_dir_phys(ds->ds_dir)->dd_origin_obj = prev->ds_object;

		dmu_buf_will_dirty(prev->ds_dbuf, tx);
		dsl_dataset_phys(prev)->ds_num_children++;

		if (dsl_dataset_phys(ds)->ds_next_snap_obj == 0) {
			ASSERT(ds->ds_prev == NULL);
			VERIFY0(dsl_dataset_hold_obj(dp,
			    dsl_dataset_phys(ds)->ds_prev_snap_obj,
			    ds, &ds->ds_prev));
		}
	}

	ASSERT3U(dsl_dir_phys(ds->ds_dir)->dd_origin_obj, ==, prev->ds_object);
	ASSERT3U(dsl_dataset_phys(ds)->ds_prev_snap_obj, ==, prev->ds_object);

	if (dsl_dataset_phys(prev)->ds_next_clones_obj == 0) {
		dmu_buf_will_dirty(prev->ds_dbuf, tx);
		dsl_dataset_phys(prev)->ds_next_clones_obj =
		    zap_create(dp->dp_meta_objset,
		    DMU_OT_NEXT_CLONES, DMU_OT_NONE, 0, tx);
	}
	VERIFY0(zap_add_int(dp->dp_meta_objset,
	    dsl_dataset_phys(prev)->ds_next_clones_obj, ds->ds_object, tx));

	dsl_dataset_rele(ds, FTAG);
	if (prev != dp->dp_origin_snap)
		dsl_dataset_rele(prev, FTAG);
	return (0);
}

void
dsl_pool_upgrade_clones(dsl_pool_t *dp, dmu_tx_t *tx)
{
	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(dp->dp_origin_snap != NULL);

	VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj, upgrade_clones_cb,
	    tx, DS_FIND_CHILDREN | DS_FIND_SERIALIZE));
}

/* ARGSUSED */
static int
upgrade_dir_clones_cb(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)
{
	dmu_tx_t *tx = arg;
	objset_t *mos = dp->dp_meta_objset;

	if (dsl_dir_phys(ds->ds_dir)->dd_origin_obj != 0) {
		dsl_dataset_t *origin;

		VERIFY0(dsl_dataset_hold_obj(dp,
		    dsl_dir_phys(ds->ds_dir)->dd_origin_obj, FTAG, &origin));

		if (dsl_dir_phys(origin->ds_dir)->dd_clones == 0) {
			dmu_buf_will_dirty(origin->ds_dir->dd_dbuf, tx);
			dsl_dir_phys(origin->ds_dir)->dd_clones =
			    zap_create(mos, DMU_OT_DSL_CLONES, DMU_OT_NONE,
			    0, tx);
		}

		VERIFY0(zap_add_int(dp->dp_meta_objset,
		    dsl_dir_phys(origin->ds_dir)->dd_clones,
		    ds->ds_object, tx));

		dsl_dataset_rele(origin, FTAG);
	}
	return (0);
}

void
dsl_pool_upgrade_dir_clones(dsl_pool_t *dp, dmu_tx_t *tx)
{
	uint64_t obj;

	ASSERT(dmu_tx_is_syncing(tx));

	(void) dsl_dir_create_sync(dp, dp->dp_root_dir, FREE_DIR_NAME, tx);
	VERIFY0(dsl_pool_open_special_dir(dp,
	    FREE_DIR_NAME, &dp->dp_free_dir));

	/*
	 * We can't use bpobj_alloc(), because spa_version() still
	 * returns the old version, and we need a new-version bpobj with
	 * subobj support. So call dmu_object_alloc() directly.
	 */
	obj = dmu_object_alloc(dp->dp_meta_objset, DMU_OT_BPOBJ,
	    SPA_OLD_MAXBLOCKSIZE, DMU_OT_BPOBJ_HDR, sizeof (bpobj_phys_t), tx);
	VERIFY0(zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_FREE_BPOBJ, sizeof (uint64_t), 1, &obj, tx));
	VERIFY0(bpobj_open(&dp->dp_free_bpobj, dp->dp_meta_objset, obj));

	VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
	    upgrade_dir_clones_cb, tx, DS_FIND_CHILDREN | DS_FIND_SERIALIZE));
}

void
dsl_pool_create_origin(dsl_pool_t *dp, dmu_tx_t *tx)
{
	uint64_t dsobj;
	dsl_dataset_t *ds;

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(dp->dp_origin_snap == NULL);
	ASSERT(rrw_held(&dp->dp_config_rwlock, RW_WRITER));

	/* create the origin dir, ds, & snap-ds */
	dsobj = dsl_dataset_create_sync(dp->dp_root_dir, ORIGIN_DIR_NAME,
	    NULL, 0, kcred, NULL, tx);
	VERIFY0(dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
	dsl_dataset_snapshot_sync_impl(ds, ORIGIN_DIR_NAME, tx);
	VERIFY0(dsl_dataset_hold_obj(dp, dsl_dataset_phys(ds)->ds_prev_snap_obj,
	    dp, &dp->dp_origin_snap));
	dsl_dataset_rele(ds, FTAG);
}

taskq_t *
dsl_pool_zrele_taskq(dsl_pool_t *dp)
{
	return (dp->dp_zrele_taskq);
}

taskq_t *
dsl_pool_unlinked_drain_taskq(dsl_pool_t *dp)
{
	return (dp->dp_unlinked_drain_taskq);
}

/*
 * Walk through the pool-wide zap object of temporary snapshot user holds
 * and release them.
 */
void
dsl_pool_clean_tmp_userrefs(dsl_pool_t *dp)
{
	zap_attribute_t za;
	zap_cursor_t zc;
	objset_t *mos = dp->dp_meta_objset;
	uint64_t zapobj = dp->dp_tmp_userrefs_obj;
	nvlist_t *holds;

	if (zapobj == 0)
		return;
	ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);

	holds = fnvlist_alloc();

	for (zap_cursor_init(&zc, mos, zapobj);
	    zap_cursor_retrieve(&zc, &za) == 0;
	    zap_cursor_advance(&zc)) {
		char *htag;
		nvlist_t *tags;

		htag = strchr(za.za_name, '-');
		*htag = '\0';
		++htag;
		if (nvlist_lookup_nvlist(holds, za.za_name, &tags) != 0) {
			tags = fnvlist_alloc();
			fnvlist_add_boolean(tags, htag);
			fnvlist_add_nvlist(holds, za.za_name, tags);
			fnvlist_free(tags);
		} else {
			fnvlist_add_boolean(tags, htag);
		}
	}
	dsl_dataset_user_release_tmp(dp, holds);
	fnvlist_free(holds);
	zap_cursor_fini(&zc);
}

/*
 * Create the pool-wide zap object for storing temporary snapshot holds.
 */
static void
dsl_pool_user_hold_create_obj(dsl_pool_t *dp, dmu_tx_t *tx)
{
	objset_t *mos = dp->dp_meta_objset;

	ASSERT(dp->dp_tmp_userrefs_obj == 0);
	ASSERT(dmu_tx_is_syncing(tx));

	dp->dp_tmp_userrefs_obj = zap_create_link(mos, DMU_OT_USERREFS,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_TMP_USERREFS, tx);
}

static int
dsl_pool_user_hold_rele_impl(dsl_pool_t *dp, uint64_t dsobj,
    const char *tag, uint64_t now, dmu_tx_t *tx, boolean_t holding)
{
	objset_t *mos = dp->dp_meta_objset;
	uint64_t zapobj = dp->dp_tmp_userrefs_obj;
	char *name;
	int error;

	ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);
	ASSERT(dmu_tx_is_syncing(tx));

	/*
	 * If the pool was created prior to SPA_VERSION_USERREFS, the
	 * zap object for temporary holds might not exist yet.
	 */
	if (zapobj == 0) {
		if (holding) {
			dsl_pool_user_hold_create_obj(dp, tx);
			zapobj = dp->dp_tmp_userrefs_obj;
		} else {
			return (SET_ERROR(ENOENT));
		}
	}

	name = kmem_asprintf("%llx-%s", (u_longlong_t)dsobj, tag);
	if (holding)
		error = zap_add(mos, zapobj, name, 8, 1, &now, tx);
	else
		error = zap_remove(mos, zapobj, name, tx);
	kmem_strfree(name);

	return (error);
}

/*
 * Add a temporary hold for the given dataset object and tag.
 */
int
dsl_pool_user_hold(dsl_pool_t *dp, uint64_t dsobj, const char *tag,
    uint64_t now, dmu_tx_t *tx)
{
	return (dsl_pool_user_hold_rele_impl(dp, dsobj, tag, now, tx, B_TRUE));
}

/*
 * Release a temporary hold for the given dataset object and tag.
 */
int
dsl_pool_user_release(dsl_pool_t *dp, uint64_t dsobj, const char *tag,
    dmu_tx_t *tx)
{
	return (dsl_pool_user_hold_rele_impl(dp, dsobj, tag, 0,
	    tx, B_FALSE));
}

/*
 * DSL Pool Configuration Lock
 *
 * The dp_config_rwlock protects against changes to DSL state (e.g. dataset
 * creation / destruction / rename / property setting). It must be held for
 * read to hold a dataset or dsl_dir. I.e. you must call
 * dsl_pool_config_enter() or dsl_pool_hold() before calling
 * dsl_{dataset,dir}_hold{_obj}. In most circumstances, the dp_config_rwlock
 * must be held continuously until all datasets and dsl_dirs are released.
 *
 * The only exception to this rule is that if a "long hold" is placed on
 * a dataset, then the dp_config_rwlock may be dropped while the dataset
 * is still held. The long hold will prevent the dataset from being
 * destroyed -- the destroy will fail with EBUSY. A long hold can be
 * obtained by calling dsl_dataset_long_hold(), or by "owning" a dataset
 * (by calling dsl_{dataset,objset}_{try}own{_obj}).
 *
 * Legitimate long-holders (including owners) should be long-running,
 * cancelable tasks that should cause "zfs destroy" to fail. This includes
 * DMU consumers (i.e. a ZPL filesystem being mounted or ZVOL being open),
 * "zfs send", and "zfs diff". There are several other long-holders whose
 * uses are suboptimal (e.g. "zfs promote", and zil_suspend()).
 *
 * The usual formula for long-holding would be:
 * dsl_pool_hold()
 * dsl_dataset_hold()
 * ... perform checks ...
 * dsl_dataset_long_hold()
 * dsl_pool_rele()
 * ... perform long-running task ...
 * dsl_dataset_long_rele()
 * dsl_dataset_rele()
 *
 * Note that when the long hold is released, the dataset is still held but
 * the pool is not held. The dataset may change arbitrarily during this time
 * (e.g. it could be destroyed). Therefore you shouldn't do anything to the
 * dataset except release it.
 *
 * Operations generally fall somewhere into the following taxonomy:
 *
 *				Read-Only		Modifying
 *
 *	Dataset Layer / MOS	zfs get			zfs destroy
 *
 *	Individual Dataset	read()			write()
 *
 *
 * Dataset Layer Operations
 *
 * Modifying operations should generally use dsl_sync_task(). The synctask
 * infrastructure enforces proper locking strategy with respect to the
 * dp_config_rwlock. See the comment above dsl_sync_task() for details.
 *
 * Read-only operations will manually hold the pool, then the dataset, obtain
 * information from the dataset, then release the pool and dataset.
 * dmu_objset_{hold,rele}() are convenience routines that also do the pool
 * hold/rele.
 *
 *
 * Operations On Individual Datasets
 *
 * Objects _within_ an objset should only be modified by the current 'owner'
 * of the objset to prevent incorrect concurrent modification. Thus, use
 * {dmu_objset,dsl_dataset}_own to mark some entity as the current owner,
 * and fail with EBUSY if there is already an owner. The owner can then
 * implement its own locking strategy, independent of the dataset layer's
 * locking infrastructure.
 * (E.g., the ZPL has its own set of locks to control concurrency. A regular
 * vnop will not reach into the dataset layer).
 *
 * Ideally, objects would also only be read by the objset's owner, so that we
 * don't observe state mid-modification.
 * (E.g. the ZPL is creating a new object and linking it into a directory; if
 * you don't coordinate with the ZPL to hold ZPL-level locks, you could see an
 * intermediate state. The ioctl level violates this but in pretty benign
 * ways, e.g. reading the zpl props object.)
 */
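
/*
 * An illustrative sketch of the read-only pattern described above
 * (hypothetical pool/dataset names; error handling elided):
 *
 *	dsl_pool_t *dp;
 *	dsl_dataset_t *ds;
 *
 *	VERIFY0(dsl_pool_hold("tank", FTAG, &dp));
 *	VERIFY0(dsl_dataset_hold(dp, "tank/fs", FTAG, &ds));
 *	... read state from ds ...
 *	dsl_dataset_rele(ds, FTAG);
 *	dsl_pool_rele(dp, FTAG);
 */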

int
dsl_pool_hold(const char *name, void *tag, dsl_pool_t **dp)
{
	spa_t *spa;
	int error;

	error = spa_open(name, &spa, tag);
	if (error == 0) {
		*dp = spa_get_dsl(spa);
		dsl_pool_config_enter(*dp, tag);
	}
	return (error);
}

void
dsl_pool_rele(dsl_pool_t *dp, void *tag)
{
	dsl_pool_config_exit(dp, tag);
	spa_close(dp->dp_spa, tag);
}

void
dsl_pool_config_enter(dsl_pool_t *dp, void *tag)
{
	/*
	 * We use a "reentrant" reader-writer lock, but not reentrantly.
	 *
	 * The rrwlock can (with the track_all flag) track all reading threads,
	 * which is very useful for debugging which code path failed to release
	 * the lock, and for verifying that the *current* thread does hold
	 * the lock.
	 *
	 * (Unlike a rwlock, which knows that N threads hold it for
	 * read, but not *which* threads, so rw_held(RW_READER) returns TRUE
	 * if any thread holds it for read, even if this thread doesn't).
	 */
	ASSERT(!rrw_held(&dp->dp_config_rwlock, RW_READER));
	rrw_enter(&dp->dp_config_rwlock, RW_READER, tag);
}

void
dsl_pool_config_enter_prio(dsl_pool_t *dp, void *tag)
{
	ASSERT(!rrw_held(&dp->dp_config_rwlock, RW_READER));
	rrw_enter_read_prio(&dp->dp_config_rwlock, tag);
}

void
dsl_pool_config_exit(dsl_pool_t *dp, void *tag)
{
	rrw_exit(&dp->dp_config_rwlock, tag);
}

boolean_t
dsl_pool_config_held(dsl_pool_t *dp)
{
	return (RRW_LOCK_HELD(&dp->dp_config_rwlock));
}

boolean_t
dsl_pool_config_held_writer(dsl_pool_t *dp)
{
	return (RRW_WRITE_HELD(&dp->dp_config_rwlock));
}

EXPORT_SYMBOL(dsl_pool_config_enter);
EXPORT_SYMBOL(dsl_pool_config_exit);

/* zfs_dirty_data_max_percent only applied at module load in arc_init(). */
ZFS_MODULE_PARAM(zfs, zfs_, dirty_data_max_percent, INT, ZMOD_RD,
	"Max percent of RAM allowed to be dirty");

/* zfs_dirty_data_max_max_percent only applied at module load in arc_init(). */
ZFS_MODULE_PARAM(zfs, zfs_, dirty_data_max_max_percent, INT, ZMOD_RD,
	"zfs_dirty_data_max upper bound as % of RAM");

ZFS_MODULE_PARAM(zfs, zfs_, delay_min_dirty_percent, INT, ZMOD_RW,
	"Transaction delay threshold");

ZFS_MODULE_PARAM(zfs, zfs_, dirty_data_max, ULONG, ZMOD_RW,
	"Determines the dirty space limit");

ZFS_MODULE_PARAM(zfs, zfs_, wrlog_data_max, ULONG, ZMOD_RW,
	"The size limit of write-transaction zil log data");

/* zfs_dirty_data_max_max only applied at module load in arc_init(). */
ZFS_MODULE_PARAM(zfs, zfs_, dirty_data_max_max, ULONG, ZMOD_RD,
	"zfs_dirty_data_max upper bound in bytes");

ZFS_MODULE_PARAM(zfs, zfs_, dirty_data_sync_percent, INT, ZMOD_RW,
	"Dirty data txg sync threshold as a percentage of zfs_dirty_data_max");

ZFS_MODULE_PARAM(zfs, zfs_, delay_scale, ULONG, ZMOD_RW,
	"How quickly delay approaches infinity");

ZFS_MODULE_PARAM(zfs, zfs_, sync_taskq_batch_pct, INT, ZMOD_RW,
	"Max percent of CPUs that are used to sync dirty data");

ZFS_MODULE_PARAM(zfs_zil, zfs_zil_, clean_taskq_nthr_pct, INT, ZMOD_RW,
	"Max percent of CPUs that are used per dp_sync_taskq");

ZFS_MODULE_PARAM(zfs_zil, zfs_zil_, clean_taskq_minalloc, INT, ZMOD_RW,
	"Number of taskq entries that are pre-populated");

ZFS_MODULE_PARAM(zfs_zil, zfs_zil_, clean_taskq_maxalloc, INT, ZMOD_RW,
	"Max number of taskq entries that are cached");