/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2017 by Delphix. All rights reserved.
 * Copyright (c) 2013 Steven Hartland. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright 2016 Nexenta Systems, Inc. All rights reserved.
 */

#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_scan.h>
#include <sys/dnode.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/arc.h>
#include <sys/zap.h>
#include <sys/zio.h>
#include <sys/zfs_context.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_znode.h>
#include <sys/spa_impl.h>
#include <sys/dsl_deadlist.h>
#include <sys/bptree.h>
#include <sys/zfeature.h>
#include <sys/zil_impl.h>
#include <sys/dsl_userhold.h>
#include <sys/trace_txg.h>
#include <sys/mmp.h>

/*
 * ZFS Write Throttle
 * ------------------
 *
 * ZFS must limit the rate of incoming writes to the rate at which it is able
 * to sync data modifications to the backend storage. Throttling by too much
 * creates an artificial limit; throttling by too little can only be sustained
 * for short periods and would lead to highly lumpy performance. On a per-pool
 * basis, ZFS tracks the amount of modified (dirty) data. As operations change
 * data, the amount of dirty data increases; as ZFS syncs out data, the amount
 * of dirty data decreases. When the amount of dirty data exceeds a
 * predetermined threshold, further modifications are blocked until the amount
 * of dirty data decreases (as data is synced out).
 *
 * The limit on dirty data is tunable, and should be adjusted according to
 * both the IO capacity and available memory of the system. The larger the
 * window, the more ZFS is able to aggregate and amortize metadata (and data)
 * changes. However, memory is a limited resource, and allowing for more dirty
 * data comes at the cost of keeping other useful data in memory (for example
 * ZFS data cached by the ARC).
 *
 * Implementation
 *
 * As buffers are modified, dsl_pool_dirty_space() increments both the per-
 * txg (dp_dirty_pertxg[]) and poolwide (dp_dirty_total) accounting of
 * dirty space used; dsl_pool_undirty_space() decrements those values as data
 * is synced out from dsl_pool_sync(). While only the poolwide value is
 * relevant, the per-txg value is useful for debugging. The tunable
 * zfs_dirty_data_max determines the dirty space limit. Once that value is
 * exceeded, new writes are halted until space frees up.
 *
 * The zfs_dirty_data_sync tunable dictates the threshold at which we
 * ensure that there is a txg syncing (see the comment in txg.c for a full
 * description of transaction group stages).
 *
 * The IO scheduler uses both the dirty space limit and current amount of
 * dirty data as inputs. Those values affect the number of concurrent IOs ZFS
 * issues. See the comment in vdev_queue.c for details of the IO scheduler.
 *
 * The delay is also calculated based on the amount of dirty data. See the
 * comment above dmu_tx_delay() for details.
 */

/*
 * zfs_dirty_data_max will be set to zfs_dirty_data_max_percent% of all memory,
 * capped at zfs_dirty_data_max_max. It can also be overridden with a module
 * parameter.
 */
unsigned long zfs_dirty_data_max = 0;
unsigned long zfs_dirty_data_max_max = 0;
int zfs_dirty_data_max_percent = 10;
int zfs_dirty_data_max_max_percent = 25;

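/*
 * Worked example (illustrative): on a system with 16 GiB of RAM and the
 * defaults above, zfs_dirty_data_max starts at 10% of memory (~1.6 GiB),
 * and cannot be raised above the zfs_dirty_data_max_max cap of 25% of RAM
 * (~4 GiB) without also overriding that module parameter.
 */
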
/*
 * If there is at least this much dirty data, push out a txg.
 */
unsigned long zfs_dirty_data_sync = 64 * 1024 * 1024;

/*
 * Once there is this amount of dirty data, dmu_tx_delay() will kick in
 * and delay each transaction.
 * This value should be >= zfs_vdev_async_write_active_max_dirty_percent.
 */
int zfs_delay_min_dirty_percent = 60;

/*
 * This controls how quickly the delay approaches infinity.
 * Larger values cause it to delay more for a given amount of dirty data.
 * Therefore larger values will cause there to be less dirty data for a
 * given throughput.
 *
 * For the smoothest delay, this value should be about 1 billion divided
 * by the maximum number of operations per second. This will smoothly
 * handle between 10x and 1/10th this number.
 *
 * Note: zfs_delay_scale * zfs_dirty_data_max must be < 2^64, due to the
 * multiply in dmu_tx_delay().
 */
unsigned long zfs_delay_scale = 1000 * 1000 * 1000 / 2000;

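/*
 * Worked example (illustrative): the default above is 1,000,000,000 / 2000
 * = 500,000, i.e. tuned for a pool that sustains roughly 2000 operations
 * per second; a pool measured at ~20,000 ops/sec would be smoother with
 * 1,000,000,000 / 20,000 = 50,000. Against the overflow note: with
 * zfs_delay_scale = 500,000 and zfs_dirty_data_max = 4 GiB, the product
 * is about 2^51, well under the 2^64 limit.
 */
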
/*
 * This determines the number of threads used by the dp_sync_taskq.
 */
int zfs_sync_taskq_batch_pct = 75;

/*
 * These tunables determine the behavior of how zil_itxg_clean() is
 * called via zil_clean() in the context of spa_sync(). When an itxg
 * list needs to be cleaned, TQ_NOSLEEP will be used when dispatching.
 * If the dispatch fails, the call to zil_itxg_clean() will occur
 * synchronously in the context of spa_sync(), which can negatively
 * impact the performance of spa_sync() (e.g. in the case of the itxg
 * list having a large number of itxs that need to be cleaned).
 *
 * Thus, these tunables can be used to manipulate the behavior of the
 * taskq used by zil_clean(); they determine the number of taskq entries
 * that are pre-populated when the taskq is first created (via the
 * "zfs_zil_clean_taskq_minalloc" tunable) and the maximum number of
 * taskq entries that are cached after an on-demand allocation (via the
 * "zfs_zil_clean_taskq_maxalloc" tunable).
 *
 * The idea is that we want to try reasonably hard to ensure there will
 * already be a taskq entry pre-allocated by the time that it is needed
 * by zil_clean(). This way, we can avoid the possibility of an
 * on-demand allocation of a new taskq entry from failing, which would
 * result in zil_itxg_clean() being called synchronously from zil_clean()
 * (which can adversely affect performance of spa_sync()).
 *
 * Additionally, the number of threads used by the taskq can be
 * configured via the "zfs_zil_clean_taskq_nthr_pct" tunable.
 */
int zfs_zil_clean_taskq_nthr_pct = 100;
int zfs_zil_clean_taskq_minalloc = 1024;
int zfs_zil_clean_taskq_maxalloc = 1024 * 1024;

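/*
 * Worked example (illustrative): with the defaults above, the
 * dp_zil_clean_taskq created in dsl_pool_open_impl() below is
 * pre-populated with 1024 entries and caches at most 1024 * 1024 entries
 * from on-demand allocations. Raising zfs_zil_clean_taskq_minalloc
 * trades memory for a lower chance that a TQ_NOSLEEP dispatch fails and
 * forces zil_itxg_clean() to run synchronously in spa_sync().
 */
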
int
dsl_pool_open_special_dir(dsl_pool_t *dp, const char *name, dsl_dir_t **ddp)
{
	uint64_t obj;
	int err;

	err = zap_lookup(dp->dp_meta_objset,
	    dsl_dir_phys(dp->dp_root_dir)->dd_child_dir_zapobj,
	    name, sizeof (obj), 1, &obj);
	if (err)
		return (err);

	return (dsl_dir_hold_obj(dp, obj, name, dp, ddp));
}

static dsl_pool_t *
dsl_pool_open_impl(spa_t *spa, uint64_t txg)
{
	dsl_pool_t *dp;
	blkptr_t *bp = spa_get_rootblkptr(spa);

	dp = kmem_zalloc(sizeof (dsl_pool_t), KM_SLEEP);
	dp->dp_spa = spa;
	dp->dp_meta_rootbp = *bp;
	rrw_init(&dp->dp_config_rwlock, B_TRUE);
	txg_init(dp, txg);
	mmp_init(spa);

	txg_list_create(&dp->dp_dirty_datasets, spa,
	    offsetof(dsl_dataset_t, ds_dirty_link));
	txg_list_create(&dp->dp_dirty_zilogs, spa,
	    offsetof(zilog_t, zl_dirty_link));
	txg_list_create(&dp->dp_dirty_dirs, spa,
	    offsetof(dsl_dir_t, dd_dirty_link));
	txg_list_create(&dp->dp_sync_tasks, spa,
	    offsetof(dsl_sync_task_t, dst_node));

	dp->dp_sync_taskq = taskq_create("dp_sync_taskq",
	    zfs_sync_taskq_batch_pct, minclsyspri, 1, INT_MAX,
	    TASKQ_THREADS_CPU_PCT);

	dp->dp_zil_clean_taskq = taskq_create("dp_zil_clean_taskq",
	    zfs_zil_clean_taskq_nthr_pct, minclsyspri,
	    zfs_zil_clean_taskq_minalloc,
	    zfs_zil_clean_taskq_maxalloc,
	    TASKQ_PREPOPULATE | TASKQ_THREADS_CPU_PCT);

	mutex_init(&dp->dp_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&dp->dp_spaceavail_cv, NULL, CV_DEFAULT, NULL);

	dp->dp_iput_taskq = taskq_create("z_iput", max_ncpus, defclsyspri,
	    max_ncpus * 8, INT_MAX, TASKQ_PREPOPULATE | TASKQ_DYNAMIC);

	return (dp);
}

int
dsl_pool_init(spa_t *spa, uint64_t txg, dsl_pool_t **dpp)
{
	int err;
	dsl_pool_t *dp = dsl_pool_open_impl(spa, txg);

	/*
	 * Initialize the caller's dsl_pool_t structure before we actually open
	 * the meta objset. This is done because a self-healing write zio may
	 * be issued as part of dmu_objset_open_impl() and the spa needs its
	 * dsl_pool_t initialized in order to handle the write.
	 */
	*dpp = dp;

	err = dmu_objset_open_impl(spa, NULL, &dp->dp_meta_rootbp,
	    &dp->dp_meta_objset);
	if (err != 0) {
		dsl_pool_close(dp);
		*dpp = NULL;
	}

	return (err);
}

int
dsl_pool_open(dsl_pool_t *dp)
{
	int err;
	dsl_dir_t *dd;
	dsl_dataset_t *ds;
	uint64_t obj;

	rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);
	err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_ROOT_DATASET, sizeof (uint64_t), 1,
	    &dp->dp_root_dir_obj);
	if (err)
		goto out;

	err = dsl_dir_hold_obj(dp, dp->dp_root_dir_obj,
	    NULL, dp, &dp->dp_root_dir);
	if (err)
		goto out;

	err = dsl_pool_open_special_dir(dp, MOS_DIR_NAME, &dp->dp_mos_dir);
	if (err)
		goto out;

	if (spa_version(dp->dp_spa) >= SPA_VERSION_ORIGIN) {
		err = dsl_pool_open_special_dir(dp, ORIGIN_DIR_NAME, &dd);
		if (err)
			goto out;
		err = dsl_dataset_hold_obj(dp,
		    dsl_dir_phys(dd)->dd_head_dataset_obj, FTAG, &ds);
		if (err == 0) {
			err = dsl_dataset_hold_obj(dp,
			    dsl_dataset_phys(ds)->ds_prev_snap_obj, dp,
			    &dp->dp_origin_snap);
			dsl_dataset_rele(ds, FTAG);
		}
		dsl_dir_rele(dd, dp);
		if (err)
			goto out;
	}

	if (spa_version(dp->dp_spa) >= SPA_VERSION_DEADLISTS) {
		err = dsl_pool_open_special_dir(dp, FREE_DIR_NAME,
		    &dp->dp_free_dir);
		if (err)
			goto out;

		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_FREE_BPOBJ, sizeof (uint64_t), 1, &obj);
		if (err)
			goto out;
		VERIFY0(bpobj_open(&dp->dp_free_bpobj,
		    dp->dp_meta_objset, obj));
	}

	/*
	 * Note: errors ignored, because the leak dir will not exist if we
	 * have not encountered a leak yet.
	 */
	(void) dsl_pool_open_special_dir(dp, LEAK_DIR_NAME,
	    &dp->dp_leak_dir);

	if (spa_feature_is_active(dp->dp_spa, SPA_FEATURE_ASYNC_DESTROY)) {
		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_BPTREE_OBJ, sizeof (uint64_t), 1,
		    &dp->dp_bptree_obj);
		if (err != 0)
			goto out;
	}

	if (spa_feature_is_active(dp->dp_spa, SPA_FEATURE_EMPTY_BPOBJ)) {
		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_EMPTY_BPOBJ, sizeof (uint64_t), 1,
		    &dp->dp_empty_bpobj);
		if (err != 0)
			goto out;
	}

	err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_TMP_USERREFS, sizeof (uint64_t), 1,
	    &dp->dp_tmp_userrefs_obj);
	if (err == ENOENT)
		err = 0;
	if (err)
		goto out;

	err = dsl_scan_init(dp, dp->dp_tx.tx_open_txg);

out:
	rrw_exit(&dp->dp_config_rwlock, FTAG);
	return (err);
}

void
dsl_pool_close(dsl_pool_t *dp)
{
	/*
	 * Drop our references from dsl_pool_open().
	 *
	 * Since we held the origin_snap from "syncing" context (which
	 * includes pool-opening context), it actually only got a "ref"
	 * and not a hold, so just drop that here.
	 */
	if (dp->dp_origin_snap)
		dsl_dataset_rele(dp->dp_origin_snap, dp);
	if (dp->dp_mos_dir)
		dsl_dir_rele(dp->dp_mos_dir, dp);
	if (dp->dp_free_dir)
		dsl_dir_rele(dp->dp_free_dir, dp);
	if (dp->dp_leak_dir)
		dsl_dir_rele(dp->dp_leak_dir, dp);
	if (dp->dp_root_dir)
		dsl_dir_rele(dp->dp_root_dir, dp);

	bpobj_close(&dp->dp_free_bpobj);

	/* undo the dmu_objset_open_impl(mos) from dsl_pool_open() */
	if (dp->dp_meta_objset)
		dmu_objset_evict(dp->dp_meta_objset);

	txg_list_destroy(&dp->dp_dirty_datasets);
	txg_list_destroy(&dp->dp_dirty_zilogs);
	txg_list_destroy(&dp->dp_sync_tasks);
	txg_list_destroy(&dp->dp_dirty_dirs);

	taskq_destroy(dp->dp_zil_clean_taskq);
	taskq_destroy(dp->dp_sync_taskq);

	/*
	 * We can't set retry to TRUE since we're explicitly specifying
	 * a spa to flush. This is good enough; any missed buffers for
	 * this spa won't cause trouble, and they'll eventually fall
	 * out of the ARC just like any other unused buffer.
	 */
	arc_flush(dp->dp_spa, FALSE);

	mmp_fini(dp->dp_spa);
	txg_fini(dp);
	dsl_scan_fini(dp);
	dmu_buf_user_evict_wait();

	rrw_destroy(&dp->dp_config_rwlock);
	mutex_destroy(&dp->dp_lock);
	cv_destroy(&dp->dp_spaceavail_cv);
	taskq_destroy(dp->dp_iput_taskq);
	if (dp->dp_blkstats)
		vmem_free(dp->dp_blkstats, sizeof (zfs_all_blkstats_t));
	kmem_free(dp, sizeof (dsl_pool_t));
}

dsl_pool_t *
dsl_pool_create(spa_t *spa, nvlist_t *zplprops, dsl_crypto_params_t *dcp,
    uint64_t txg)
{
	int err;
	dsl_pool_t *dp = dsl_pool_open_impl(spa, txg);
	dmu_tx_t *tx = dmu_tx_create_assigned(dp, txg);
	objset_t *os;
	dsl_dataset_t *ds;
	uint64_t obj;

	rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);

	/* create and open the MOS (meta-objset) */
	dp->dp_meta_objset = dmu_objset_create_impl(spa,
	    NULL, &dp->dp_meta_rootbp, DMU_OST_META, tx);
	spa->spa_meta_objset = dp->dp_meta_objset;

	/* create the pool directory */
	err = zap_create_claim(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_OT_OBJECT_DIRECTORY, DMU_OT_NONE, 0, tx);
	ASSERT0(err);

	/* Initialize scan structures */
	VERIFY0(dsl_scan_init(dp, txg));

	/* create and open the root dir */
	dp->dp_root_dir_obj = dsl_dir_create_sync(dp, NULL, NULL, tx);
	VERIFY0(dsl_dir_hold_obj(dp, dp->dp_root_dir_obj,
	    NULL, dp, &dp->dp_root_dir));

	/* create and open the meta-objset dir */
	(void) dsl_dir_create_sync(dp, dp->dp_root_dir, MOS_DIR_NAME, tx);
	VERIFY0(dsl_pool_open_special_dir(dp,
	    MOS_DIR_NAME, &dp->dp_mos_dir));

	if (spa_version(spa) >= SPA_VERSION_DEADLISTS) {
		/* create and open the free dir */
		(void) dsl_dir_create_sync(dp, dp->dp_root_dir,
		    FREE_DIR_NAME, tx);
		VERIFY0(dsl_pool_open_special_dir(dp,
		    FREE_DIR_NAME, &dp->dp_free_dir));

		/* create and open the free_bpobj */
		obj = bpobj_alloc(dp->dp_meta_objset, SPA_OLD_MAXBLOCKSIZE, tx);
		VERIFY(zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_FREE_BPOBJ, sizeof (uint64_t), 1, &obj, tx) == 0);
		VERIFY0(bpobj_open(&dp->dp_free_bpobj,
		    dp->dp_meta_objset, obj));
	}

	if (spa_version(spa) >= SPA_VERSION_DSL_SCRUB)
		dsl_pool_create_origin(dp, tx);

	/*
	 * Some features may be needed when creating the root dataset, so we
	 * create the feature objects here.
	 */
	if (spa_version(spa) >= SPA_VERSION_FEATURES)
		spa_feature_create_zap_objects(spa, tx);

	if (dcp != NULL && dcp->cp_crypt != ZIO_CRYPT_OFF &&
	    dcp->cp_crypt != ZIO_CRYPT_INHERIT)
		spa_feature_enable(spa, SPA_FEATURE_ENCRYPTION, tx);

	/* create the root dataset */
	obj = dsl_dataset_create_sync_dd(dp->dp_root_dir, NULL, dcp, 0, tx);

	/* create the root objset */
	VERIFY0(dsl_dataset_hold_obj(dp, obj, FTAG, &ds));
	rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
	VERIFY(NULL != (os = dmu_objset_create_impl(dp->dp_spa, ds,
	    dsl_dataset_get_blkptr(ds), DMU_OST_ZFS, tx)));
	rrw_exit(&ds->ds_bp_rwlock, FTAG);
#ifdef _KERNEL
	zfs_create_fs(os, kcred, zplprops, tx);
#endif
	dsl_dataset_rele(ds, FTAG);

	dmu_tx_commit(tx);

	rrw_exit(&dp->dp_config_rwlock, FTAG);

	return (dp);
}

/*
 * Account for the meta-objset space in its placeholder dsl_dir.
 */
void
dsl_pool_mos_diduse_space(dsl_pool_t *dp,
    int64_t used, int64_t comp, int64_t uncomp)
{
	ASSERT3U(comp, ==, uncomp);	/* it's all metadata */
	mutex_enter(&dp->dp_lock);
	dp->dp_mos_used_delta += used;
	dp->dp_mos_compressed_delta += comp;
	dp->dp_mos_uncompressed_delta += uncomp;
	mutex_exit(&dp->dp_lock);
}

static void
dsl_pool_sync_mos(dsl_pool_t *dp, dmu_tx_t *tx)
{
	zio_t *zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
	dmu_objset_sync(dp->dp_meta_objset, zio, tx);
	VERIFY0(zio_wait(zio));
	dprintf_bp(&dp->dp_meta_rootbp, "meta objset rootbp is %s", "");
	spa_set_rootblkptr(dp->dp_spa, &dp->dp_meta_rootbp);
}

static void
dsl_pool_dirty_delta(dsl_pool_t *dp, int64_t delta)
{
	ASSERT(MUTEX_HELD(&dp->dp_lock));

	if (delta < 0)
		ASSERT3U(-delta, <=, dp->dp_dirty_total);

	dp->dp_dirty_total += delta;

	/*
	 * Note: we signal even when increasing dp_dirty_total.
	 * This ensures forward progress -- each thread wakes the next waiter.
	 */
	if (dp->dp_dirty_total < zfs_dirty_data_max)
		cv_signal(&dp->dp_spaceavail_cv);
}

void
dsl_pool_sync(dsl_pool_t *dp, uint64_t txg)
{
	zio_t *zio;
	dmu_tx_t *tx;
	dsl_dir_t *dd;
	dsl_dataset_t *ds;
	objset_t *mos = dp->dp_meta_objset;
	list_t synced_datasets;

	list_create(&synced_datasets, sizeof (dsl_dataset_t),
	    offsetof(dsl_dataset_t, ds_synced_link));

	tx = dmu_tx_create_assigned(dp, txg);

	/*
	 * Write out all dirty blocks of dirty datasets.
	 */
	zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
	while ((ds = txg_list_remove(&dp->dp_dirty_datasets, txg)) != NULL) {
		/*
		 * We must not sync any non-MOS datasets twice, because
		 * we may have taken a snapshot of them. However, we
		 * may sync newly-created datasets on pass 2.
		 */
		ASSERT(!list_link_active(&ds->ds_synced_link));
		list_insert_tail(&synced_datasets, ds);
		dsl_dataset_sync(ds, zio, tx);
	}
	VERIFY0(zio_wait(zio));

	/*
	 * We have written all of the accounted dirty data, so our
	 * dp_space_towrite should now be zero. However, some seldom-used
	 * code paths do not adhere to this (e.g. dbuf_undirty(), also
	 * rounding error in dbuf_write_physdone).
	 * Shore up the accounting of any dirtied space now.
	 */
	dsl_pool_undirty_space(dp, dp->dp_dirty_pertxg[txg & TXG_MASK], txg);

	/*
	 * Update the long range free counter after
	 * we're done syncing user data
	 */
	mutex_enter(&dp->dp_lock);
	ASSERT(spa_sync_pass(dp->dp_spa) == 1 ||
	    dp->dp_long_free_dirty_pertxg[txg & TXG_MASK] == 0);
	dp->dp_long_free_dirty_pertxg[txg & TXG_MASK] = 0;
	mutex_exit(&dp->dp_lock);

	/*
	 * After the data blocks have been written (ensured by the zio_wait()
	 * above), update the user/group space accounting. This happens
	 * in tasks dispatched to dp_sync_taskq, so wait for them before
	 * continuing.
	 */
	for (ds = list_head(&synced_datasets); ds != NULL;
	    ds = list_next(&synced_datasets, ds)) {
		dmu_objset_do_userquota_updates(ds->ds_objset, tx);
	}
	taskq_wait(dp->dp_sync_taskq);

	/*
	 * Sync the datasets again to push out the changes due to
	 * userspace updates. This must be done before we process the
	 * sync tasks, so that any snapshots will have the correct
	 * user accounting information (and we won't get confused
	 * about which blocks are part of the snapshot).
	 */
	zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
	while ((ds = txg_list_remove(&dp->dp_dirty_datasets, txg)) != NULL) {
		ASSERT(list_link_active(&ds->ds_synced_link));
		dmu_buf_rele(ds->ds_dbuf, ds);
		dsl_dataset_sync(ds, zio, tx);
	}
	VERIFY0(zio_wait(zio));

	/*
	 * Now that the datasets have been completely synced, we can
	 * clean up our in-memory structures accumulated while syncing:
	 *
	 * - move dead blocks from the pending deadlist to the on-disk deadlist
	 * - release hold from dsl_dataset_dirty()
	 */
	while ((ds = list_remove_head(&synced_datasets)) != NULL) {
		dsl_dataset_sync_done(ds, tx);
	}

	while ((dd = txg_list_remove(&dp->dp_dirty_dirs, txg)) != NULL) {
		dsl_dir_sync(dd, tx);
	}

	/*
	 * The MOS's space is accounted for in the pool/$MOS
	 * (dp_mos_dir). We can't modify the MOS while we're syncing
	 * it, so we remember the deltas and apply them here.
	 */
	if (dp->dp_mos_used_delta != 0 || dp->dp_mos_compressed_delta != 0 ||
	    dp->dp_mos_uncompressed_delta != 0) {
		dsl_dir_diduse_space(dp->dp_mos_dir, DD_USED_HEAD,
		    dp->dp_mos_used_delta,
		    dp->dp_mos_compressed_delta,
		    dp->dp_mos_uncompressed_delta, tx);
		dp->dp_mos_used_delta = 0;
		dp->dp_mos_compressed_delta = 0;
		dp->dp_mos_uncompressed_delta = 0;
	}

	if (!multilist_is_empty(mos->os_dirty_dnodes[txg & TXG_MASK])) {
		dsl_pool_sync_mos(dp, tx);
	}

	/*
	 * If we modify a dataset in the same txg that we want to destroy it,
	 * its dsl_dir's dd_dbuf will be dirty, and thus have a hold on it.
	 * dsl_dir_destroy_check() will fail if there are unexpected holds.
	 * Therefore, we want to sync the MOS (thus syncing the dd_dbuf
	 * and clearing the hold on it) before we process the sync_tasks.
	 * The MOS data dirtied by the sync_tasks will be synced on the next
	 * pass.
	 */
	if (!txg_list_empty(&dp->dp_sync_tasks, txg)) {
		dsl_sync_task_t *dst;
		/*
		 * No more sync tasks should have been added while we
		 * were syncing.
		 */
		ASSERT3U(spa_sync_pass(dp->dp_spa), ==, 1);
		while ((dst = txg_list_remove(&dp->dp_sync_tasks, txg)) != NULL)
			dsl_sync_task_sync(dst, tx);
	}

	dmu_tx_commit(tx);

	DTRACE_PROBE2(dsl_pool_sync__done, dsl_pool_t *, dp, uint64_t, txg);
}

void
dsl_pool_sync_done(dsl_pool_t *dp, uint64_t txg)
{
	zilog_t *zilog;

	while ((zilog = txg_list_head(&dp->dp_dirty_zilogs, txg))) {
		dsl_dataset_t *ds = dmu_objset_ds(zilog->zl_os);
		/*
		 * We don't remove the zilog from the dp_dirty_zilogs
		 * list until after we've cleaned it. This ensures that
		 * callers of zilog_is_dirty() receive an accurate
		 * answer when they are racing with the spa sync thread.
		 */
		zil_clean(zilog, txg);
		(void) txg_list_remove_this(&dp->dp_dirty_zilogs, zilog, txg);
		ASSERT(!dmu_objset_is_dirty(zilog->zl_os, txg));
		dmu_buf_rele(ds->ds_dbuf, zilog);
	}
	ASSERT(!dmu_objset_is_dirty(dp->dp_meta_objset, txg));
}

/*
 * TRUE if the current thread is the tx_sync_thread or if we
 * are being called from SPA context during pool initialization.
 */
int
dsl_pool_sync_context(dsl_pool_t *dp)
{
	return (curthread == dp->dp_tx.tx_sync_thread ||
	    spa_is_initializing(dp->dp_spa) ||
	    taskq_member(dp->dp_sync_taskq, curthread));
}

uint64_t
dsl_pool_adjustedsize(dsl_pool_t *dp, boolean_t netfree)
{
	uint64_t space, resv;

	/*
	 * If we're trying to assess whether it's OK to do a free,
	 * cut the reservation in half to allow forward progress
	 * (e.g. make it possible to rm(1) files from a full pool).
	 */
	space = spa_get_dspace(dp->dp_spa);
	resv = spa_get_slop_space(dp->dp_spa);
	if (netfree)
		resv >>= 1;

	return (space - resv);
}

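/*
 * Worked example (illustrative): if spa_get_dspace() reports 100 GiB and
 * spa_get_slop_space() reserves ~3.2 GiB (by default roughly 1/32 of pool
 * capacity), a normal caller sees ~96.8 GiB of adjusted size, while a
 * freeing caller (netfree == B_TRUE) sees ~98.4 GiB because the
 * reservation is halved to guarantee forward progress.
 */
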
boolean_t
dsl_pool_need_dirty_delay(dsl_pool_t *dp)
{
	uint64_t delay_min_bytes =
	    zfs_dirty_data_max * zfs_delay_min_dirty_percent / 100;
	boolean_t rv;

	mutex_enter(&dp->dp_lock);
	if (dp->dp_dirty_total > zfs_dirty_data_sync)
		txg_kick(dp);
	rv = (dp->dp_dirty_total > delay_min_bytes);
	mutex_exit(&dp->dp_lock);
	return (rv);
}

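/*
 * Worked example (illustrative): with zfs_dirty_data_max = 1.6 GiB and
 * the default zfs_delay_min_dirty_percent of 60, delay_min_bytes is
 * ~0.96 GiB. A txg is kicked as soon as dp_dirty_total exceeds the
 * 64 MiB zfs_dirty_data_sync default, and dmu_tx_delay() starts delaying
 * writers once dp_dirty_total crosses ~0.96 GiB.
 */
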
void
dsl_pool_dirty_space(dsl_pool_t *dp, int64_t space, dmu_tx_t *tx)
{
	if (space > 0) {
		mutex_enter(&dp->dp_lock);
		dp->dp_dirty_pertxg[tx->tx_txg & TXG_MASK] += space;
		dsl_pool_dirty_delta(dp, space);
		mutex_exit(&dp->dp_lock);
	}
}

void
dsl_pool_undirty_space(dsl_pool_t *dp, int64_t space, uint64_t txg)
{
	ASSERT3S(space, >=, 0);
	if (space == 0)
		return;

	mutex_enter(&dp->dp_lock);
	if (dp->dp_dirty_pertxg[txg & TXG_MASK] < space) {
		/* XXX writing something we didn't dirty? */
		space = dp->dp_dirty_pertxg[txg & TXG_MASK];
	}
	ASSERT3U(dp->dp_dirty_pertxg[txg & TXG_MASK], >=, space);
	dp->dp_dirty_pertxg[txg & TXG_MASK] -= space;
	ASSERT3U(dp->dp_dirty_total, >=, space);
	dsl_pool_dirty_delta(dp, -space);
	mutex_exit(&dp->dp_lock);
}

/* ARGSUSED */
static int
upgrade_clones_cb(dsl_pool_t *dp, dsl_dataset_t *hds, void *arg)
{
	dmu_tx_t *tx = arg;
	dsl_dataset_t *ds, *prev = NULL;
	int err;

	err = dsl_dataset_hold_obj(dp, hds->ds_object, FTAG, &ds);
	if (err)
		return (err);

	while (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0) {
		err = dsl_dataset_hold_obj(dp,
		    dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &prev);
		if (err) {
			dsl_dataset_rele(ds, FTAG);
			return (err);
		}

		if (dsl_dataset_phys(prev)->ds_next_snap_obj != ds->ds_object)
			break;
		dsl_dataset_rele(ds, FTAG);
		ds = prev;
		prev = NULL;
	}

	if (prev == NULL) {
		prev = dp->dp_origin_snap;

		/*
		 * The $ORIGIN can't have any data, or the accounting
		 * will be wrong.
		 */
		rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
		ASSERT0(dsl_dataset_phys(prev)->ds_bp.blk_birth);
		rrw_exit(&ds->ds_bp_rwlock, FTAG);

		/* The origin doesn't get attached to itself */
		if (ds->ds_object == prev->ds_object) {
			dsl_dataset_rele(ds, FTAG);
			return (0);
		}

		dmu_buf_will_dirty(ds->ds_dbuf, tx);
		dsl_dataset_phys(ds)->ds_prev_snap_obj = prev->ds_object;
		dsl_dataset_phys(ds)->ds_prev_snap_txg =
		    dsl_dataset_phys(prev)->ds_creation_txg;

		dmu_buf_will_dirty(ds->ds_dir->dd_dbuf, tx);
		dsl_dir_phys(ds->ds_dir)->dd_origin_obj = prev->ds_object;

		dmu_buf_will_dirty(prev->ds_dbuf, tx);
		dsl_dataset_phys(prev)->ds_num_children++;

		if (dsl_dataset_phys(ds)->ds_next_snap_obj == 0) {
			ASSERT(ds->ds_prev == NULL);
			VERIFY0(dsl_dataset_hold_obj(dp,
			    dsl_dataset_phys(ds)->ds_prev_snap_obj,
			    ds, &ds->ds_prev));
		}
	}

	ASSERT3U(dsl_dir_phys(ds->ds_dir)->dd_origin_obj, ==, prev->ds_object);
	ASSERT3U(dsl_dataset_phys(ds)->ds_prev_snap_obj, ==, prev->ds_object);

	if (dsl_dataset_phys(prev)->ds_next_clones_obj == 0) {
		dmu_buf_will_dirty(prev->ds_dbuf, tx);
		dsl_dataset_phys(prev)->ds_next_clones_obj =
		    zap_create(dp->dp_meta_objset,
		    DMU_OT_NEXT_CLONES, DMU_OT_NONE, 0, tx);
	}
	VERIFY0(zap_add_int(dp->dp_meta_objset,
	    dsl_dataset_phys(prev)->ds_next_clones_obj, ds->ds_object, tx));

	dsl_dataset_rele(ds, FTAG);
	if (prev != dp->dp_origin_snap)
		dsl_dataset_rele(prev, FTAG);
	return (0);
}

void
dsl_pool_upgrade_clones(dsl_pool_t *dp, dmu_tx_t *tx)
{
	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(dp->dp_origin_snap != NULL);

	VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj, upgrade_clones_cb,
	    tx, DS_FIND_CHILDREN | DS_FIND_SERIALIZE));
}

/* ARGSUSED */
static int
upgrade_dir_clones_cb(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)
{
	dmu_tx_t *tx = arg;
	objset_t *mos = dp->dp_meta_objset;

	if (dsl_dir_phys(ds->ds_dir)->dd_origin_obj != 0) {
		dsl_dataset_t *origin;

		VERIFY0(dsl_dataset_hold_obj(dp,
		    dsl_dir_phys(ds->ds_dir)->dd_origin_obj, FTAG, &origin));

		if (dsl_dir_phys(origin->ds_dir)->dd_clones == 0) {
			dmu_buf_will_dirty(origin->ds_dir->dd_dbuf, tx);
			dsl_dir_phys(origin->ds_dir)->dd_clones =
			    zap_create(mos, DMU_OT_DSL_CLONES, DMU_OT_NONE,
			    0, tx);
		}

		VERIFY0(zap_add_int(dp->dp_meta_objset,
		    dsl_dir_phys(origin->ds_dir)->dd_clones,
		    ds->ds_object, tx));

		dsl_dataset_rele(origin, FTAG);
	}
	return (0);
}

void
dsl_pool_upgrade_dir_clones(dsl_pool_t *dp, dmu_tx_t *tx)
{
	uint64_t obj;

	ASSERT(dmu_tx_is_syncing(tx));

	(void) dsl_dir_create_sync(dp, dp->dp_root_dir, FREE_DIR_NAME, tx);
	VERIFY0(dsl_pool_open_special_dir(dp,
	    FREE_DIR_NAME, &dp->dp_free_dir));

	/*
	 * We can't use bpobj_alloc(), because spa_version() still
	 * returns the old version, and we need a new-version bpobj with
	 * subobj support. So call dmu_object_alloc() directly.
	 */
	obj = dmu_object_alloc(dp->dp_meta_objset, DMU_OT_BPOBJ,
	    SPA_OLD_MAXBLOCKSIZE, DMU_OT_BPOBJ_HDR, sizeof (bpobj_phys_t), tx);
	VERIFY0(zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_FREE_BPOBJ, sizeof (uint64_t), 1, &obj, tx));
	VERIFY0(bpobj_open(&dp->dp_free_bpobj, dp->dp_meta_objset, obj));

	VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
	    upgrade_dir_clones_cb, tx, DS_FIND_CHILDREN | DS_FIND_SERIALIZE));
}

void
dsl_pool_create_origin(dsl_pool_t *dp, dmu_tx_t *tx)
{
	uint64_t dsobj;
	dsl_dataset_t *ds;

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(dp->dp_origin_snap == NULL);
	ASSERT(rrw_held(&dp->dp_config_rwlock, RW_WRITER));

	/* create the origin dir, ds, & snap-ds */
	dsobj = dsl_dataset_create_sync(dp->dp_root_dir, ORIGIN_DIR_NAME,
	    NULL, 0, kcred, NULL, tx);
	VERIFY0(dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
	dsl_dataset_snapshot_sync_impl(ds, ORIGIN_DIR_NAME, tx);
	VERIFY0(dsl_dataset_hold_obj(dp, dsl_dataset_phys(ds)->ds_prev_snap_obj,
	    dp, &dp->dp_origin_snap));
	dsl_dataset_rele(ds, FTAG);
}

taskq_t *
dsl_pool_iput_taskq(dsl_pool_t *dp)
{
	return (dp->dp_iput_taskq);
}

/*
 * Walk through the pool-wide zap object of temporary snapshot user holds
 * and release them.
 */
void
dsl_pool_clean_tmp_userrefs(dsl_pool_t *dp)
{
	zap_attribute_t za;
	zap_cursor_t zc;
	objset_t *mos = dp->dp_meta_objset;
	uint64_t zapobj = dp->dp_tmp_userrefs_obj;
	nvlist_t *holds;

	if (zapobj == 0)
		return;
	ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);

	holds = fnvlist_alloc();

	for (zap_cursor_init(&zc, mos, zapobj);
	    zap_cursor_retrieve(&zc, &za) == 0;
	    zap_cursor_advance(&zc)) {
		char *htag;
		nvlist_t *tags;

		htag = strchr(za.za_name, '-');
		*htag = '\0';
		++htag;
		if (nvlist_lookup_nvlist(holds, za.za_name, &tags) != 0) {
			tags = fnvlist_alloc();
			fnvlist_add_boolean(tags, htag);
			fnvlist_add_nvlist(holds, za.za_name, tags);
			fnvlist_free(tags);
		} else {
			fnvlist_add_boolean(tags, htag);
		}
	}
	dsl_dataset_user_release_tmp(dp, holds);
	fnvlist_free(holds);
	zap_cursor_fini(&zc);
}

/*
 * Create the pool-wide zap object for storing temporary snapshot holds.
 */
void
dsl_pool_user_hold_create_obj(dsl_pool_t *dp, dmu_tx_t *tx)
{
	objset_t *mos = dp->dp_meta_objset;

	ASSERT(dp->dp_tmp_userrefs_obj == 0);
	ASSERT(dmu_tx_is_syncing(tx));

	dp->dp_tmp_userrefs_obj = zap_create_link(mos, DMU_OT_USERREFS,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_TMP_USERREFS, tx);
}

static int
dsl_pool_user_hold_rele_impl(dsl_pool_t *dp, uint64_t dsobj,
    const char *tag, uint64_t now, dmu_tx_t *tx, boolean_t holding)
{
	objset_t *mos = dp->dp_meta_objset;
	uint64_t zapobj = dp->dp_tmp_userrefs_obj;
	char *name;
	int error;

	ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);
	ASSERT(dmu_tx_is_syncing(tx));

	/*
	 * If the pool was created prior to SPA_VERSION_USERREFS, the
	 * zap object for temporary holds might not exist yet.
	 */
	if (zapobj == 0) {
		if (holding) {
			dsl_pool_user_hold_create_obj(dp, tx);
			zapobj = dp->dp_tmp_userrefs_obj;
		} else {
			return (SET_ERROR(ENOENT));
		}
	}

	name = kmem_asprintf("%llx-%s", (u_longlong_t)dsobj, tag);
	if (holding)
		error = zap_add(mos, zapobj, name, 8, 1, &now, tx);
	else
		error = zap_remove(mos, zapobj, name, tx);
	strfree(name);

	return (error);
}

1023 | ||
1024 | /* | |
1025 | * Add a temporary hold for the given dataset object and tag. | |
1026 | */ | |
1027 | int | |
1028 | dsl_pool_user_hold(dsl_pool_t *dp, uint64_t dsobj, const char *tag, | |
13fe0198 | 1029 | uint64_t now, dmu_tx_t *tx) |
428870ff BB |
1030 | { |
1031 | return (dsl_pool_user_hold_rele_impl(dp, dsobj, tag, now, tx, B_TRUE)); | |
1032 | } | |
1033 | ||
1034 | /* | |
1035 | * Release a temporary hold for the given dataset object and tag. | |
1036 | */ | |
1037 | int | |
1038 | dsl_pool_user_release(dsl_pool_t *dp, uint64_t dsobj, const char *tag, | |
1039 | dmu_tx_t *tx) | |
1040 | { | |
13fe0198 | 1041 | return (dsl_pool_user_hold_rele_impl(dp, dsobj, tag, 0, |
428870ff BB |
1042 | tx, B_FALSE)); |
1043 | } | |
c409e464 | 1044 | |
/*
 * DSL Pool Configuration Lock
 *
 * The dp_config_rwlock protects against changes to DSL state (e.g. dataset
 * creation / destruction / rename / property setting). It must be held for
 * read to hold a dataset or dsl_dir. I.e. you must call
 * dsl_pool_config_enter() or dsl_pool_hold() before calling
 * dsl_{dataset,dir}_hold{_obj}. In most circumstances, the dp_config_rwlock
 * must be held continuously until all datasets and dsl_dirs are released.
 *
 * The only exception to this rule is that if a "long hold" is placed on
 * a dataset, then the dp_config_rwlock may be dropped while the dataset
 * is still held. The long hold will prevent the dataset from being
 * destroyed -- the destroy will fail with EBUSY. A long hold can be
 * obtained by calling dsl_dataset_long_hold(), or by "owning" a dataset
 * (by calling dsl_{dataset,objset}_{try}own{_obj}).
 *
 * Legitimate long-holders (including owners) are long-running, cancelable
 * tasks that should cause "zfs destroy" to fail. This includes DMU
 * consumers (i.e. a ZPL filesystem being mounted or ZVOL being open),
 * "zfs send", and "zfs diff". There are several other long-holders whose
 * uses are suboptimal (e.g. "zfs promote", and zil_suspend()).
 *
 * The usual formula for long-holding would be:
 * dsl_pool_hold()
 * dsl_dataset_hold()
 * ... perform checks ...
 * dsl_dataset_long_hold()
 * dsl_pool_rele()
 * ... perform long-running task ...
 * dsl_dataset_long_rele()
 * dsl_dataset_rele()
 *
 * Note that when the long hold is released, the dataset is still held but
 * the pool is not held. The dataset may change arbitrarily during this time
 * (e.g. it could be destroyed). Therefore you shouldn't do anything to the
 * dataset except release it.
 *
 * User-initiated operations (e.g. ioctls, zfs_ioc_*()) are either read-only
 * or modifying operations.
 *
 * Modifying operations should generally use dsl_sync_task(). The synctask
 * infrastructure enforces proper locking strategy with respect to the
 * dp_config_rwlock. See the comment above dsl_sync_task() for details.
 *
 * Read-only operations will manually hold the pool, then the dataset, obtain
 * information from the dataset, then release the pool and dataset.
 * dmu_objset_{hold,rele}() are convenience routines that also do the pool
 * hold/rele.
 */

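/*
 * Illustrative sketch of the long-hold formula above -- hypothetical
 * consumer code, compiled out because nothing in this file uses it. The
 * function name and its error handling are assumptions for illustration;
 * the dsl_pool_*() and dsl_dataset_*() calls are the real interfaces
 * described in the comment.
 */
#if 0
static int
example_long_running_task(const char *dsname, void *tag)
{
	dsl_pool_t *dp;
	dsl_dataset_t *ds;
	int error;

	/* Hold the pool (takes dp_config_rwlock as reader). */
	error = dsl_pool_hold(dsname, tag, &dp);
	if (error != 0)
		return (error);

	/* Hold the dataset while the pool is held. */
	error = dsl_dataset_hold(dp, dsname, tag, &ds);
	if (error != 0) {
		dsl_pool_rele(dp, tag);
		return (error);
	}

	/* ... perform checks ... */

	/* Long-hold the dataset, then drop the pool lock. */
	dsl_dataset_long_hold(ds, tag);
	dsl_pool_rele(dp, tag);

	/*
	 * ... perform long-running task; "zfs destroy" of this dataset
	 * now fails with EBUSY until the long hold is released ...
	 */

	dsl_dataset_long_rele(ds, tag);
	dsl_dataset_rele(ds, tag);
	return (0);
}
#endif
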
int
dsl_pool_hold(const char *name, void *tag, dsl_pool_t **dp)
{
	spa_t *spa;
	int error;

	error = spa_open(name, &spa, tag);
	if (error == 0) {
		*dp = spa_get_dsl(spa);
		dsl_pool_config_enter(*dp, tag);
	}
	return (error);
}

void
dsl_pool_rele(dsl_pool_t *dp, void *tag)
{
	dsl_pool_config_exit(dp, tag);
	spa_close(dp->dp_spa, tag);
}

void
dsl_pool_config_enter(dsl_pool_t *dp, void *tag)
{
	/*
	 * We use a "reentrant" reader-writer lock, but not reentrantly.
	 *
	 * The rrwlock can (with the track_all flag) track all reading threads,
	 * which is very useful for debugging which code path failed to release
	 * the lock, and for verifying that the *current* thread does hold
	 * the lock.
	 *
	 * (Unlike a rwlock, which knows that N threads hold it for
	 * read, but not *which* threads, so rw_held(RW_READER) returns TRUE
	 * if any thread holds it for read, even if this thread doesn't).
	 */
	ASSERT(!rrw_held(&dp->dp_config_rwlock, RW_READER));
	rrw_enter(&dp->dp_config_rwlock, RW_READER, tag);
}

void
dsl_pool_config_enter_prio(dsl_pool_t *dp, void *tag)
{
	ASSERT(!rrw_held(&dp->dp_config_rwlock, RW_READER));
	rrw_enter_read_prio(&dp->dp_config_rwlock, tag);
}

void
dsl_pool_config_exit(dsl_pool_t *dp, void *tag)
{
	rrw_exit(&dp->dp_config_rwlock, tag);
}

boolean_t
dsl_pool_config_held(dsl_pool_t *dp)
{
	return (RRW_LOCK_HELD(&dp->dp_config_rwlock));
}

boolean_t
dsl_pool_config_held_writer(dsl_pool_t *dp)
{
	return (RRW_WRITE_HELD(&dp->dp_config_rwlock));
}

#if defined(_KERNEL) && defined(HAVE_SPL)
EXPORT_SYMBOL(dsl_pool_config_enter);
EXPORT_SYMBOL(dsl_pool_config_exit);

/* BEGIN CSTYLED */
/* zfs_dirty_data_max_percent only applied at module load in arc_init(). */
module_param(zfs_dirty_data_max_percent, int, 0444);
MODULE_PARM_DESC(zfs_dirty_data_max_percent,
	"percent of RAM that can be dirty");

/* zfs_dirty_data_max_max_percent only applied at module load in arc_init(). */
module_param(zfs_dirty_data_max_max_percent, int, 0444);
MODULE_PARM_DESC(zfs_dirty_data_max_max_percent,
	"zfs_dirty_data_max upper bound as % of RAM");

module_param(zfs_delay_min_dirty_percent, int, 0644);
MODULE_PARM_DESC(zfs_delay_min_dirty_percent, "transaction delay threshold");

module_param(zfs_dirty_data_max, ulong, 0644);
MODULE_PARM_DESC(zfs_dirty_data_max, "determines the dirty space limit");

/* zfs_dirty_data_max_max only applied at module load in arc_init(). */
module_param(zfs_dirty_data_max_max, ulong, 0444);
MODULE_PARM_DESC(zfs_dirty_data_max_max,
	"zfs_dirty_data_max upper bound in bytes");

module_param(zfs_dirty_data_sync, ulong, 0644);
MODULE_PARM_DESC(zfs_dirty_data_sync, "sync txg when this much dirty data");

module_param(zfs_delay_scale, ulong, 0644);
MODULE_PARM_DESC(zfs_delay_scale, "how quickly delay approaches infinity");

module_param(zfs_sync_taskq_batch_pct, int, 0644);
MODULE_PARM_DESC(zfs_sync_taskq_batch_pct,
	"max percent of CPUs that are used to sync dirty data");

module_param(zfs_zil_clean_taskq_nthr_pct, int, 0644);
MODULE_PARM_DESC(zfs_zil_clean_taskq_nthr_pct,
	"max percent of CPUs that are used per dp_sync_taskq");

module_param(zfs_zil_clean_taskq_minalloc, int, 0644);
MODULE_PARM_DESC(zfs_zil_clean_taskq_minalloc,
	"number of taskq entries that are pre-populated");

module_param(zfs_zil_clean_taskq_maxalloc, int, 0644);
MODULE_PARM_DESC(zfs_zil_clean_taskq_maxalloc,
	"max number of taskq entries that are cached");
/* END CSTYLED */
#endif