/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2017 by Delphix. All rights reserved.
 * Copyright (c) 2013 Steven Hartland. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright 2016 Nexenta Systems, Inc. All rights reserved.
 */

#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_scan.h>
#include <sys/dnode.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/arc.h>
#include <sys/zap.h>
#include <sys/zio.h>
#include <sys/zfs_context.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_znode.h>
#include <sys/spa_impl.h>
#include <sys/dsl_deadlist.h>
#include <sys/bptree.h>
#include <sys/zfeature.h>
#include <sys/zil_impl.h>
#include <sys/dsl_userhold.h>
#include <sys/trace_txg.h>
#include <sys/mmp.h>

/*
 * ZFS Write Throttle
 * ------------------
 *
 * ZFS must limit the rate of incoming writes to the rate at which it is able
 * to sync data modifications to the backend storage. Throttling by too much
 * creates an artificial limit; throttling by too little can only be sustained
 * for short periods and would lead to highly lumpy performance. On a per-pool
 * basis, ZFS tracks the amount of modified (dirty) data. As operations change
 * data, the amount of dirty data increases; as ZFS syncs out data, the amount
 * of dirty data decreases. When the amount of dirty data exceeds a
 * predetermined threshold further modifications are blocked until the amount
 * of dirty data decreases (as data is synced out).
 *
 * The limit on dirty data is tunable, and should be adjusted according to
 * both the IO capacity and available memory of the system. The larger the
 * window, the more ZFS is able to aggregate and amortize metadata (and data)
 * changes. However, memory is a limited resource, and allowing for more dirty
 * data comes at the cost of keeping other useful data in memory (for example
 * ZFS data cached by the ARC).
 *
 * Implementation
 *
 * As buffers are modified dsl_pool_willuse_space() increments both the per-
 * txg (dp_dirty_pertxg[]) and poolwide (dp_dirty_total) accounting of
 * dirty space used; dsl_pool_dirty_space() decrements those values as data
 * is synced out from dsl_pool_sync(). While only the poolwide value is
 * relevant, the per-txg value is useful for debugging. The tunable
 * zfs_dirty_data_max determines the dirty space limit. Once that value is
 * exceeded, new writes are halted until space frees up.
 *
 * The zfs_dirty_data_sync tunable dictates the threshold at which we
 * ensure that there is a txg syncing (see the comment in txg.c for a full
 * description of transaction group stages).
 *
 * The IO scheduler uses both the dirty space limit and current amount of
 * dirty data as inputs. Those values affect the number of concurrent IOs ZFS
 * issues. See the comment in vdev_queue.c for details of the IO scheduler.
 *
 * The delay is also calculated based on the amount of dirty data. See the
 * comment above dmu_tx_delay() for details.
 */

/*
 * zfs_dirty_data_max will be set to zfs_dirty_data_max_percent% of all memory,
 * capped at zfs_dirty_data_max_max. It can also be overridden with a module
 * parameter.
 */
unsigned long zfs_dirty_data_max = 0;
unsigned long zfs_dirty_data_max_max = 0;
int zfs_dirty_data_max_percent = 10;
int zfs_dirty_data_max_max_percent = 25;
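
/*
 * Illustrative sizing example (added for clarity; the memory figure below is
 * an assumption, not a value from the original source): on a system with
 * 32 GiB of RAM and the defaults above, zfs_dirty_data_max starts at 10% of
 * memory (~3.2 GiB), and zfs_dirty_data_max_max caps any later increase at
 * 25% of memory (8 GiB), unless both are overridden via their module
 * parameters.
 */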

/*
 * If there is at least this much dirty data, push out a txg.
 */
unsigned long zfs_dirty_data_sync = 64 * 1024 * 1024;

/*
 * Once there is this amount of dirty data, the dmu_tx_delay() will kick in
 * and delay each transaction.
 * This value should be >= zfs_vdev_async_write_active_max_dirty_percent.
 */
int zfs_delay_min_dirty_percent = 60;

/*
 * This controls how quickly the delay approaches infinity.
 * Larger values cause it to delay more for a given amount of dirty data.
 * Therefore larger values will cause there to be less dirty data for a
 * given throughput.
 *
 * For the smoothest delay, this value should be about 1 billion divided
 * by the maximum number of operations per second. This will smoothly
 * handle between 10x and 1/10th this number.
 *
 * Note: zfs_delay_scale * zfs_dirty_data_max must be < 2^64, due to the
 * multiply in dmu_tx_delay().
 */
unsigned long zfs_delay_scale = 1000 * 1000 * 1000 / 2000;
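
/*
 * Worked example (added for clarity; the operation rates are illustrative
 * assumptions): the default of 1000 * 1000 * 1000 / 2000 = 500,000
 * corresponds to a backend capable of roughly 2,000 operations per second,
 * and per the note above the delay curve stays smooth from roughly 200 up
 * to 20,000 operations per second (1/10th to 10x that rate).
 */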

/*
 * This determines the number of threads used by the dp_sync_taskq.
 */
int zfs_sync_taskq_batch_pct = 75;

/*
 * These tunables determine the behavior of how zil_itxg_clean() is
 * called via zil_clean() in the context of spa_sync(). When an itxg
 * list needs to be cleaned, TQ_NOSLEEP will be used when dispatching.
 * If the dispatch fails, the call to zil_itxg_clean() will occur
 * synchronously in the context of spa_sync(), which can negatively
 * impact the performance of spa_sync() (e.g. in the case of the itxg
 * list having a large number of itxs that need to be cleaned).
 *
 * Thus, these tunables can be used to manipulate the behavior of the
 * taskq used by zil_clean(); they determine the number of taskq entries
 * that are pre-populated when the taskq is first created (via the
 * "zfs_zil_clean_taskq_minalloc" tunable) and the maximum number of
 * taskq entries that are cached after an on-demand allocation (via the
 * "zfs_zil_clean_taskq_maxalloc" tunable).
 *
 * The idea being, we want to try reasonably hard to ensure there will
 * already be a taskq entry pre-allocated by the time that it is needed
 * by zil_clean(). This way, we can avoid the possibility of an
 * on-demand allocation of a new taskq entry from failing, which would
 * result in zil_itxg_clean() being called synchronously from zil_clean()
 * (which can adversely affect performance of spa_sync()).
 *
 * Additionally, the number of threads used by the taskq can be
 * configured via the "zfs_zil_clean_taskq_nthr_pct" tunable.
 */
int zfs_zil_clean_taskq_nthr_pct = 100;
int zfs_zil_clean_taskq_minalloc = 1024;
int zfs_zil_clean_taskq_maxalloc = 1024 * 1024;

int
dsl_pool_open_special_dir(dsl_pool_t *dp, const char *name, dsl_dir_t **ddp)
{
	uint64_t obj;
	int err;

	err = zap_lookup(dp->dp_meta_objset,
	    dsl_dir_phys(dp->dp_root_dir)->dd_child_dir_zapobj,
	    name, sizeof (obj), 1, &obj);
	if (err)
		return (err);

	return (dsl_dir_hold_obj(dp, obj, name, dp, ddp));
}

static dsl_pool_t *
dsl_pool_open_impl(spa_t *spa, uint64_t txg)
{
	dsl_pool_t *dp;
	blkptr_t *bp = spa_get_rootblkptr(spa);

	dp = kmem_zalloc(sizeof (dsl_pool_t), KM_SLEEP);
	dp->dp_spa = spa;
	dp->dp_meta_rootbp = *bp;
	rrw_init(&dp->dp_config_rwlock, B_TRUE);
	txg_init(dp, txg);
	mmp_init(spa);

	txg_list_create(&dp->dp_dirty_datasets, spa,
	    offsetof(dsl_dataset_t, ds_dirty_link));
	txg_list_create(&dp->dp_dirty_zilogs, spa,
	    offsetof(zilog_t, zl_dirty_link));
	txg_list_create(&dp->dp_dirty_dirs, spa,
	    offsetof(dsl_dir_t, dd_dirty_link));
	txg_list_create(&dp->dp_sync_tasks, spa,
	    offsetof(dsl_sync_task_t, dst_node));

	dp->dp_sync_taskq = taskq_create("dp_sync_taskq",
	    zfs_sync_taskq_batch_pct, minclsyspri, 1, INT_MAX,
	    TASKQ_THREADS_CPU_PCT);

	dp->dp_zil_clean_taskq = taskq_create("dp_zil_clean_taskq",
	    zfs_zil_clean_taskq_nthr_pct, minclsyspri,
	    zfs_zil_clean_taskq_minalloc,
	    zfs_zil_clean_taskq_maxalloc,
	    TASKQ_PREPOPULATE | TASKQ_THREADS_CPU_PCT);

	mutex_init(&dp->dp_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&dp->dp_spaceavail_cv, NULL, CV_DEFAULT, NULL);

	dp->dp_iput_taskq = taskq_create("z_iput", max_ncpus, defclsyspri,
	    max_ncpus * 8, INT_MAX, TASKQ_PREPOPULATE | TASKQ_DYNAMIC);

	return (dp);
}

int
dsl_pool_init(spa_t *spa, uint64_t txg, dsl_pool_t **dpp)
{
	int err;
	dsl_pool_t *dp = dsl_pool_open_impl(spa, txg);

	/*
	 * Initialize the caller's dsl_pool_t structure before we actually open
	 * the meta objset. This is done because a self-healing write zio may
	 * be issued as part of dmu_objset_open_impl() and the spa needs its
	 * dsl_pool_t initialized in order to handle the write.
	 */
	*dpp = dp;

	err = dmu_objset_open_impl(spa, NULL, &dp->dp_meta_rootbp,
	    &dp->dp_meta_objset);
	if (err != 0) {
		dsl_pool_close(dp);
		*dpp = NULL;
	}

	return (err);
}

int
dsl_pool_open(dsl_pool_t *dp)
{
	int err;
	dsl_dir_t *dd;
	dsl_dataset_t *ds;
	uint64_t obj;

	rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);
	err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_ROOT_DATASET, sizeof (uint64_t), 1,
	    &dp->dp_root_dir_obj);
	if (err)
		goto out;

	err = dsl_dir_hold_obj(dp, dp->dp_root_dir_obj,
	    NULL, dp, &dp->dp_root_dir);
	if (err)
		goto out;

	err = dsl_pool_open_special_dir(dp, MOS_DIR_NAME, &dp->dp_mos_dir);
	if (err)
		goto out;

	if (spa_version(dp->dp_spa) >= SPA_VERSION_ORIGIN) {
		err = dsl_pool_open_special_dir(dp, ORIGIN_DIR_NAME, &dd);
		if (err)
			goto out;
		err = dsl_dataset_hold_obj(dp,
		    dsl_dir_phys(dd)->dd_head_dataset_obj, FTAG, &ds);
		if (err == 0) {
			err = dsl_dataset_hold_obj(dp,
			    dsl_dataset_phys(ds)->ds_prev_snap_obj, dp,
			    &dp->dp_origin_snap);
			dsl_dataset_rele(ds, FTAG);
		}
		dsl_dir_rele(dd, dp);
		if (err)
			goto out;
	}

	if (spa_version(dp->dp_spa) >= SPA_VERSION_DEADLISTS) {
		err = dsl_pool_open_special_dir(dp, FREE_DIR_NAME,
		    &dp->dp_free_dir);
		if (err)
			goto out;

		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_FREE_BPOBJ, sizeof (uint64_t), 1, &obj);
		if (err)
			goto out;
		VERIFY0(bpobj_open(&dp->dp_free_bpobj,
		    dp->dp_meta_objset, obj));
	}

	/*
	 * Note: errors ignored, because the leak dir will not exist if we
	 * have not encountered a leak yet.
	 */
	(void) dsl_pool_open_special_dir(dp, LEAK_DIR_NAME,
	    &dp->dp_leak_dir);

	if (spa_feature_is_active(dp->dp_spa, SPA_FEATURE_ASYNC_DESTROY)) {
		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_BPTREE_OBJ, sizeof (uint64_t), 1,
		    &dp->dp_bptree_obj);
		if (err != 0)
			goto out;
	}

	if (spa_feature_is_active(dp->dp_spa, SPA_FEATURE_EMPTY_BPOBJ)) {
		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_EMPTY_BPOBJ, sizeof (uint64_t), 1,
		    &dp->dp_empty_bpobj);
		if (err != 0)
			goto out;
	}

	err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_TMP_USERREFS, sizeof (uint64_t), 1,
	    &dp->dp_tmp_userrefs_obj);
	if (err == ENOENT)
		err = 0;
	if (err)
		goto out;

	err = dsl_scan_init(dp, dp->dp_tx.tx_open_txg);

out:
	rrw_exit(&dp->dp_config_rwlock, FTAG);
	return (err);
}

void
dsl_pool_close(dsl_pool_t *dp)
{
	/*
	 * Drop our references from dsl_pool_open().
	 *
	 * Since we held the origin_snap from "syncing" context (which
	 * includes pool-opening context), it actually only got a "ref"
	 * and not a hold, so just drop that here.
	 */
	if (dp->dp_origin_snap)
		dsl_dataset_rele(dp->dp_origin_snap, dp);
	if (dp->dp_mos_dir)
		dsl_dir_rele(dp->dp_mos_dir, dp);
	if (dp->dp_free_dir)
		dsl_dir_rele(dp->dp_free_dir, dp);
	if (dp->dp_leak_dir)
		dsl_dir_rele(dp->dp_leak_dir, dp);
	if (dp->dp_root_dir)
		dsl_dir_rele(dp->dp_root_dir, dp);

	bpobj_close(&dp->dp_free_bpobj);

	/* undo the dmu_objset_open_impl(mos) from dsl_pool_open() */
	if (dp->dp_meta_objset)
		dmu_objset_evict(dp->dp_meta_objset);

	txg_list_destroy(&dp->dp_dirty_datasets);
	txg_list_destroy(&dp->dp_dirty_zilogs);
	txg_list_destroy(&dp->dp_sync_tasks);
	txg_list_destroy(&dp->dp_dirty_dirs);

	taskq_destroy(dp->dp_zil_clean_taskq);
	taskq_destroy(dp->dp_sync_taskq);

	/*
	 * We can't set retry to TRUE since we're explicitly specifying
	 * a spa to flush. This is good enough; any missed buffers for
	 * this spa won't cause trouble, and they'll eventually fall
	 * out of the ARC just like any other unused buffer.
	 */
	arc_flush(dp->dp_spa, FALSE);

	mmp_fini(dp->dp_spa);
	txg_fini(dp);
	dsl_scan_fini(dp);
	dmu_buf_user_evict_wait();

	rrw_destroy(&dp->dp_config_rwlock);
	mutex_destroy(&dp->dp_lock);
	cv_destroy(&dp->dp_spaceavail_cv);
	taskq_destroy(dp->dp_iput_taskq);
	if (dp->dp_blkstats) {
		mutex_destroy(&dp->dp_blkstats->zab_lock);
		vmem_free(dp->dp_blkstats, sizeof (zfs_all_blkstats_t));
	}
	kmem_free(dp, sizeof (dsl_pool_t));
}

dsl_pool_t *
dsl_pool_create(spa_t *spa, nvlist_t *zplprops, dsl_crypto_params_t *dcp,
    uint64_t txg)
{
	int err;
	dsl_pool_t *dp = dsl_pool_open_impl(spa, txg);
	dmu_tx_t *tx = dmu_tx_create_assigned(dp, txg);
	dsl_dataset_t *ds;
	uint64_t obj;

	rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);

	/* create and open the MOS (meta-objset) */
	dp->dp_meta_objset = dmu_objset_create_impl(spa,
	    NULL, &dp->dp_meta_rootbp, DMU_OST_META, tx);
	spa->spa_meta_objset = dp->dp_meta_objset;

	/* create the pool directory */
	err = zap_create_claim(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_OT_OBJECT_DIRECTORY, DMU_OT_NONE, 0, tx);
	ASSERT0(err);

	/* Initialize scan structures */
	VERIFY0(dsl_scan_init(dp, txg));

	/* create and open the root dir */
	dp->dp_root_dir_obj = dsl_dir_create_sync(dp, NULL, NULL, tx);
	VERIFY0(dsl_dir_hold_obj(dp, dp->dp_root_dir_obj,
	    NULL, dp, &dp->dp_root_dir));

	/* create and open the meta-objset dir */
	(void) dsl_dir_create_sync(dp, dp->dp_root_dir, MOS_DIR_NAME, tx);
	VERIFY0(dsl_pool_open_special_dir(dp,
	    MOS_DIR_NAME, &dp->dp_mos_dir));

	if (spa_version(spa) >= SPA_VERSION_DEADLISTS) {
		/* create and open the free dir */
		(void) dsl_dir_create_sync(dp, dp->dp_root_dir,
		    FREE_DIR_NAME, tx);
		VERIFY0(dsl_pool_open_special_dir(dp,
		    FREE_DIR_NAME, &dp->dp_free_dir));

		/* create and open the free_bplist */
		obj = bpobj_alloc(dp->dp_meta_objset, SPA_OLD_MAXBLOCKSIZE, tx);
		VERIFY(zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_FREE_BPOBJ, sizeof (uint64_t), 1, &obj, tx) == 0);
		VERIFY0(bpobj_open(&dp->dp_free_bpobj,
		    dp->dp_meta_objset, obj));
	}

	if (spa_version(spa) >= SPA_VERSION_DSL_SCRUB)
		dsl_pool_create_origin(dp, tx);

	/*
	 * Some features may be needed when creating the root dataset, so we
	 * create the feature objects here.
	 */
	if (spa_version(spa) >= SPA_VERSION_FEATURES)
		spa_feature_create_zap_objects(spa, tx);

	if (dcp != NULL && dcp->cp_crypt != ZIO_CRYPT_OFF &&
	    dcp->cp_crypt != ZIO_CRYPT_INHERIT)
		spa_feature_enable(spa, SPA_FEATURE_ENCRYPTION, tx);

	/* create the root dataset */
	obj = dsl_dataset_create_sync_dd(dp->dp_root_dir, NULL, dcp, 0, tx);

	/* create the root objset */
	VERIFY0(dsl_dataset_hold_obj(dp, obj, FTAG, &ds));
#ifdef _KERNEL
	{
		objset_t *os;
		rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
		os = dmu_objset_create_impl(dp->dp_spa, ds,
		    dsl_dataset_get_blkptr(ds), DMU_OST_ZFS, tx);
		rrw_exit(&ds->ds_bp_rwlock, FTAG);
		zfs_create_fs(os, kcred, zplprops, tx);
	}
#endif
	dsl_dataset_rele(ds, FTAG);

	dmu_tx_commit(tx);

	rrw_exit(&dp->dp_config_rwlock, FTAG);

	return (dp);
}

/*
 * Account for the meta-objset space in its placeholder dsl_dir.
 */
void
dsl_pool_mos_diduse_space(dsl_pool_t *dp,
    int64_t used, int64_t comp, int64_t uncomp)
{
	ASSERT3U(comp, ==, uncomp);	/* it's all metadata */
	mutex_enter(&dp->dp_lock);
	dp->dp_mos_used_delta += used;
	dp->dp_mos_compressed_delta += comp;
	dp->dp_mos_uncompressed_delta += uncomp;
	mutex_exit(&dp->dp_lock);
}

static void
dsl_pool_sync_mos(dsl_pool_t *dp, dmu_tx_t *tx)
{
	zio_t *zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
	dmu_objset_sync(dp->dp_meta_objset, zio, tx);
	VERIFY0(zio_wait(zio));
	dprintf_bp(&dp->dp_meta_rootbp, "meta objset rootbp is %s", "");
	spa_set_rootblkptr(dp->dp_spa, &dp->dp_meta_rootbp);
}

static void
dsl_pool_dirty_delta(dsl_pool_t *dp, int64_t delta)
{
	ASSERT(MUTEX_HELD(&dp->dp_lock));

	if (delta < 0)
		ASSERT3U(-delta, <=, dp->dp_dirty_total);

	dp->dp_dirty_total += delta;

	/*
	 * Note: we signal even when increasing dp_dirty_total.
	 * This ensures forward progress -- each thread wakes the next waiter.
	 */
	if (dp->dp_dirty_total < zfs_dirty_data_max)
		cv_signal(&dp->dp_spaceavail_cv);
}

void
dsl_pool_sync(dsl_pool_t *dp, uint64_t txg)
{
	zio_t *zio;
	dmu_tx_t *tx;
	dsl_dir_t *dd;
	dsl_dataset_t *ds;
	objset_t *mos = dp->dp_meta_objset;
	list_t synced_datasets;

	list_create(&synced_datasets, sizeof (dsl_dataset_t),
	    offsetof(dsl_dataset_t, ds_synced_link));

	tx = dmu_tx_create_assigned(dp, txg);

	/*
	 * Write out all dirty blocks of dirty datasets.
	 */
	zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
	while ((ds = txg_list_remove(&dp->dp_dirty_datasets, txg)) != NULL) {
		/*
		 * We must not sync any non-MOS datasets twice, because
		 * we may have taken a snapshot of them. However, we
		 * may sync newly-created datasets on pass 2.
		 */
		ASSERT(!list_link_active(&ds->ds_synced_link));
		list_insert_tail(&synced_datasets, ds);
		dsl_dataset_sync(ds, zio, tx);
	}
	VERIFY0(zio_wait(zio));

	/*
	 * We have written all of the accounted dirty data, so our
	 * dp_space_towrite should now be zero. However, some seldom-used
	 * code paths do not adhere to this (e.g. dbuf_undirty(), also
	 * rounding error in dbuf_write_physdone).
	 * Shore up the accounting of any dirtied space now.
	 */
	dsl_pool_undirty_space(dp, dp->dp_dirty_pertxg[txg & TXG_MASK], txg);

	/*
	 * Update the long range free counter after
	 * we're done syncing user data
	 */
	mutex_enter(&dp->dp_lock);
	ASSERT(spa_sync_pass(dp->dp_spa) == 1 ||
	    dp->dp_long_free_dirty_pertxg[txg & TXG_MASK] == 0);
	dp->dp_long_free_dirty_pertxg[txg & TXG_MASK] = 0;
	mutex_exit(&dp->dp_lock);

	/*
	 * After the data blocks have been written (ensured by the zio_wait()
	 * above), update the user/group/project space accounting. This happens
	 * in tasks dispatched to dp_sync_taskq, so wait for them before
	 * continuing.
	 */
	for (ds = list_head(&synced_datasets); ds != NULL;
	    ds = list_next(&synced_datasets, ds)) {
		dmu_objset_do_userquota_updates(ds->ds_objset, tx);
	}
	taskq_wait(dp->dp_sync_taskq);

	/*
	 * Sync the datasets again to push out the changes due to
	 * userspace updates. This must be done before we process the
	 * sync tasks, so that any snapshots will have the correct
	 * user accounting information (and we won't get confused
	 * about which blocks are part of the snapshot).
	 */
	zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
	while ((ds = txg_list_remove(&dp->dp_dirty_datasets, txg)) != NULL) {
		ASSERT(list_link_active(&ds->ds_synced_link));
		dmu_buf_rele(ds->ds_dbuf, ds);
		dsl_dataset_sync(ds, zio, tx);
	}
	VERIFY0(zio_wait(zio));

	/*
	 * Now that the datasets have been completely synced, we can
	 * clean up our in-memory structures accumulated while syncing:
	 *
	 *  - move dead blocks from the pending deadlist to the on-disk deadlist
	 *  - release hold from dsl_dataset_dirty()
	 */
	while ((ds = list_remove_head(&synced_datasets)) != NULL) {
		dsl_dataset_sync_done(ds, tx);
	}

	while ((dd = txg_list_remove(&dp->dp_dirty_dirs, txg)) != NULL) {
		dsl_dir_sync(dd, tx);
	}

	/*
	 * The MOS's space is accounted for in the pool/$MOS
	 * (dp_mos_dir). We can't modify the mos while we're syncing
	 * it, so we remember the deltas and apply them here.
	 */
	if (dp->dp_mos_used_delta != 0 || dp->dp_mos_compressed_delta != 0 ||
	    dp->dp_mos_uncompressed_delta != 0) {
		dsl_dir_diduse_space(dp->dp_mos_dir, DD_USED_HEAD,
		    dp->dp_mos_used_delta,
		    dp->dp_mos_compressed_delta,
		    dp->dp_mos_uncompressed_delta, tx);
		dp->dp_mos_used_delta = 0;
		dp->dp_mos_compressed_delta = 0;
		dp->dp_mos_uncompressed_delta = 0;
	}

	if (!multilist_is_empty(mos->os_dirty_dnodes[txg & TXG_MASK])) {
		dsl_pool_sync_mos(dp, tx);
	}

	/*
	 * If we modify a dataset in the same txg that we want to destroy it,
	 * its dsl_dir's dd_dbuf will be dirty, and thus have a hold on it.
	 * dsl_dir_destroy_check() will fail if there are unexpected holds.
	 * Therefore, we want to sync the MOS (thus syncing the dd_dbuf
	 * and clearing the hold on it) before we process the sync_tasks.
	 * The MOS data dirtied by the sync_tasks will be synced on the next
	 * pass.
	 */
	if (!txg_list_empty(&dp->dp_sync_tasks, txg)) {
		dsl_sync_task_t *dst;
		/*
		 * No more sync tasks should have been added while we
		 * were syncing.
		 */
		ASSERT3U(spa_sync_pass(dp->dp_spa), ==, 1);
		while ((dst = txg_list_remove(&dp->dp_sync_tasks, txg)) != NULL)
			dsl_sync_task_sync(dst, tx);
	}

	dmu_tx_commit(tx);

	DTRACE_PROBE2(dsl_pool_sync__done, dsl_pool_t *dp, dp, uint64_t, txg);
}

void
dsl_pool_sync_done(dsl_pool_t *dp, uint64_t txg)
{
	zilog_t *zilog;

	while ((zilog = txg_list_head(&dp->dp_dirty_zilogs, txg))) {
		dsl_dataset_t *ds = dmu_objset_ds(zilog->zl_os);
		/*
		 * We don't remove the zilog from the dp_dirty_zilogs
		 * list until after we've cleaned it. This ensures that
		 * callers of zilog_is_dirty() receive an accurate
		 * answer when they are racing with the spa sync thread.
		 */
		zil_clean(zilog, txg);
		(void) txg_list_remove_this(&dp->dp_dirty_zilogs, zilog, txg);
		ASSERT(!dmu_objset_is_dirty(zilog->zl_os, txg));
		dmu_buf_rele(ds->ds_dbuf, zilog);
	}
	ASSERT(!dmu_objset_is_dirty(dp->dp_meta_objset, txg));
}

/*
 * TRUE if the current thread is the tx_sync_thread or if we
 * are being called from SPA context during pool initialization.
 */
int
dsl_pool_sync_context(dsl_pool_t *dp)
{
	return (curthread == dp->dp_tx.tx_sync_thread ||
	    spa_is_initializing(dp->dp_spa) ||
	    taskq_member(dp->dp_sync_taskq, curthread));
}

uint64_t
dsl_pool_adjustedsize(dsl_pool_t *dp, boolean_t netfree)
{
	uint64_t space, resv;

	/*
	 * If we're trying to assess whether it's OK to do a free,
	 * cut the reservation in half to allow forward progress
	 * (e.g. make it possible to rm(1) files from a full pool).
	 */
	space = spa_get_dspace(dp->dp_spa);
	resv = spa_get_slop_space(dp->dp_spa);
	if (netfree)
		resv >>= 1;

	return (space - resv);
}

boolean_t
dsl_pool_need_dirty_delay(dsl_pool_t *dp)
{
	uint64_t delay_min_bytes =
	    zfs_dirty_data_max * zfs_delay_min_dirty_percent / 100;
	boolean_t rv;

	mutex_enter(&dp->dp_lock);
	if (dp->dp_dirty_total > zfs_dirty_data_sync)
		txg_kick(dp);
	rv = (dp->dp_dirty_total > delay_min_bytes);
	mutex_exit(&dp->dp_lock);
	return (rv);
}

void
dsl_pool_dirty_space(dsl_pool_t *dp, int64_t space, dmu_tx_t *tx)
{
	if (space > 0) {
		mutex_enter(&dp->dp_lock);
		dp->dp_dirty_pertxg[tx->tx_txg & TXG_MASK] += space;
		dsl_pool_dirty_delta(dp, space);
		mutex_exit(&dp->dp_lock);
	}
}

void
dsl_pool_undirty_space(dsl_pool_t *dp, int64_t space, uint64_t txg)
{
	ASSERT3S(space, >=, 0);
	if (space == 0)
		return;

	mutex_enter(&dp->dp_lock);
	if (dp->dp_dirty_pertxg[txg & TXG_MASK] < space) {
		/* XXX writing something we didn't dirty? */
		space = dp->dp_dirty_pertxg[txg & TXG_MASK];
	}
	ASSERT3U(dp->dp_dirty_pertxg[txg & TXG_MASK], >=, space);
	dp->dp_dirty_pertxg[txg & TXG_MASK] -= space;
	ASSERT3U(dp->dp_dirty_total, >=, space);
	dsl_pool_dirty_delta(dp, -space);
	mutex_exit(&dp->dp_lock);
}

/* ARGSUSED */
static int
upgrade_clones_cb(dsl_pool_t *dp, dsl_dataset_t *hds, void *arg)
{
	dmu_tx_t *tx = arg;
	dsl_dataset_t *ds, *prev = NULL;
	int err;

	err = dsl_dataset_hold_obj(dp, hds->ds_object, FTAG, &ds);
	if (err)
		return (err);

	while (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0) {
		err = dsl_dataset_hold_obj(dp,
		    dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &prev);
		if (err) {
			dsl_dataset_rele(ds, FTAG);
			return (err);
		}

		if (dsl_dataset_phys(prev)->ds_next_snap_obj != ds->ds_object)
			break;
		dsl_dataset_rele(ds, FTAG);
		ds = prev;
		prev = NULL;
	}

	if (prev == NULL) {
		prev = dp->dp_origin_snap;

		/*
		 * The $ORIGIN can't have any data, or the accounting
		 * will be wrong.
		 */
		rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
		ASSERT0(dsl_dataset_phys(prev)->ds_bp.blk_birth);
		rrw_exit(&ds->ds_bp_rwlock, FTAG);

		/* The origin doesn't get attached to itself */
		if (ds->ds_object == prev->ds_object) {
			dsl_dataset_rele(ds, FTAG);
			return (0);
		}

		dmu_buf_will_dirty(ds->ds_dbuf, tx);
		dsl_dataset_phys(ds)->ds_prev_snap_obj = prev->ds_object;
		dsl_dataset_phys(ds)->ds_prev_snap_txg =
		    dsl_dataset_phys(prev)->ds_creation_txg;

		dmu_buf_will_dirty(ds->ds_dir->dd_dbuf, tx);
		dsl_dir_phys(ds->ds_dir)->dd_origin_obj = prev->ds_object;

		dmu_buf_will_dirty(prev->ds_dbuf, tx);
		dsl_dataset_phys(prev)->ds_num_children++;

		if (dsl_dataset_phys(ds)->ds_next_snap_obj == 0) {
			ASSERT(ds->ds_prev == NULL);
			VERIFY0(dsl_dataset_hold_obj(dp,
			    dsl_dataset_phys(ds)->ds_prev_snap_obj,
			    ds, &ds->ds_prev));
		}
	}

	ASSERT3U(dsl_dir_phys(ds->ds_dir)->dd_origin_obj, ==, prev->ds_object);
	ASSERT3U(dsl_dataset_phys(ds)->ds_prev_snap_obj, ==, prev->ds_object);

	if (dsl_dataset_phys(prev)->ds_next_clones_obj == 0) {
		dmu_buf_will_dirty(prev->ds_dbuf, tx);
		dsl_dataset_phys(prev)->ds_next_clones_obj =
		    zap_create(dp->dp_meta_objset,
		    DMU_OT_NEXT_CLONES, DMU_OT_NONE, 0, tx);
	}
	VERIFY0(zap_add_int(dp->dp_meta_objset,
	    dsl_dataset_phys(prev)->ds_next_clones_obj, ds->ds_object, tx));

	dsl_dataset_rele(ds, FTAG);
	if (prev != dp->dp_origin_snap)
		dsl_dataset_rele(prev, FTAG);
	return (0);
}

void
dsl_pool_upgrade_clones(dsl_pool_t *dp, dmu_tx_t *tx)
{
	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(dp->dp_origin_snap != NULL);

	VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj, upgrade_clones_cb,
	    tx, DS_FIND_CHILDREN | DS_FIND_SERIALIZE));
}

/* ARGSUSED */
static int
upgrade_dir_clones_cb(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)
{
	dmu_tx_t *tx = arg;
	objset_t *mos = dp->dp_meta_objset;

	if (dsl_dir_phys(ds->ds_dir)->dd_origin_obj != 0) {
		dsl_dataset_t *origin;

		VERIFY0(dsl_dataset_hold_obj(dp,
		    dsl_dir_phys(ds->ds_dir)->dd_origin_obj, FTAG, &origin));

		if (dsl_dir_phys(origin->ds_dir)->dd_clones == 0) {
			dmu_buf_will_dirty(origin->ds_dir->dd_dbuf, tx);
			dsl_dir_phys(origin->ds_dir)->dd_clones =
			    zap_create(mos, DMU_OT_DSL_CLONES, DMU_OT_NONE,
			    0, tx);
		}

		VERIFY0(zap_add_int(dp->dp_meta_objset,
		    dsl_dir_phys(origin->ds_dir)->dd_clones,
		    ds->ds_object, tx));

		dsl_dataset_rele(origin, FTAG);
	}
	return (0);
}

void
dsl_pool_upgrade_dir_clones(dsl_pool_t *dp, dmu_tx_t *tx)
{
	uint64_t obj;

	ASSERT(dmu_tx_is_syncing(tx));

	(void) dsl_dir_create_sync(dp, dp->dp_root_dir, FREE_DIR_NAME, tx);
	VERIFY0(dsl_pool_open_special_dir(dp,
	    FREE_DIR_NAME, &dp->dp_free_dir));

	/*
	 * We can't use bpobj_alloc(), because spa_version() still
	 * returns the old version, and we need a new-version bpobj with
	 * subobj support. So call dmu_object_alloc() directly.
	 */
	obj = dmu_object_alloc(dp->dp_meta_objset, DMU_OT_BPOBJ,
	    SPA_OLD_MAXBLOCKSIZE, DMU_OT_BPOBJ_HDR, sizeof (bpobj_phys_t), tx);
	VERIFY0(zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_FREE_BPOBJ, sizeof (uint64_t), 1, &obj, tx));
	VERIFY0(bpobj_open(&dp->dp_free_bpobj, dp->dp_meta_objset, obj));

	VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
	    upgrade_dir_clones_cb, tx, DS_FIND_CHILDREN | DS_FIND_SERIALIZE));
}

void
dsl_pool_create_origin(dsl_pool_t *dp, dmu_tx_t *tx)
{
	uint64_t dsobj;
	dsl_dataset_t *ds;

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(dp->dp_origin_snap == NULL);
	ASSERT(rrw_held(&dp->dp_config_rwlock, RW_WRITER));

	/* create the origin dir, ds, & snap-ds */
	dsobj = dsl_dataset_create_sync(dp->dp_root_dir, ORIGIN_DIR_NAME,
	    NULL, 0, kcred, NULL, tx);
	VERIFY0(dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
	dsl_dataset_snapshot_sync_impl(ds, ORIGIN_DIR_NAME, tx);
	VERIFY0(dsl_dataset_hold_obj(dp, dsl_dataset_phys(ds)->ds_prev_snap_obj,
	    dp, &dp->dp_origin_snap));
	dsl_dataset_rele(ds, FTAG);
}

taskq_t *
dsl_pool_iput_taskq(dsl_pool_t *dp)
{
	return (dp->dp_iput_taskq);
}

/*
 * Walk through the pool-wide zap object of temporary snapshot user holds
 * and release them.
 */
void
dsl_pool_clean_tmp_userrefs(dsl_pool_t *dp)
{
	zap_attribute_t za;
	zap_cursor_t zc;
	objset_t *mos = dp->dp_meta_objset;
	uint64_t zapobj = dp->dp_tmp_userrefs_obj;
	nvlist_t *holds;

	if (zapobj == 0)
		return;
	ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);

	holds = fnvlist_alloc();

	for (zap_cursor_init(&zc, mos, zapobj);
	    zap_cursor_retrieve(&zc, &za) == 0;
	    zap_cursor_advance(&zc)) {
		char *htag;
		nvlist_t *tags;

		htag = strchr(za.za_name, '-');
		*htag = '\0';
		++htag;
		if (nvlist_lookup_nvlist(holds, za.za_name, &tags) != 0) {
			tags = fnvlist_alloc();
			fnvlist_add_boolean(tags, htag);
			fnvlist_add_nvlist(holds, za.za_name, tags);
			fnvlist_free(tags);
		} else {
			fnvlist_add_boolean(tags, htag);
		}
	}
	dsl_dataset_user_release_tmp(dp, holds);
	fnvlist_free(holds);
	zap_cursor_fini(&zc);
}
/*
 * Create the pool-wide zap object for storing temporary snapshot holds.
 */
void
dsl_pool_user_hold_create_obj(dsl_pool_t *dp, dmu_tx_t *tx)
{
	objset_t *mos = dp->dp_meta_objset;

	ASSERT(dp->dp_tmp_userrefs_obj == 0);
	ASSERT(dmu_tx_is_syncing(tx));

	dp->dp_tmp_userrefs_obj = zap_create_link(mos, DMU_OT_USERREFS,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_TMP_USERREFS, tx);
}

static int
dsl_pool_user_hold_rele_impl(dsl_pool_t *dp, uint64_t dsobj,
    const char *tag, uint64_t now, dmu_tx_t *tx, boolean_t holding)
{
	objset_t *mos = dp->dp_meta_objset;
	uint64_t zapobj = dp->dp_tmp_userrefs_obj;
	char *name;
	int error;

	ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);
	ASSERT(dmu_tx_is_syncing(tx));

	/*
	 * If the pool was created prior to SPA_VERSION_USERREFS, the
	 * zap object for temporary holds might not exist yet.
	 */
	if (zapobj == 0) {
		if (holding) {
			dsl_pool_user_hold_create_obj(dp, tx);
			zapobj = dp->dp_tmp_userrefs_obj;
		} else {
			return (SET_ERROR(ENOENT));
		}
	}

	name = kmem_asprintf("%llx-%s", (u_longlong_t)dsobj, tag);
	if (holding)
		error = zap_add(mos, zapobj, name, 8, 1, &now, tx);
	else
		error = zap_remove(mos, zapobj, name, tx);
	strfree(name);

	return (error);
}

/*
 * Add a temporary hold for the given dataset object and tag.
 */
int
dsl_pool_user_hold(dsl_pool_t *dp, uint64_t dsobj, const char *tag,
    uint64_t now, dmu_tx_t *tx)
{
	return (dsl_pool_user_hold_rele_impl(dp, dsobj, tag, now, tx, B_TRUE));
}

/*
 * Release a temporary hold for the given dataset object and tag.
 */
int
dsl_pool_user_release(dsl_pool_t *dp, uint64_t dsobj, const char *tag,
    dmu_tx_t *tx)
{
	return (dsl_pool_user_hold_rele_impl(dp, dsobj, tag, 0,
	    tx, B_FALSE));
}

/*
 * DSL Pool Configuration Lock
 *
 * The dp_config_rwlock protects against changes to DSL state (e.g. dataset
 * creation / destruction / rename / property setting). It must be held for
 * read to hold a dataset or dsl_dir. I.e. you must call
 * dsl_pool_config_enter() or dsl_pool_hold() before calling
 * dsl_{dataset,dir}_hold{_obj}. In most circumstances, the dp_config_rwlock
 * must be held continuously until all datasets and dsl_dirs are released.
 *
 * The only exception to this rule is that if a "long hold" is placed on
 * a dataset, then the dp_config_rwlock may be dropped while the dataset
 * is still held. The long hold will prevent the dataset from being
 * destroyed -- the destroy will fail with EBUSY. A long hold can be
 * obtained by calling dsl_dataset_long_hold(), or by "owning" a dataset
 * (by calling dsl_{dataset,objset}_{try}own{_obj}).
 *
 * Legitimate long-holders (including owners) should be long-running, cancelable
 * tasks that should cause "zfs destroy" to fail. This includes DMU
 * consumers (i.e. a ZPL filesystem being mounted or ZVOL being open),
 * "zfs send", and "zfs diff". There are several other long-holders whose
 * uses are suboptimal (e.g. "zfs promote", and zil_suspend()).
 *
 * The usual formula for long-holding would be:
 *	dsl_pool_hold()
 *	dsl_dataset_hold()
 *	... perform checks ...
 *	dsl_dataset_long_hold()
 *	dsl_pool_rele()
 *	... perform long-running task ...
 *	dsl_dataset_long_rele()
 *	dsl_dataset_rele()
 *
 * Note that when the long hold is released, the dataset is still held but
 * the pool is not held. The dataset may change arbitrarily during this time
 * (e.g. it could be destroyed). Therefore you shouldn't do anything to the
 * dataset except release it.
 *
 * User-initiated operations (e.g. ioctls, zfs_ioc_*()) are either read-only
 * or modifying operations.
 *
 * Modifying operations should generally use dsl_sync_task(). The synctask
 * infrastructure enforces proper locking strategy with respect to the
 * dp_config_rwlock. See the comment above dsl_sync_task() for details.
 *
 * Read-only operations will manually hold the pool, then the dataset, obtain
 * information from the dataset, then release the pool and dataset.
 * dmu_objset_{hold,rele}() are convenience routines that also do the pool
 * hold/rele.
 */

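/*
 * Illustrative sketch (added for clarity and compiled out below): a
 * hypothetical long-running task following the long-hold formula described
 * above. The function name is an assumption; error handling is abbreviated
 * and the long-running work itself is elided.
 */
#if 0
static int
example_long_running_task(const char *dsname, void *tag)
{
	dsl_pool_t *dp;
	dsl_dataset_t *ds;
	int error;

	error = dsl_pool_hold(dsname, tag, &dp);
	if (error != 0)
		return (error);
	error = dsl_dataset_hold(dp, dsname, tag, &ds);
	if (error != 0) {
		dsl_pool_rele(dp, tag);
		return (error);
	}
	/* ... perform checks ... */
	dsl_dataset_long_hold(ds, tag);
	dsl_pool_rele(dp, tag);

	/* ... perform long-running task; "zfs destroy" now fails with EBUSY ... */

	dsl_dataset_long_rele(ds, tag);
	dsl_dataset_rele(ds, tag);
	return (0);
}
#endif
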
int
dsl_pool_hold(const char *name, void *tag, dsl_pool_t **dp)
{
	spa_t *spa;
	int error;

	error = spa_open(name, &spa, tag);
	if (error == 0) {
		*dp = spa_get_dsl(spa);
		dsl_pool_config_enter(*dp, tag);
	}
	return (error);
}

void
dsl_pool_rele(dsl_pool_t *dp, void *tag)
{
	dsl_pool_config_exit(dp, tag);
	spa_close(dp->dp_spa, tag);
}

void
dsl_pool_config_enter(dsl_pool_t *dp, void *tag)
{
	/*
	 * We use a "reentrant" reader-writer lock, but not reentrantly.
	 *
	 * The rrwlock can (with the track_all flag) track all reading threads,
	 * which is very useful for debugging which code path failed to release
	 * the lock, and for verifying that the *current* thread does hold
	 * the lock.
	 *
	 * (Unlike a rwlock, which knows that N threads hold it for
	 * read, but not *which* threads, so rw_held(RW_READER) returns TRUE
	 * if any thread holds it for read, even if this thread doesn't).
	 */
	ASSERT(!rrw_held(&dp->dp_config_rwlock, RW_READER));
	rrw_enter(&dp->dp_config_rwlock, RW_READER, tag);
}

void
dsl_pool_config_enter_prio(dsl_pool_t *dp, void *tag)
{
	ASSERT(!rrw_held(&dp->dp_config_rwlock, RW_READER));
	rrw_enter_read_prio(&dp->dp_config_rwlock, tag);
}

void
dsl_pool_config_exit(dsl_pool_t *dp, void *tag)
{
	rrw_exit(&dp->dp_config_rwlock, tag);
}

boolean_t
dsl_pool_config_held(dsl_pool_t *dp)
{
	return (RRW_LOCK_HELD(&dp->dp_config_rwlock));
}

boolean_t
dsl_pool_config_held_writer(dsl_pool_t *dp)
{
	return (RRW_WRITE_HELD(&dp->dp_config_rwlock));
}

#if defined(_KERNEL) && defined(HAVE_SPL)
EXPORT_SYMBOL(dsl_pool_config_enter);
EXPORT_SYMBOL(dsl_pool_config_exit);

/* BEGIN CSTYLED */
/* zfs_dirty_data_max_percent only applied at module load in arc_init(). */
module_param(zfs_dirty_data_max_percent, int, 0444);
MODULE_PARM_DESC(zfs_dirty_data_max_percent, "percent of ram can be dirty");

/* zfs_dirty_data_max_max_percent only applied at module load in arc_init(). */
module_param(zfs_dirty_data_max_max_percent, int, 0444);
MODULE_PARM_DESC(zfs_dirty_data_max_max_percent,
	"zfs_dirty_data_max upper bound as % of RAM");

module_param(zfs_delay_min_dirty_percent, int, 0644);
MODULE_PARM_DESC(zfs_delay_min_dirty_percent, "transaction delay threshold");

module_param(zfs_dirty_data_max, ulong, 0644);
MODULE_PARM_DESC(zfs_dirty_data_max, "determines the dirty space limit");

/* zfs_dirty_data_max_max only applied at module load in arc_init(). */
module_param(zfs_dirty_data_max_max, ulong, 0444);
MODULE_PARM_DESC(zfs_dirty_data_max_max,
	"zfs_dirty_data_max upper bound in bytes");

module_param(zfs_dirty_data_sync, ulong, 0644);
MODULE_PARM_DESC(zfs_dirty_data_sync, "sync txg when this much dirty data");

module_param(zfs_delay_scale, ulong, 0644);
MODULE_PARM_DESC(zfs_delay_scale, "how quickly delay approaches infinity");

module_param(zfs_sync_taskq_batch_pct, int, 0644);
MODULE_PARM_DESC(zfs_sync_taskq_batch_pct,
	"max percent of CPUs that are used to sync dirty data");

module_param(zfs_zil_clean_taskq_nthr_pct, int, 0644);
MODULE_PARM_DESC(zfs_zil_clean_taskq_nthr_pct,
	"max percent of CPUs that are used per dp_sync_taskq");

module_param(zfs_zil_clean_taskq_minalloc, int, 0644);
MODULE_PARM_DESC(zfs_zil_clean_taskq_minalloc,
	"number of taskq entries that are pre-populated");

module_param(zfs_zil_clean_taskq_maxalloc, int, 0644);
MODULE_PARM_DESC(zfs_zil_clean_taskq_maxalloc,
	"max number of taskq entries that are cached");

/* END CSTYLED */
#endif
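
/*
 * Usage note (added for clarity; the values below are illustrative
 * assumptions): the writable tunables above can be set at module load time,
 * e.g. with a line such as
 *
 *	options zfs zfs_dirty_data_max=4294967296 zfs_delay_min_dirty_percent=70
 *
 * in /etc/modprobe.d/zfs.conf, or changed at runtime through
 * /sys/module/zfs/parameters/ for the parameters registered with mode 0644.
 */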