/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2017 by Delphix. All rights reserved.
 * Copyright (c) 2013 Steven Hartland. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright 2016 Nexenta Systems, Inc. All rights reserved.
 */

#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_scan.h>
#include <sys/dnode.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/arc.h>
#include <sys/zap.h>
#include <sys/zio.h>
#include <sys/zfs_context.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_znode.h>
#include <sys/spa_impl.h>
#include <sys/dsl_deadlist.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab_impl.h>
#include <sys/bptree.h>
#include <sys/zfeature.h>
#include <sys/zil_impl.h>
#include <sys/dsl_userhold.h>
#include <sys/trace_txg.h>
#include <sys/mmp.h>

/*
 * ZFS Write Throttle
 * ------------------
 *
 * ZFS must limit the rate of incoming writes to the rate at which it is able
 * to sync data modifications to the backend storage. Throttling by too much
 * creates an artificial limit; throttling by too little can only be sustained
 * for short periods and would lead to highly lumpy performance. On a per-pool
 * basis, ZFS tracks the amount of modified (dirty) data. As operations change
 * data, the amount of dirty data increases; as ZFS syncs out data, the amount
 * of dirty data decreases. When the amount of dirty data exceeds a
 * predetermined threshold, further modifications are blocked until the amount
 * of dirty data decreases (as data is synced out).
 *
 * The limit on dirty data is tunable, and should be adjusted according to
 * both the IO capacity and available memory of the system. The larger the
 * window, the more ZFS is able to aggregate and amortize metadata (and data)
 * changes. However, memory is a limited resource, and allowing for more dirty
 * data comes at the cost of keeping other useful data in memory (for example,
 * ZFS data cached by the ARC).
 *
 * Implementation
 *
 * As buffers are modified, dsl_pool_dirty_space() increments both the
 * per-txg (dp_dirty_pertxg[]) and poolwide (dp_dirty_total) accounting of
 * dirty space used; dsl_pool_undirty_space() decrements those values as
 * data is synced out from dsl_pool_sync(). While only the poolwide value
 * is relevant, the per-txg value is useful for debugging. The tunable
 * zfs_dirty_data_max determines the dirty space limit. Once that value is
 * exceeded, new writes are halted until space frees up.
 *
 * The zfs_dirty_data_sync_percent tunable dictates the threshold at which we
 * ensure that there is a txg syncing (see the comment in txg.c for a full
 * description of transaction group stages).
 *
 * The IO scheduler uses both the dirty space limit and current amount of
 * dirty data as inputs. Those values affect the number of concurrent IOs ZFS
 * issues. See the comment in vdev_queue.c for details of the IO scheduler.
 *
 * The delay is also calculated based on the amount of dirty data. See the
 * comment above dmu_tx_delay() for details.
 */

/*
 * zfs_dirty_data_max will be set to zfs_dirty_data_max_percent% of all memory,
 * capped at zfs_dirty_data_max_max. It can also be overridden with a module
 * parameter.
 */
unsigned long zfs_dirty_data_max = 0;
unsigned long zfs_dirty_data_max_max = 0;
int zfs_dirty_data_max_percent = 10;
int zfs_dirty_data_max_max_percent = 25;

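/*
 * Worked example of the sizing rule above (illustrative numbers only):
 * on a machine with 16 GiB of RAM and the defaults above,
 *
 *	zfs_dirty_data_max     = 16 GiB * 10% = 1.6 GiB
 *	zfs_dirty_data_max_max = 16 GiB * 25% = 4 GiB	(upper bound)
 *
 * so the computed 1.6 GiB falls within the cap and is used as-is. Setting
 * the zfs_dirty_data_max module parameter overrides the percentage
 * calculation entirely, subject to the zfs_dirty_data_max_max bound.
 */
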
/*
 * If there's at least this much dirty data (as a percentage of
 * zfs_dirty_data_max), push out a txg. This should be less than
 * zfs_vdev_async_write_active_min_dirty_percent.
 */
int zfs_dirty_data_sync_percent = 20;

/*
 * Once there is this much dirty data (as a percentage of
 * zfs_dirty_data_max), dmu_tx_delay() kicks in and delays each
 * transaction.
 * This value should be >= zfs_vdev_async_write_active_max_dirty_percent.
 */
int zfs_delay_min_dirty_percent = 60;

/*
 * This controls how quickly the delay approaches infinity.
 * Larger values cause it to delay more for a given amount of dirty data.
 * Therefore larger values will cause there to be less dirty data for a
 * given throughput.
 *
 * For the smoothest delay, this value should be about 1 billion divided
 * by the maximum number of operations per second. This will smoothly
 * handle between 10x and 1/10th this number.
 *
 * Note: zfs_delay_scale * zfs_dirty_data_max must be < 2^64, due to the
 * multiply in dmu_tx_delay().
 */
unsigned long zfs_delay_scale = 1000 * 1000 * 1000 / 2000;

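/*
 * Worked example of the guidance above (illustrative, not a tuning
 * recommendation): the default of 1,000,000,000 / 2,000 = 500,000 is
 * sized for a pool that can sustain roughly 2,000 operations per second,
 * and should behave smoothly from about 200 up to about 20,000 ops/s.
 * As a sanity check on the overflow note: 500,000 times a 4 GiB
 * zfs_dirty_data_max is about 2^51, comfortably below 2^64.
 */
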
/*
 * This determines the number of threads used by the dp_sync_taskq.
 */
int zfs_sync_taskq_batch_pct = 75;

/*
 * These tunables determine the behavior of how zil_itxg_clean() is
 * called via zil_clean() in the context of spa_sync(). When an itxg
 * list needs to be cleaned, TQ_NOSLEEP will be used when dispatching.
 * If the dispatch fails, the call to zil_itxg_clean() will occur
 * synchronously in the context of spa_sync(), which can negatively
 * impact the performance of spa_sync() (e.g. in the case of the itxg
 * list having a large number of itxs that need to be cleaned).
 *
 * Thus, these tunables can be used to manipulate the behavior of the
 * taskq used by zil_clean(); they determine the number of taskq entries
 * that are pre-populated when the taskq is first created (via the
 * "zfs_zil_clean_taskq_minalloc" tunable) and the maximum number of
 * taskq entries that are cached after an on-demand allocation (via the
 * "zfs_zil_clean_taskq_maxalloc" tunable).
 *
 * The idea is that we want to try reasonably hard to ensure there will
 * already be a taskq entry pre-allocated by the time that it is needed
 * by zil_clean(). This way, we can avoid the possibility of an
 * on-demand allocation of a new taskq entry from failing, which would
 * result in zil_itxg_clean() being called synchronously from zil_clean()
 * (which can adversely affect performance of spa_sync()).
 *
 * Additionally, the number of threads used by the taskq can be
 * configured via the "zfs_zil_clean_taskq_nthr_pct" tunable.
 */
int zfs_zil_clean_taskq_nthr_pct = 100;
int zfs_zil_clean_taskq_minalloc = 1024;
int zfs_zil_clean_taskq_maxalloc = 1024 * 1024;

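/*
 * For illustration, a minimal sketch of the dispatch-or-fallback pattern
 * the comment above describes (the real logic lives in zil_clean(), in
 * zil.c; "itxs" stands in for the itxg list being cleaned):
 */
#if 0
	taskqid_t id = taskq_dispatch(dp->dp_zil_clean_taskq,
	    (void (*)(void *))zil_itxg_clean, itxs, TQ_NOSLEEP);
	if (id == TASKQID_INVALID)
		zil_itxg_clean(itxs);	/* fall back: clean in spa_sync() */
#endif
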
int
dsl_pool_open_special_dir(dsl_pool_t *dp, const char *name, dsl_dir_t **ddp)
{
	uint64_t obj;
	int err;

	err = zap_lookup(dp->dp_meta_objset,
	    dsl_dir_phys(dp->dp_root_dir)->dd_child_dir_zapobj,
	    name, sizeof (obj), 1, &obj);
	if (err)
		return (err);

	return (dsl_dir_hold_obj(dp, obj, name, dp, ddp));
}

static dsl_pool_t *
dsl_pool_open_impl(spa_t *spa, uint64_t txg)
{
	dsl_pool_t *dp;
	blkptr_t *bp = spa_get_rootblkptr(spa);

	dp = kmem_zalloc(sizeof (dsl_pool_t), KM_SLEEP);
	dp->dp_spa = spa;
	dp->dp_meta_rootbp = *bp;
	rrw_init(&dp->dp_config_rwlock, B_TRUE);
	txg_init(dp, txg);
	mmp_init(spa);

	txg_list_create(&dp->dp_dirty_datasets, spa,
	    offsetof(dsl_dataset_t, ds_dirty_link));
	txg_list_create(&dp->dp_dirty_zilogs, spa,
	    offsetof(zilog_t, zl_dirty_link));
	txg_list_create(&dp->dp_dirty_dirs, spa,
	    offsetof(dsl_dir_t, dd_dirty_link));
	txg_list_create(&dp->dp_sync_tasks, spa,
	    offsetof(dsl_sync_task_t, dst_node));
	txg_list_create(&dp->dp_early_sync_tasks, spa,
	    offsetof(dsl_sync_task_t, dst_node));

	dp->dp_sync_taskq = taskq_create("dp_sync_taskq",
	    zfs_sync_taskq_batch_pct, minclsyspri, 1, INT_MAX,
	    TASKQ_THREADS_CPU_PCT);

	dp->dp_zil_clean_taskq = taskq_create("dp_zil_clean_taskq",
	    zfs_zil_clean_taskq_nthr_pct, minclsyspri,
	    zfs_zil_clean_taskq_minalloc,
	    zfs_zil_clean_taskq_maxalloc,
	    TASKQ_PREPOPULATE | TASKQ_THREADS_CPU_PCT);

	mutex_init(&dp->dp_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&dp->dp_spaceavail_cv, NULL, CV_DEFAULT, NULL);

	dp->dp_iput_taskq = taskq_create("z_iput", max_ncpus, defclsyspri,
	    max_ncpus * 8, INT_MAX, TASKQ_PREPOPULATE | TASKQ_DYNAMIC);
	dp->dp_unlinked_drain_taskq = taskq_create("z_unlinked_drain",
	    max_ncpus, defclsyspri, max_ncpus, INT_MAX,
	    TASKQ_PREPOPULATE | TASKQ_DYNAMIC);

	return (dp);
}

int
dsl_pool_init(spa_t *spa, uint64_t txg, dsl_pool_t **dpp)
{
	int err;
	dsl_pool_t *dp = dsl_pool_open_impl(spa, txg);

	/*
	 * Initialize the caller's dsl_pool_t structure before we actually open
	 * the meta objset. This is done because a self-healing write zio may
	 * be issued as part of dmu_objset_open_impl() and the spa needs its
	 * dsl_pool_t initialized in order to handle the write.
	 */
	*dpp = dp;

	err = dmu_objset_open_impl(spa, NULL, &dp->dp_meta_rootbp,
	    &dp->dp_meta_objset);
	if (err != 0) {
		dsl_pool_close(dp);
		*dpp = NULL;
	}

	return (err);
}

int
dsl_pool_open(dsl_pool_t *dp)
{
	int err;
	dsl_dir_t *dd;
	dsl_dataset_t *ds;
	uint64_t obj;

	rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);
	err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_ROOT_DATASET, sizeof (uint64_t), 1,
	    &dp->dp_root_dir_obj);
	if (err)
		goto out;

	err = dsl_dir_hold_obj(dp, dp->dp_root_dir_obj,
	    NULL, dp, &dp->dp_root_dir);
	if (err)
		goto out;

	err = dsl_pool_open_special_dir(dp, MOS_DIR_NAME, &dp->dp_mos_dir);
	if (err)
		goto out;

	if (spa_version(dp->dp_spa) >= SPA_VERSION_ORIGIN) {
		err = dsl_pool_open_special_dir(dp, ORIGIN_DIR_NAME, &dd);
		if (err)
			goto out;
		err = dsl_dataset_hold_obj(dp,
		    dsl_dir_phys(dd)->dd_head_dataset_obj, FTAG, &ds);
		if (err == 0) {
			err = dsl_dataset_hold_obj(dp,
			    dsl_dataset_phys(ds)->ds_prev_snap_obj, dp,
			    &dp->dp_origin_snap);
			dsl_dataset_rele(ds, FTAG);
		}
		dsl_dir_rele(dd, dp);
		if (err)
			goto out;
	}

	if (spa_version(dp->dp_spa) >= SPA_VERSION_DEADLISTS) {
		err = dsl_pool_open_special_dir(dp, FREE_DIR_NAME,
		    &dp->dp_free_dir);
		if (err)
			goto out;

		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_FREE_BPOBJ, sizeof (uint64_t), 1, &obj);
		if (err)
			goto out;
		VERIFY0(bpobj_open(&dp->dp_free_bpobj,
		    dp->dp_meta_objset, obj));
	}

	if (spa_feature_is_active(dp->dp_spa, SPA_FEATURE_OBSOLETE_COUNTS)) {
		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_OBSOLETE_BPOBJ, sizeof (uint64_t), 1, &obj);
		if (err == 0) {
			VERIFY0(bpobj_open(&dp->dp_obsolete_bpobj,
			    dp->dp_meta_objset, obj));
		} else if (err == ENOENT) {
			/*
			 * We might not have created the remap bpobj yet.
			 */
			err = 0;
		} else {
			goto out;
		}
	}

	/*
	 * Note: errors ignored, because these special dirs, used for
	 * space accounting, are only created on demand.
	 */
	(void) dsl_pool_open_special_dir(dp, LEAK_DIR_NAME,
	    &dp->dp_leak_dir);

	if (spa_feature_is_active(dp->dp_spa, SPA_FEATURE_ASYNC_DESTROY)) {
		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_BPTREE_OBJ, sizeof (uint64_t), 1,
		    &dp->dp_bptree_obj);
		if (err != 0)
			goto out;
	}

	if (spa_feature_is_active(dp->dp_spa, SPA_FEATURE_EMPTY_BPOBJ)) {
		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_EMPTY_BPOBJ, sizeof (uint64_t), 1,
		    &dp->dp_empty_bpobj);
		if (err != 0)
			goto out;
	}

	err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_TMP_USERREFS, sizeof (uint64_t), 1,
	    &dp->dp_tmp_userrefs_obj);
	if (err == ENOENT)
		err = 0;
	if (err)
		goto out;

	err = dsl_scan_init(dp, dp->dp_tx.tx_open_txg);

out:
	rrw_exit(&dp->dp_config_rwlock, FTAG);
	return (err);
}

void
dsl_pool_close(dsl_pool_t *dp)
{
	/*
	 * Drop our references from dsl_pool_open().
	 *
	 * Since we held the origin_snap from "syncing" context (which
	 * includes pool-opening context), it actually only got a "ref"
	 * and not a hold, so just drop that here.
	 */
	if (dp->dp_origin_snap != NULL)
		dsl_dataset_rele(dp->dp_origin_snap, dp);
	if (dp->dp_mos_dir != NULL)
		dsl_dir_rele(dp->dp_mos_dir, dp);
	if (dp->dp_free_dir != NULL)
		dsl_dir_rele(dp->dp_free_dir, dp);
	if (dp->dp_leak_dir != NULL)
		dsl_dir_rele(dp->dp_leak_dir, dp);
	if (dp->dp_root_dir != NULL)
		dsl_dir_rele(dp->dp_root_dir, dp);

	bpobj_close(&dp->dp_free_bpobj);
	bpobj_close(&dp->dp_obsolete_bpobj);

	/* undo the dmu_objset_open_impl(mos) from dsl_pool_open() */
	if (dp->dp_meta_objset != NULL)
		dmu_objset_evict(dp->dp_meta_objset);

	txg_list_destroy(&dp->dp_dirty_datasets);
	txg_list_destroy(&dp->dp_dirty_zilogs);
	txg_list_destroy(&dp->dp_sync_tasks);
	txg_list_destroy(&dp->dp_early_sync_tasks);
	txg_list_destroy(&dp->dp_dirty_dirs);

	taskq_destroy(dp->dp_zil_clean_taskq);
	taskq_destroy(dp->dp_sync_taskq);

	/*
	 * We can't set retry to TRUE since we're explicitly specifying
	 * a spa to flush. This is good enough; any missed buffers for
	 * this spa won't cause trouble, and they'll eventually fall
	 * out of the ARC just like any other unused buffer.
	 */
	arc_flush(dp->dp_spa, FALSE);

	mmp_fini(dp->dp_spa);
	txg_fini(dp);
	dsl_scan_fini(dp);
	dmu_buf_user_evict_wait();

	rrw_destroy(&dp->dp_config_rwlock);
	mutex_destroy(&dp->dp_lock);
	cv_destroy(&dp->dp_spaceavail_cv);
	taskq_destroy(dp->dp_unlinked_drain_taskq);
	taskq_destroy(dp->dp_iput_taskq);
	if (dp->dp_blkstats != NULL) {
		mutex_destroy(&dp->dp_blkstats->zab_lock);
		vmem_free(dp->dp_blkstats, sizeof (zfs_all_blkstats_t));
	}
	kmem_free(dp, sizeof (dsl_pool_t));
}

void
dsl_pool_create_obsolete_bpobj(dsl_pool_t *dp, dmu_tx_t *tx)
{
	uint64_t obj;
	/*
	 * Currently, we only create the obsolete_bpobj where there are
	 * indirect vdevs with referenced mappings.
	 */
	ASSERT(spa_feature_is_active(dp->dp_spa, SPA_FEATURE_DEVICE_REMOVAL));
	/* create and open the obsolete_bpobj */
	obj = bpobj_alloc(dp->dp_meta_objset, SPA_OLD_MAXBLOCKSIZE, tx);
	VERIFY0(bpobj_open(&dp->dp_obsolete_bpobj, dp->dp_meta_objset, obj));
	VERIFY0(zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_OBSOLETE_BPOBJ, sizeof (uint64_t), 1, &obj, tx));
	spa_feature_incr(dp->dp_spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
}

void
dsl_pool_destroy_obsolete_bpobj(dsl_pool_t *dp, dmu_tx_t *tx)
{
	spa_feature_decr(dp->dp_spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
	VERIFY0(zap_remove(dp->dp_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_OBSOLETE_BPOBJ, tx));
	bpobj_free(dp->dp_meta_objset,
	    dp->dp_obsolete_bpobj.bpo_object, tx);
	bpobj_close(&dp->dp_obsolete_bpobj);
}

dsl_pool_t *
dsl_pool_create(spa_t *spa, nvlist_t *zplprops, dsl_crypto_params_t *dcp,
    uint64_t txg)
{
	int err;
	dsl_pool_t *dp = dsl_pool_open_impl(spa, txg);
	dmu_tx_t *tx = dmu_tx_create_assigned(dp, txg);
#ifdef _KERNEL
	objset_t *os;
#else
	objset_t *os __attribute__((unused));
#endif
	dsl_dataset_t *ds;
	uint64_t obj;

	rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);

	/* create and open the MOS (meta-objset) */
	dp->dp_meta_objset = dmu_objset_create_impl(spa,
	    NULL, &dp->dp_meta_rootbp, DMU_OST_META, tx);
	spa->spa_meta_objset = dp->dp_meta_objset;

	/* create the pool directory */
	err = zap_create_claim(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_OT_OBJECT_DIRECTORY, DMU_OT_NONE, 0, tx);
	ASSERT0(err);

	/* Initialize scan structures */
	VERIFY0(dsl_scan_init(dp, txg));

	/* create and open the root dir */
	dp->dp_root_dir_obj = dsl_dir_create_sync(dp, NULL, NULL, tx);
	VERIFY0(dsl_dir_hold_obj(dp, dp->dp_root_dir_obj,
	    NULL, dp, &dp->dp_root_dir));

	/* create and open the meta-objset dir */
	(void) dsl_dir_create_sync(dp, dp->dp_root_dir, MOS_DIR_NAME, tx);
	VERIFY0(dsl_pool_open_special_dir(dp,
	    MOS_DIR_NAME, &dp->dp_mos_dir));

	if (spa_version(spa) >= SPA_VERSION_DEADLISTS) {
		/* create and open the free dir */
		(void) dsl_dir_create_sync(dp, dp->dp_root_dir,
		    FREE_DIR_NAME, tx);
		VERIFY0(dsl_pool_open_special_dir(dp,
		    FREE_DIR_NAME, &dp->dp_free_dir));

		/* create and open the free_bplist */
		obj = bpobj_alloc(dp->dp_meta_objset, SPA_OLD_MAXBLOCKSIZE, tx);
		VERIFY(zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_FREE_BPOBJ, sizeof (uint64_t), 1, &obj, tx) == 0);
		VERIFY0(bpobj_open(&dp->dp_free_bpobj,
		    dp->dp_meta_objset, obj));
	}

	if (spa_version(spa) >= SPA_VERSION_DSL_SCRUB)
		dsl_pool_create_origin(dp, tx);

	/*
	 * Some features may be needed when creating the root dataset, so we
	 * create the feature objects here.
	 */
	if (spa_version(spa) >= SPA_VERSION_FEATURES)
		spa_feature_create_zap_objects(spa, tx);

	if (dcp != NULL && dcp->cp_crypt != ZIO_CRYPT_OFF &&
	    dcp->cp_crypt != ZIO_CRYPT_INHERIT)
		spa_feature_enable(spa, SPA_FEATURE_ENCRYPTION, tx);

	/* create the root dataset */
	obj = dsl_dataset_create_sync_dd(dp->dp_root_dir, NULL, dcp, 0, tx);

	/* create the root objset */
	VERIFY0(dsl_dataset_hold_obj_flags(dp, obj,
	    DS_HOLD_FLAG_DECRYPT, FTAG, &ds));
	rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
	os = dmu_objset_create_impl(dp->dp_spa, ds,
	    dsl_dataset_get_blkptr(ds), DMU_OST_ZFS, tx);
	rrw_exit(&ds->ds_bp_rwlock, FTAG);
#ifdef _KERNEL
	zfs_create_fs(os, kcred, zplprops, tx);
#endif
	dsl_dataset_rele_flags(ds, DS_HOLD_FLAG_DECRYPT, FTAG);

	dmu_tx_commit(tx);

	rrw_exit(&dp->dp_config_rwlock, FTAG);

	return (dp);
}

/*
 * Account for the meta-objset space in its placeholder dsl_dir.
 */
void
dsl_pool_mos_diduse_space(dsl_pool_t *dp,
    int64_t used, int64_t comp, int64_t uncomp)
{
	ASSERT3U(comp, ==, uncomp);	/* it's all metadata */
	mutex_enter(&dp->dp_lock);
	dp->dp_mos_used_delta += used;
	dp->dp_mos_compressed_delta += comp;
	dp->dp_mos_uncompressed_delta += uncomp;
	mutex_exit(&dp->dp_lock);
}

static void
dsl_pool_sync_mos(dsl_pool_t *dp, dmu_tx_t *tx)
{
	zio_t *zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
	dmu_objset_sync(dp->dp_meta_objset, zio, tx);
	VERIFY0(zio_wait(zio));
	dprintf_bp(&dp->dp_meta_rootbp, "meta objset rootbp is %s", "");
	spa_set_rootblkptr(dp->dp_spa, &dp->dp_meta_rootbp);
}

static void
dsl_pool_dirty_delta(dsl_pool_t *dp, int64_t delta)
{
	ASSERT(MUTEX_HELD(&dp->dp_lock));

	if (delta < 0)
		ASSERT3U(-delta, <=, dp->dp_dirty_total);

	dp->dp_dirty_total += delta;

	/*
	 * Note: we signal even when increasing dp_dirty_total.
	 * This ensures forward progress -- each thread wakes the next waiter.
	 */
	if (dp->dp_dirty_total < zfs_dirty_data_max)
		cv_signal(&dp->dp_spaceavail_cv);
}

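/*
 * For illustration, a minimal sketch of the waiter side of the signal
 * above (the real wait loop lives in dmu_tx_wait(), in dmu_tx.c):
 * throttled writers sleep on dp_spaceavail_cv until dp_dirty_total drops
 * below zfs_dirty_data_max, and each wakeup re-signals through
 * dsl_pool_dirty_delta() so the chain of waiters keeps draining.
 */
#if 0
	mutex_enter(&dp->dp_lock);
	while (dp->dp_dirty_total >= zfs_dirty_data_max)
		cv_wait(&dp->dp_spaceavail_cv, &dp->dp_lock);
	mutex_exit(&dp->dp_lock);
#endif
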
#ifdef ZFS_DEBUG
static boolean_t
dsl_early_sync_task_verify(dsl_pool_t *dp, uint64_t txg)
{
	spa_t *spa = dp->dp_spa;
	vdev_t *rvd = spa->spa_root_vdev;

	for (uint64_t c = 0; c < rvd->vdev_children; c++) {
		vdev_t *vd = rvd->vdev_child[c];
		txg_list_t *tl = &vd->vdev_ms_list;
		metaslab_t *ms;

		for (ms = txg_list_head(tl, TXG_CLEAN(txg)); ms;
		    ms = txg_list_next(tl, ms, TXG_CLEAN(txg))) {
			VERIFY(range_tree_is_empty(ms->ms_freeing));
			VERIFY(range_tree_is_empty(ms->ms_checkpointing));
		}
	}

	return (B_TRUE);
}
#endif

void
dsl_pool_sync(dsl_pool_t *dp, uint64_t txg)
{
	zio_t *zio;
	dmu_tx_t *tx;
	dsl_dir_t *dd;
	dsl_dataset_t *ds;
	objset_t *mos = dp->dp_meta_objset;
	list_t synced_datasets;

	list_create(&synced_datasets, sizeof (dsl_dataset_t),
	    offsetof(dsl_dataset_t, ds_synced_link));

	tx = dmu_tx_create_assigned(dp, txg);

	/*
	 * Run all early sync tasks before writing out any dirty blocks.
	 * For more info on early sync tasks see block comment in
	 * dsl_early_sync_task().
	 */
	if (!txg_list_empty(&dp->dp_early_sync_tasks, txg)) {
		dsl_sync_task_t *dst;

		ASSERT3U(spa_sync_pass(dp->dp_spa), ==, 1);
		while ((dst =
		    txg_list_remove(&dp->dp_early_sync_tasks, txg)) != NULL) {
			ASSERT(dsl_early_sync_task_verify(dp, txg));
			dsl_sync_task_sync(dst, tx);
		}
		ASSERT(dsl_early_sync_task_verify(dp, txg));
	}

	/*
	 * Write out all dirty blocks of dirty datasets.
	 */
	zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
	while ((ds = txg_list_remove(&dp->dp_dirty_datasets, txg)) != NULL) {
		/*
		 * We must not sync any non-MOS datasets twice, because
		 * we may have taken a snapshot of them. However, we
		 * may sync newly-created datasets on pass 2.
		 */
		ASSERT(!list_link_active(&ds->ds_synced_link));
		list_insert_tail(&synced_datasets, ds);
		dsl_dataset_sync(ds, zio, tx);
	}
	VERIFY0(zio_wait(zio));

	/*
	 * We have written all of the accounted dirty data, so our
	 * dp_space_towrite should now be zero. However, some seldom-used
	 * code paths do not adhere to this (e.g. dbuf_undirty(); also,
	 * rounding errors in dbuf_write_physdone()).
	 * Shore up the accounting of any dirtied space now.
	 */
	dsl_pool_undirty_space(dp, dp->dp_dirty_pertxg[txg & TXG_MASK], txg);

	/*
	 * Update the long range free counter after
	 * we're done syncing user data.
	 */
	mutex_enter(&dp->dp_lock);
	ASSERT(spa_sync_pass(dp->dp_spa) == 1 ||
	    dp->dp_long_free_dirty_pertxg[txg & TXG_MASK] == 0);
	dp->dp_long_free_dirty_pertxg[txg & TXG_MASK] = 0;
	mutex_exit(&dp->dp_lock);

	/*
	 * After the data blocks have been written (ensured by the zio_wait()
	 * above), update the user/group/project space accounting. This happens
	 * in tasks dispatched to dp_sync_taskq, so wait for them before
	 * continuing.
	 */
	for (ds = list_head(&synced_datasets); ds != NULL;
	    ds = list_next(&synced_datasets, ds)) {
		dmu_objset_do_userquota_updates(ds->ds_objset, tx);
	}
	taskq_wait(dp->dp_sync_taskq);

	/*
	 * Sync the datasets again to push out the changes due to
	 * userspace updates. This must be done before we process the
	 * sync tasks, so that any snapshots will have the correct
	 * user accounting information (and we won't get confused
	 * about which blocks are part of the snapshot).
	 */
	zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
	while ((ds = txg_list_remove(&dp->dp_dirty_datasets, txg)) != NULL) {
		objset_t *os = ds->ds_objset;

		ASSERT(list_link_active(&ds->ds_synced_link));
		dmu_buf_rele(ds->ds_dbuf, ds);
		dsl_dataset_sync(ds, zio, tx);

		/*
		 * Release any key mappings created by calls to
		 * dsl_dataset_dirty() from the userquota accounting
		 * code paths.
		 */
		if (os->os_encrypted && !os->os_raw_receive &&
		    !os->os_next_write_raw[txg & TXG_MASK]) {
			ASSERT3P(ds->ds_key_mapping, !=, NULL);
			key_mapping_rele(dp->dp_spa, ds->ds_key_mapping, ds);
		}
	}
	VERIFY0(zio_wait(zio));

	/*
	 * Now that the datasets have been completely synced, we can
	 * clean up our in-memory structures accumulated while syncing:
	 *
	 * - move dead blocks from the pending deadlist to the on-disk deadlist
	 * - release hold from dsl_dataset_dirty()
	 * - release key mapping hold from dsl_dataset_dirty()
	 */
	while ((ds = list_remove_head(&synced_datasets)) != NULL) {
		objset_t *os = ds->ds_objset;

		if (os->os_encrypted && !os->os_raw_receive &&
		    !os->os_next_write_raw[txg & TXG_MASK]) {
			ASSERT3P(ds->ds_key_mapping, !=, NULL);
			key_mapping_rele(dp->dp_spa, ds->ds_key_mapping, ds);
		}

		dsl_dataset_sync_done(ds, tx);
	}

	while ((dd = txg_list_remove(&dp->dp_dirty_dirs, txg)) != NULL) {
		dsl_dir_sync(dd, tx);
	}

	/*
	 * The MOS's space is accounted for in the pool/$MOS
	 * (dp_mos_dir). We can't modify the MOS while we're syncing
	 * it, so we remember the deltas and apply them here.
	 */
	if (dp->dp_mos_used_delta != 0 || dp->dp_mos_compressed_delta != 0 ||
	    dp->dp_mos_uncompressed_delta != 0) {
		dsl_dir_diduse_space(dp->dp_mos_dir, DD_USED_HEAD,
		    dp->dp_mos_used_delta,
		    dp->dp_mos_compressed_delta,
		    dp->dp_mos_uncompressed_delta, tx);
		dp->dp_mos_used_delta = 0;
		dp->dp_mos_compressed_delta = 0;
		dp->dp_mos_uncompressed_delta = 0;
	}

	if (!multilist_is_empty(mos->os_dirty_dnodes[txg & TXG_MASK])) {
		dsl_pool_sync_mos(dp, tx);
	}

	/*
	 * If we modify a dataset in the same txg that we want to destroy it,
	 * its dsl_dir's dd_dbuf will be dirty, and thus have a hold on it.
	 * dsl_dir_destroy_check() will fail if there are unexpected holds.
	 * Therefore, we want to sync the MOS (thus syncing the dd_dbuf
	 * and clearing the hold on it) before we process the sync_tasks.
	 * The MOS data dirtied by the sync_tasks will be synced on the next
	 * pass.
	 */
	if (!txg_list_empty(&dp->dp_sync_tasks, txg)) {
		dsl_sync_task_t *dst;
		/*
		 * No more sync tasks should have been added while we
		 * were syncing.
		 */
		ASSERT3U(spa_sync_pass(dp->dp_spa), ==, 1);
		while ((dst = txg_list_remove(&dp->dp_sync_tasks, txg)) != NULL)
			dsl_sync_task_sync(dst, tx);
	}

	dmu_tx_commit(tx);

	DTRACE_PROBE2(dsl_pool_sync__done, dsl_pool_t *dp, dp, uint64_t, txg);
}

void
dsl_pool_sync_done(dsl_pool_t *dp, uint64_t txg)
{
	zilog_t *zilog;

	while ((zilog = txg_list_head(&dp->dp_dirty_zilogs, txg))) {
		dsl_dataset_t *ds = dmu_objset_ds(zilog->zl_os);
		/*
		 * We don't remove the zilog from the dp_dirty_zilogs
		 * list until after we've cleaned it. This ensures that
		 * callers of zilog_is_dirty() receive an accurate
		 * answer when they are racing with the spa sync thread.
		 */
		zil_clean(zilog, txg);
		(void) txg_list_remove_this(&dp->dp_dirty_zilogs, zilog, txg);
		ASSERT(!dmu_objset_is_dirty(zilog->zl_os, txg));
		dmu_buf_rele(ds->ds_dbuf, zilog);
	}
	ASSERT(!dmu_objset_is_dirty(dp->dp_meta_objset, txg));
}

/*
 * TRUE if the current thread is the tx_sync_thread or if we
 * are being called from SPA context during pool initialization.
 */
int
dsl_pool_sync_context(dsl_pool_t *dp)
{
	return (curthread == dp->dp_tx.tx_sync_thread ||
	    spa_is_initializing(dp->dp_spa) ||
	    taskq_member(dp->dp_sync_taskq, curthread));
}

/*
 * This function returns the amount of allocatable space in the pool
 * minus whatever space is currently reserved by ZFS for specific
 * purposes. Specifically:
 *
 * 1] Any reserved SLOP space
 * 2] Any space used by the checkpoint
 * 3] Any space used for deferred frees
 *
 * The latter two are especially important because they are needed to
 * rectify the SPA's and DMU's different understanding of how much space
 * is used. Now the DMU is aware of that extra space tracked by the SPA
 * without having to maintain a separate special dir (e.g. similar to
 * $MOS, $FREEING, and $LEAKED).
 *
 * Note: By deferred frees here, we mean the frees that were deferred
 * in spa_sync() after sync pass 1 (spa_deferred_bpobj), and not the
 * segments placed in ms_defer trees during metaslab_sync_done().
 */
uint64_t
dsl_pool_adjustedsize(dsl_pool_t *dp, zfs_space_check_t slop_policy)
{
	spa_t *spa = dp->dp_spa;
	uint64_t space, resv, adjustedsize;
	uint64_t spa_deferred_frees =
	    spa->spa_deferred_bpobj.bpo_phys->bpo_bytes;

	space = spa_get_dspace(spa)
	    - spa_get_checkpoint_space(spa) - spa_deferred_frees;
	resv = spa_get_slop_space(spa);

	switch (slop_policy) {
	case ZFS_SPACE_CHECK_NORMAL:
		break;
	case ZFS_SPACE_CHECK_RESERVED:
		resv >>= 1;
		break;
	case ZFS_SPACE_CHECK_EXTRA_RESERVED:
		resv >>= 2;
		break;
	case ZFS_SPACE_CHECK_NONE:
		resv = 0;
		break;
	default:
		panic("invalid slop policy value: %d", slop_policy);
		break;
	}
	adjustedsize = (space >= resv) ? (space - resv) : 0;

	return (adjustedsize);
}

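/*
 * Worked example of the slop policies above (illustrative numbers only):
 * if spa_get_slop_space() reports a 3.2 GiB reservation, then
 * ZFS_SPACE_CHECK_NORMAL withholds all 3.2 GiB, _RESERVED withholds
 * half of it (1.6 GiB, resv >> 1), _EXTRA_RESERVED a quarter (0.8 GiB,
 * resv >> 2), and _NONE withholds nothing, so progressively more
 * critical operations are allowed to dip further into the slop space.
 */
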
uint64_t
dsl_pool_unreserved_space(dsl_pool_t *dp, zfs_space_check_t slop_policy)
{
	uint64_t poolsize = dsl_pool_adjustedsize(dp, slop_policy);
	uint64_t deferred =
	    metaslab_class_get_deferred(spa_normal_class(dp->dp_spa));
	uint64_t quota = (poolsize >= deferred) ? (poolsize - deferred) : 0;
	return (quota);
}

boolean_t
dsl_pool_need_dirty_delay(dsl_pool_t *dp)
{
	uint64_t delay_min_bytes =
	    zfs_dirty_data_max * zfs_delay_min_dirty_percent / 100;
	uint64_t dirty_min_bytes =
	    zfs_dirty_data_max * zfs_dirty_data_sync_percent / 100;
	boolean_t rv;

	mutex_enter(&dp->dp_lock);
	if (dp->dp_dirty_total > dirty_min_bytes)
		txg_kick(dp);
	rv = (dp->dp_dirty_total > delay_min_bytes);
	mutex_exit(&dp->dp_lock);
	return (rv);
}

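/*
 * Worked example of the two thresholds above (illustrative numbers
 * only): with zfs_dirty_data_max = 4 GiB and the default percentages,
 * a txg sync is kicked once dirty data exceeds 20% (819 MiB), and
 * dmu_tx_delay() throttling begins once it exceeds 60% (2.4 GiB).
 * The sync threshold firing well before the delay threshold is what
 * normally keeps writers from ever reaching the delay zone.
 */
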
void
dsl_pool_dirty_space(dsl_pool_t *dp, int64_t space, dmu_tx_t *tx)
{
	if (space > 0) {
		mutex_enter(&dp->dp_lock);
		dp->dp_dirty_pertxg[tx->tx_txg & TXG_MASK] += space;
		dsl_pool_dirty_delta(dp, space);
		mutex_exit(&dp->dp_lock);
	}
}

void
dsl_pool_undirty_space(dsl_pool_t *dp, int64_t space, uint64_t txg)
{
	ASSERT3S(space, >=, 0);
	if (space == 0)
		return;

	mutex_enter(&dp->dp_lock);
	if (dp->dp_dirty_pertxg[txg & TXG_MASK] < space) {
		/* XXX writing something we didn't dirty? */
		space = dp->dp_dirty_pertxg[txg & TXG_MASK];
	}
	ASSERT3U(dp->dp_dirty_pertxg[txg & TXG_MASK], >=, space);
	dp->dp_dirty_pertxg[txg & TXG_MASK] -= space;
	ASSERT3U(dp->dp_dirty_total, >=, space);
	dsl_pool_dirty_delta(dp, -space);
	mutex_exit(&dp->dp_lock);
}

/* ARGSUSED */
static int
upgrade_clones_cb(dsl_pool_t *dp, dsl_dataset_t *hds, void *arg)
{
	dmu_tx_t *tx = arg;
	dsl_dataset_t *ds, *prev = NULL;
	int err;

	err = dsl_dataset_hold_obj(dp, hds->ds_object, FTAG, &ds);
	if (err)
		return (err);

	while (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0) {
		err = dsl_dataset_hold_obj(dp,
		    dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &prev);
		if (err) {
			dsl_dataset_rele(ds, FTAG);
			return (err);
		}

		if (dsl_dataset_phys(prev)->ds_next_snap_obj != ds->ds_object)
			break;
		dsl_dataset_rele(ds, FTAG);
		ds = prev;
		prev = NULL;
	}

	if (prev == NULL) {
		prev = dp->dp_origin_snap;

		/*
		 * The $ORIGIN can't have any data, or the accounting
		 * will be wrong.
		 */
		rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
		ASSERT0(dsl_dataset_phys(prev)->ds_bp.blk_birth);
		rrw_exit(&ds->ds_bp_rwlock, FTAG);

		/* The origin doesn't get attached to itself */
		if (ds->ds_object == prev->ds_object) {
			dsl_dataset_rele(ds, FTAG);
			return (0);
		}

		dmu_buf_will_dirty(ds->ds_dbuf, tx);
		dsl_dataset_phys(ds)->ds_prev_snap_obj = prev->ds_object;
		dsl_dataset_phys(ds)->ds_prev_snap_txg =
		    dsl_dataset_phys(prev)->ds_creation_txg;

		dmu_buf_will_dirty(ds->ds_dir->dd_dbuf, tx);
		dsl_dir_phys(ds->ds_dir)->dd_origin_obj = prev->ds_object;

		dmu_buf_will_dirty(prev->ds_dbuf, tx);
		dsl_dataset_phys(prev)->ds_num_children++;

		if (dsl_dataset_phys(ds)->ds_next_snap_obj == 0) {
			ASSERT(ds->ds_prev == NULL);
			VERIFY0(dsl_dataset_hold_obj(dp,
			    dsl_dataset_phys(ds)->ds_prev_snap_obj,
			    ds, &ds->ds_prev));
		}
	}

	ASSERT3U(dsl_dir_phys(ds->ds_dir)->dd_origin_obj, ==, prev->ds_object);
	ASSERT3U(dsl_dataset_phys(ds)->ds_prev_snap_obj, ==, prev->ds_object);

	if (dsl_dataset_phys(prev)->ds_next_clones_obj == 0) {
		dmu_buf_will_dirty(prev->ds_dbuf, tx);
		dsl_dataset_phys(prev)->ds_next_clones_obj =
		    zap_create(dp->dp_meta_objset,
		    DMU_OT_NEXT_CLONES, DMU_OT_NONE, 0, tx);
	}
	VERIFY0(zap_add_int(dp->dp_meta_objset,
	    dsl_dataset_phys(prev)->ds_next_clones_obj, ds->ds_object, tx));

	dsl_dataset_rele(ds, FTAG);
	if (prev != dp->dp_origin_snap)
		dsl_dataset_rele(prev, FTAG);
	return (0);
}

void
dsl_pool_upgrade_clones(dsl_pool_t *dp, dmu_tx_t *tx)
{
	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(dp->dp_origin_snap != NULL);

	VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj, upgrade_clones_cb,
	    tx, DS_FIND_CHILDREN | DS_FIND_SERIALIZE));
}

/* ARGSUSED */
static int
upgrade_dir_clones_cb(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)
{
	dmu_tx_t *tx = arg;
	objset_t *mos = dp->dp_meta_objset;

	if (dsl_dir_phys(ds->ds_dir)->dd_origin_obj != 0) {
		dsl_dataset_t *origin;

		VERIFY0(dsl_dataset_hold_obj(dp,
		    dsl_dir_phys(ds->ds_dir)->dd_origin_obj, FTAG, &origin));

		if (dsl_dir_phys(origin->ds_dir)->dd_clones == 0) {
			dmu_buf_will_dirty(origin->ds_dir->dd_dbuf, tx);
			dsl_dir_phys(origin->ds_dir)->dd_clones =
			    zap_create(mos, DMU_OT_DSL_CLONES, DMU_OT_NONE,
			    0, tx);
		}

		VERIFY0(zap_add_int(dp->dp_meta_objset,
		    dsl_dir_phys(origin->ds_dir)->dd_clones,
		    ds->ds_object, tx));

		dsl_dataset_rele(origin, FTAG);
	}
	return (0);
}

void
dsl_pool_upgrade_dir_clones(dsl_pool_t *dp, dmu_tx_t *tx)
{
	uint64_t obj;

	ASSERT(dmu_tx_is_syncing(tx));

	(void) dsl_dir_create_sync(dp, dp->dp_root_dir, FREE_DIR_NAME, tx);
	VERIFY0(dsl_pool_open_special_dir(dp,
	    FREE_DIR_NAME, &dp->dp_free_dir));

	/*
	 * We can't use bpobj_alloc(), because spa_version() still
	 * returns the old version, and we need a new-version bpobj with
	 * subobj support. So call dmu_object_alloc() directly.
	 */
	obj = dmu_object_alloc(dp->dp_meta_objset, DMU_OT_BPOBJ,
	    SPA_OLD_MAXBLOCKSIZE, DMU_OT_BPOBJ_HDR, sizeof (bpobj_phys_t), tx);
	VERIFY0(zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_FREE_BPOBJ, sizeof (uint64_t), 1, &obj, tx));
	VERIFY0(bpobj_open(&dp->dp_free_bpobj, dp->dp_meta_objset, obj));

	VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
	    upgrade_dir_clones_cb, tx, DS_FIND_CHILDREN | DS_FIND_SERIALIZE));
}

void
dsl_pool_create_origin(dsl_pool_t *dp, dmu_tx_t *tx)
{
	uint64_t dsobj;
	dsl_dataset_t *ds;

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(dp->dp_origin_snap == NULL);
	ASSERT(rrw_held(&dp->dp_config_rwlock, RW_WRITER));

	/* create the origin dir, ds, & snap-ds */
	dsobj = dsl_dataset_create_sync(dp->dp_root_dir, ORIGIN_DIR_NAME,
	    NULL, 0, kcred, NULL, tx);
	VERIFY0(dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
	dsl_dataset_snapshot_sync_impl(ds, ORIGIN_DIR_NAME, tx);
	VERIFY0(dsl_dataset_hold_obj(dp, dsl_dataset_phys(ds)->ds_prev_snap_obj,
	    dp, &dp->dp_origin_snap));
	dsl_dataset_rele(ds, FTAG);
}

taskq_t *
dsl_pool_iput_taskq(dsl_pool_t *dp)
{
	return (dp->dp_iput_taskq);
}

taskq_t *
dsl_pool_unlinked_drain_taskq(dsl_pool_t *dp)
{
	return (dp->dp_unlinked_drain_taskq);
}

/*
 * Walk through the pool-wide zap object of temporary snapshot user holds
 * and release them.
 */
void
dsl_pool_clean_tmp_userrefs(dsl_pool_t *dp)
{
	zap_attribute_t za;
	zap_cursor_t zc;
	objset_t *mos = dp->dp_meta_objset;
	uint64_t zapobj = dp->dp_tmp_userrefs_obj;
	nvlist_t *holds;

	if (zapobj == 0)
		return;
	ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);

	holds = fnvlist_alloc();

	for (zap_cursor_init(&zc, mos, zapobj);
	    zap_cursor_retrieve(&zc, &za) == 0;
	    zap_cursor_advance(&zc)) {
		char *htag;
		nvlist_t *tags;

		htag = strchr(za.za_name, '-');
		*htag = '\0';
		++htag;
		if (nvlist_lookup_nvlist(holds, za.za_name, &tags) != 0) {
			tags = fnvlist_alloc();
			fnvlist_add_boolean(tags, htag);
			fnvlist_add_nvlist(holds, za.za_name, tags);
			fnvlist_free(tags);
		} else {
			fnvlist_add_boolean(tags, htag);
		}
	}
	dsl_dataset_user_release_tmp(dp, holds);
	fnvlist_free(holds);
	zap_cursor_fini(&zc);
}

/*
 * Create the pool-wide zap object for storing temporary snapshot holds.
 */
void
dsl_pool_user_hold_create_obj(dsl_pool_t *dp, dmu_tx_t *tx)
{
	objset_t *mos = dp->dp_meta_objset;

	ASSERT(dp->dp_tmp_userrefs_obj == 0);
	ASSERT(dmu_tx_is_syncing(tx));

	dp->dp_tmp_userrefs_obj = zap_create_link(mos, DMU_OT_USERREFS,
	    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_TMP_USERREFS, tx);
}

static int
dsl_pool_user_hold_rele_impl(dsl_pool_t *dp, uint64_t dsobj,
    const char *tag, uint64_t now, dmu_tx_t *tx, boolean_t holding)
{
	objset_t *mos = dp->dp_meta_objset;
	uint64_t zapobj = dp->dp_tmp_userrefs_obj;
	char *name;
	int error;

	ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);
	ASSERT(dmu_tx_is_syncing(tx));

	/*
	 * If the pool was created prior to SPA_VERSION_USERREFS, the
	 * zap object for temporary holds might not exist yet.
	 */
	if (zapobj == 0) {
		if (holding) {
			dsl_pool_user_hold_create_obj(dp, tx);
			zapobj = dp->dp_tmp_userrefs_obj;
		} else {
			return (SET_ERROR(ENOENT));
		}
	}

	name = kmem_asprintf("%llx-%s", (u_longlong_t)dsobj, tag);
	if (holding)
		error = zap_add(mos, zapobj, name, 8, 1, &now, tx);
	else
		error = zap_remove(mos, zapobj, name, tx);
	strfree(name);

	return (error);
}

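/*
 * For illustration (hypothetical values): a hold on dataset object 0x36
 * with tag "send-1234" is stored above under the zap key "36-send-1234".
 * The parser in dsl_pool_clean_tmp_userrefs() relies on this layout,
 * splitting each key at its first '-' to recover the dataset object
 * number and the hold tag.
 */
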
/*
 * Add a temporary hold for the given dataset object and tag.
 */
int
dsl_pool_user_hold(dsl_pool_t *dp, uint64_t dsobj, const char *tag,
    uint64_t now, dmu_tx_t *tx)
{
	return (dsl_pool_user_hold_rele_impl(dp, dsobj, tag, now, tx, B_TRUE));
}

/*
 * Release a temporary hold for the given dataset object and tag.
 */
int
dsl_pool_user_release(dsl_pool_t *dp, uint64_t dsobj, const char *tag,
    dmu_tx_t *tx)
{
	return (dsl_pool_user_hold_rele_impl(dp, dsobj, tag, 0,
	    tx, B_FALSE));
}

/*
 * DSL Pool Configuration Lock
 *
 * The dp_config_rwlock protects against changes to DSL state (e.g. dataset
 * creation / destruction / rename / property setting). It must be held for
 * read to hold a dataset or dsl_dir. I.e. you must call
 * dsl_pool_config_enter() or dsl_pool_hold() before calling
 * dsl_{dataset,dir}_hold{_obj}. In most circumstances, the dp_config_rwlock
 * must be held continuously until all datasets and dsl_dirs are released.
 *
 * The only exception to this rule is that if a "long hold" is placed on
 * a dataset, then the dp_config_rwlock may be dropped while the dataset
 * is still held. The long hold will prevent the dataset from being
 * destroyed -- the destroy will fail with EBUSY. A long hold can be
 * obtained by calling dsl_dataset_long_hold(), or by "owning" a dataset
 * (by calling dsl_{dataset,objset}_{try}own{_obj}).
 *
 * Legitimate long-holders (including owners) are long-running, cancelable
 * tasks that should cause "zfs destroy" to fail. This includes DMU
 * consumers (i.e. a ZPL filesystem being mounted or ZVOL being open),
 * "zfs send", and "zfs diff". There are several other long-holders whose
 * uses are suboptimal (e.g. "zfs promote", and zil_suspend()).
 *
 * The usual formula for long-holding would be:
 * dsl_pool_hold()
 * dsl_dataset_hold()
 * ... perform checks ...
 * dsl_dataset_long_hold()
 * dsl_pool_rele()
 * ... perform long-running task ...
 * dsl_dataset_long_rele()
 * dsl_dataset_rele()
 *
 * Note that when the long hold is released, the dataset is still held but
 * the pool is not held. The dataset may change arbitrarily during this time
 * (e.g. it could be destroyed). Therefore you shouldn't do anything to the
 * dataset except release it.
 *
 * User-initiated operations (e.g. ioctls, zfs_ioc_*()) are either read-only
 * or modifying operations.
 *
 * Modifying operations should generally use dsl_sync_task(). The synctask
 * infrastructure enforces proper locking strategy with respect to the
 * dp_config_rwlock. See the comment above dsl_sync_task() for details.
 *
 * Read-only operations will manually hold the pool, then the dataset, obtain
 * information from the dataset, then release the pool and dataset.
 * dmu_objset_{hold,rele}() are convenience routines that also do the pool
 * hold/rele.
 */

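/*
 * For illustration, a minimal sketch of the read-only pattern described
 * above (error handling kept minimal; "zfs_example_get_creation" is a
 * hypothetical consumer, not part of this file):
 */
#if 0
static int
zfs_example_get_creation(const char *dsname, uint64_t *crtxg)
{
	dsl_pool_t *dp;
	dsl_dataset_t *ds;
	int error;

	error = dsl_pool_hold(dsname, FTAG, &dp);	/* takes config lock */
	if (error != 0)
		return (error);
	error = dsl_dataset_hold(dp, dsname, FTAG, &ds);
	if (error != 0) {
		dsl_pool_rele(dp, FTAG);
		return (error);
	}
	*crtxg = dsl_dataset_phys(ds)->ds_creation_txg;
	dsl_dataset_rele(ds, FTAG);
	dsl_pool_rele(dp, FTAG);	/* drop pool after the dataset */
	return (0);
}
#endif
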
int
dsl_pool_hold(const char *name, void *tag, dsl_pool_t **dp)
{
	spa_t *spa;
	int error;

	error = spa_open(name, &spa, tag);
	if (error == 0) {
		*dp = spa_get_dsl(spa);
		dsl_pool_config_enter(*dp, tag);
	}
	return (error);
}

void
dsl_pool_rele(dsl_pool_t *dp, void *tag)
{
	dsl_pool_config_exit(dp, tag);
	spa_close(dp->dp_spa, tag);
}

void
dsl_pool_config_enter(dsl_pool_t *dp, void *tag)
{
	/*
	 * We use a "reentrant" reader-writer lock, but not reentrantly.
	 *
	 * The rrwlock can (with the track_all flag) track all reading threads,
	 * which is very useful for debugging which code path failed to release
	 * the lock, and for verifying that the *current* thread does hold
	 * the lock.
	 *
	 * (Unlike a rwlock, which knows that N threads hold it for
	 * read, but not *which* threads, so rw_held(RW_READER) returns TRUE
	 * if any thread holds it for read, even if this thread doesn't).
	 */
	ASSERT(!rrw_held(&dp->dp_config_rwlock, RW_READER));
	rrw_enter(&dp->dp_config_rwlock, RW_READER, tag);
}

void
dsl_pool_config_enter_prio(dsl_pool_t *dp, void *tag)
{
	ASSERT(!rrw_held(&dp->dp_config_rwlock, RW_READER));
	rrw_enter_read_prio(&dp->dp_config_rwlock, tag);
}

void
dsl_pool_config_exit(dsl_pool_t *dp, void *tag)
{
	rrw_exit(&dp->dp_config_rwlock, tag);
}

boolean_t
dsl_pool_config_held(dsl_pool_t *dp)
{
	return (RRW_LOCK_HELD(&dp->dp_config_rwlock));
}

boolean_t
dsl_pool_config_held_writer(dsl_pool_t *dp)
{
	return (RRW_WRITE_HELD(&dp->dp_config_rwlock));
}

#if defined(_KERNEL)
EXPORT_SYMBOL(dsl_pool_config_enter);
EXPORT_SYMBOL(dsl_pool_config_exit);

/* BEGIN CSTYLED */
/* zfs_dirty_data_max_percent only applied at module load in arc_init(). */
module_param(zfs_dirty_data_max_percent, int, 0444);
MODULE_PARM_DESC(zfs_dirty_data_max_percent,
	"percent of RAM that can be dirty");

/* zfs_dirty_data_max_max_percent only applied at module load in arc_init(). */
module_param(zfs_dirty_data_max_max_percent, int, 0444);
MODULE_PARM_DESC(zfs_dirty_data_max_max_percent,
	"zfs_dirty_data_max upper bound as % of RAM");

module_param(zfs_delay_min_dirty_percent, int, 0644);
MODULE_PARM_DESC(zfs_delay_min_dirty_percent, "transaction delay threshold");

module_param(zfs_dirty_data_max, ulong, 0644);
MODULE_PARM_DESC(zfs_dirty_data_max, "determines the dirty space limit");

/* zfs_dirty_data_max_max only applied at module load in arc_init(). */
module_param(zfs_dirty_data_max_max, ulong, 0444);
MODULE_PARM_DESC(zfs_dirty_data_max_max,
	"zfs_dirty_data_max upper bound in bytes");

module_param(zfs_dirty_data_sync_percent, int, 0644);
MODULE_PARM_DESC(zfs_dirty_data_sync_percent,
	"dirty data txg sync threshold as a percentage of zfs_dirty_data_max");

module_param(zfs_delay_scale, ulong, 0644);
MODULE_PARM_DESC(zfs_delay_scale, "how quickly delay approaches infinity");

module_param(zfs_sync_taskq_batch_pct, int, 0644);
MODULE_PARM_DESC(zfs_sync_taskq_batch_pct,
	"max percent of CPUs that are used to sync dirty data");

module_param(zfs_zil_clean_taskq_nthr_pct, int, 0644);
MODULE_PARM_DESC(zfs_zil_clean_taskq_nthr_pct,
	"max percent of CPUs that are used per dp_zil_clean_taskq");

module_param(zfs_zil_clean_taskq_minalloc, int, 0644);
MODULE_PARM_DESC(zfs_zil_clean_taskq_minalloc,
	"number of taskq entries that are pre-populated");

module_param(zfs_zil_clean_taskq_maxalloc, int, 0644);
MODULE_PARM_DESC(zfs_zil_clean_taskq_maxalloc,
	"max number of taskq entries that are cached");

/* END CSTYLED */
#endif