/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 */
#include <sys/dmu.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_deleg.h>
#include <sys/spa.h>
#include <sys/metaslab.h>
#include <sys/zap.h>
#include <sys/zio.h>
#include <sys/arc.h>
#include <sys/sunddi.h>
#include "zfs_namecheck.h"
static uint64_t dsl_dir_space_towrite(dsl_dir_t *dd);
static void dsl_dir_set_reservation_sync(void *arg1, void *arg2, dmu_tx_t *tx);
/* ARGSUSED */
static void
dsl_dir_evict(dmu_buf_t *db, void *arg)
{
	dsl_dir_t *dd = arg;
	int t;
	ASSERTV(dsl_pool_t *dp = dd->dd_pool;)

	for (t = 0; t < TXG_SIZE; t++) {
		ASSERT(!txg_list_member(&dp->dp_dirty_dirs, dd, t));
		ASSERT(dd->dd_tempreserved[t] == 0);
		ASSERT(dd->dd_space_towrite[t] == 0);
	}

	if (dd->dd_parent)
		dsl_dir_close(dd->dd_parent, dd);

	spa_close(dd->dd_pool->dp_spa, dd);

	/*
	 * The props callback list should have been cleaned up by
	 * objset_evict().
	 */
	list_destroy(&dd->dd_prop_cbs);
	mutex_destroy(&dd->dd_lock);
	kmem_free(dd, sizeof (dsl_dir_t));
}
static int
dsl_dir_open_obj(dsl_pool_t *dp, uint64_t ddobj,
    const char *tail, void *tag, dsl_dir_t **ddp)
{
	dmu_buf_t *dbuf;
	dsl_dir_t *dd;
	int err;

	ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock) ||
	    dsl_pool_sync_context(dp));

	err = dmu_bonus_hold(dp->dp_meta_objset, ddobj, tag, &dbuf);
	if (err)
		return (err);
	dd = dmu_buf_get_user(dbuf);
#ifdef ZFS_DEBUG
	{
		dmu_object_info_t doi;
		dmu_object_info_from_db(dbuf, &doi);
		ASSERT3U(doi.doi_type, ==, DMU_OT_DSL_DIR);
		ASSERT3U(doi.doi_bonus_size, >=, sizeof (dsl_dir_phys_t));
	}
#endif
	if (dd == NULL) {
		dsl_dir_t *winner;

		dd = kmem_zalloc(sizeof (dsl_dir_t), KM_SLEEP);
		dd->dd_object = ddobj;
		dd->dd_dbuf = dbuf;
		dd->dd_pool = dp;
		dd->dd_phys = dbuf->db_data;
		mutex_init(&dd->dd_lock, NULL, MUTEX_DEFAULT, NULL);

		list_create(&dd->dd_prop_cbs, sizeof (dsl_prop_cb_record_t),
		    offsetof(dsl_prop_cb_record_t, cbr_node));

		dsl_dir_snap_cmtime_update(dd);

		if (dd->dd_phys->dd_parent_obj) {
			err = dsl_dir_open_obj(dp, dd->dd_phys->dd_parent_obj,
			    NULL, dd, &dd->dd_parent);
			if (err)
				goto errout;
			if (tail) {
#ifdef ZFS_DEBUG
				uint64_t foundobj;

				err = zap_lookup(dp->dp_meta_objset,
				    dd->dd_parent->dd_phys->dd_child_dir_zapobj,
				    tail, sizeof (foundobj), 1, &foundobj);
				ASSERT(err || foundobj == ddobj);
#endif
				(void) strcpy(dd->dd_myname, tail);
			} else {
				err = zap_value_search(dp->dp_meta_objset,
				    dd->dd_parent->dd_phys->dd_child_dir_zapobj,
				    ddobj, 0, dd->dd_myname);
			}
			if (err)
				goto errout;
		} else {
			(void) strcpy(dd->dd_myname, spa_name(dp->dp_spa));
		}

		if (dsl_dir_is_clone(dd)) {
			dmu_buf_t *origin_bonus;
			dsl_dataset_phys_t *origin_phys;

			/*
			 * We can't open the origin dataset, because
			 * that would require opening this dsl_dir.
			 * Just look at its phys directly instead.
			 */
			err = dmu_bonus_hold(dp->dp_meta_objset,
			    dd->dd_phys->dd_origin_obj, FTAG, &origin_bonus);
			if (err)
				goto errout;
			origin_phys = origin_bonus->db_data;
			dd->dd_origin_txg =
			    origin_phys->ds_creation_txg;
			dmu_buf_rele(origin_bonus, FTAG);
		}

		winner = dmu_buf_set_user_ie(dbuf, dd, &dd->dd_phys,
		    dsl_dir_evict);
		if (winner) {
			if (dd->dd_parent)
				dsl_dir_close(dd->dd_parent, dd);
			mutex_destroy(&dd->dd_lock);
			kmem_free(dd, sizeof (dsl_dir_t));
			dd = winner;
		} else {
			spa_open_ref(dp->dp_spa, dd);
		}
	}

	/*
	 * The dsl_dir_t has both open-to-close and instantiate-to-evict
	 * holds on the spa.  We need the open-to-close holds because
	 * otherwise the spa_refcnt wouldn't change when we open a
	 * dir which the spa also has open, so we could incorrectly
	 * think it was OK to unload/export/destroy the pool.  We need
	 * the instantiate-to-evict hold because the dsl_dir_t has a
	 * pointer to the dd_pool, which has a pointer to the spa_t.
	 */
	spa_open_ref(dp->dp_spa, tag);
	ASSERT3P(dd->dd_pool, ==, dp);
	ASSERT3U(dd->dd_object, ==, ddobj);
	ASSERT3P(dd->dd_dbuf, ==, dbuf);
	*ddp = dd;
	return (0);

errout:
	if (dd->dd_parent)
		dsl_dir_close(dd->dd_parent, dd);
	mutex_destroy(&dd->dd_lock);
	kmem_free(dd, sizeof (dsl_dir_t));
	dmu_buf_rele(dbuf, tag);
	return (err);
}
void
dsl_dir_close(dsl_dir_t *dd, void *tag)
{
	dprintf_dd(dd, "%s\n", "");
	spa_close(dd->dd_pool->dp_spa, tag);
	dmu_buf_rele(dd->dd_dbuf, tag);
}
/* buf must be long enough (MAXNAMELEN + strlen(MOS_DIR_NAME) + 1 should do) */
void
dsl_dir_name(dsl_dir_t *dd, char *buf)
{
	if (dd->dd_parent) {
		dsl_dir_name(dd->dd_parent, buf);
		(void) strcat(buf, "/");
	} else {
		buf[0] = '\0';
	}
	if (!MUTEX_HELD(&dd->dd_lock)) {
		/*
		 * recursive mutex so that we can use
		 * dprintf_dd() with dd_lock held
		 */
		mutex_enter(&dd->dd_lock);
		(void) strcat(buf, dd->dd_myname);
		mutex_exit(&dd->dd_lock);
	} else {
		(void) strcat(buf, dd->dd_myname);
	}
}
/* Calculate name length, avoiding all the strcat calls of dsl_dir_name */
int
dsl_dir_namelen(dsl_dir_t *dd)
{
	int result = 0;

	if (dd->dd_parent) {
		/* parent's name + 1 for the "/" */
		result = dsl_dir_namelen(dd->dd_parent) + 1;
	}

	if (!MUTEX_HELD(&dd->dd_lock)) {
		/* see dsl_dir_name */
		mutex_enter(&dd->dd_lock);
		result += strlen(dd->dd_myname);
		mutex_exit(&dd->dd_lock);
	} else {
		result += strlen(dd->dd_myname);
	}

	return (result);
}
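
/*
 * Illustrative sketch (not part of the original source): pairing
 * dsl_dir_namelen() with dsl_dir_name() to build a full name in a
 * right-sized buffer.  example_dir_name_alloc is a hypothetical helper.
 */
#if 0
static char *
example_dir_name_alloc(dsl_dir_t *dd)
{
	int len = dsl_dir_namelen(dd) + 1;	/* +1 for the terminating NUL */
	char *buf = kmem_alloc(len, KM_SLEEP);

	dsl_dir_name(dd, buf);	/* fills buf with e.g. "pool/parent/child" */
	return (buf);		/* caller frees with kmem_free(buf, len) */
}
#endif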
static int
getcomponent(const char *path, char *component, const char **nextp)
{
	char *p;

	if ((path == NULL) || (path[0] == '\0'))
		return (ENOENT);
	/* This would be a good place to reserve some namespace... */
	p = strpbrk(path, "/@");
	if (p && (p[1] == '/' || p[1] == '@')) {
		/* two separators in a row */
		return (EINVAL);
	}
	if (p == NULL || p == path) {
		/*
		 * if the first thing is an @ or /, it had better be an
		 * @ and it had better not have any more ats or slashes,
		 * and it had better have something after the @.
		 */
		if (p != NULL &&
		    (p[0] != '@' || strpbrk(path+1, "/@") || p[1] == '\0'))
			return (EINVAL);
		if (strlen(path) >= MAXNAMELEN)
			return (ENAMETOOLONG);
		(void) strcpy(component, path);
		p = NULL;
	} else if (p[0] == '/') {
		if (p-path >= MAXNAMELEN)
			return (ENAMETOOLONG);
		(void) strncpy(component, path, p - path);
		component[p-path] = '\0';
		p++;
	} else if (p[0] == '@') {
		/*
		 * if the next separator is an @, there better not be
		 * any more slashes.
		 */
		if (strchr(path, '/'))
			return (EINVAL);
		if (p-path >= MAXNAMELEN)
			return (ENAMETOOLONG);
		(void) strncpy(component, path, p - path);
		component[p-path] = '\0';
	} else {
		ASSERT(!"invalid p");
	}
	*nextp = p;
	return (0);
}
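
/*
 * Worked example (annotation, not from the original source): given
 * path = "a/b@snap", successive getcomponent() calls peel off one
 * component at a time:
 *
 *	call 1: component = "a",	*nextp = "b@snap"
 *	call 2: component = "b",	*nextp = "@snap"
 *
 * A "@" is only legal as the last separator, so "a//b" and "a@x@y"
 * fail with EINVAL.
 */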
/*
 * same as dsl_dir_open, ignore the first component of name and use the
 * pool instead.
 */
int
dsl_dir_open_spa(spa_t *spa, const char *name, void *tag,
    dsl_dir_t **ddp, const char **tailp)
{
	char *buf;
	const char *next, *nextnext = NULL;
	int err;
	dsl_dir_t *dd;
	dsl_pool_t *dp;
	uint64_t ddobj;
	int openedspa = FALSE;

	dprintf("%s\n", name);

	buf = kmem_alloc(MAXNAMELEN, KM_SLEEP);
	err = getcomponent(name, buf, &next);
	if (err)
		goto error;
	if (spa == NULL) {
		err = spa_open(buf, &spa, FTAG);
		if (err) {
			dprintf("spa_open(%s) failed\n", buf);
			goto error;
		}
		openedspa = TRUE;

		/* XXX this assertion belongs in spa_open */
		ASSERT(!dsl_pool_sync_context(spa_get_dsl(spa)));
	}

	dp = spa_get_dsl(spa);

	rw_enter(&dp->dp_config_rwlock, RW_READER);
	err = dsl_dir_open_obj(dp, dp->dp_root_dir_obj, NULL, tag, &dd);
	if (err) {
		rw_exit(&dp->dp_config_rwlock);
		if (openedspa)
			spa_close(spa, FTAG);
		goto error;
	}

	while (next != NULL) {
		dsl_dir_t *child_ds;
		err = getcomponent(next, buf, &nextnext);
		if (err)
			break;
		ASSERT(next[0] != '\0');
		if (next[0] == '@')
			break;
		dprintf("looking up %s in obj%lld\n",
		    buf, dd->dd_phys->dd_child_dir_zapobj);

		err = zap_lookup(dp->dp_meta_objset,
		    dd->dd_phys->dd_child_dir_zapobj,
		    buf, sizeof (ddobj), 1, &ddobj);
		if (err) {
			if (err == ENOENT)
				err = 0;
			break;
		}

		err = dsl_dir_open_obj(dp, ddobj, buf, tag, &child_ds);
		if (err)
			break;
		dsl_dir_close(dd, tag);
		dd = child_ds;
		next = nextnext;
	}
	rw_exit(&dp->dp_config_rwlock);

	if (err) {
		dsl_dir_close(dd, tag);
		if (openedspa)
			spa_close(spa, FTAG);
		goto error;
	}

	/*
	 * It's an error if there's more than one component left, or
	 * tailp==NULL and there's any component left.
	 */
	if (next != NULL &&
	    (tailp == NULL || (nextnext && nextnext[0] != '\0'))) {
		/* bad path name */
		dsl_dir_close(dd, tag);
		dprintf("next=%p (%s) tail=%p\n", next, next?next:"", tailp);
		err = ENOENT;
	}
	if (tailp)
		*tailp = next;
	if (openedspa)
		spa_close(spa, FTAG);
	*ddp = dd;
error:
	kmem_free(buf, MAXNAMELEN);
	return (err);
}
/*
 * Return the dsl_dir_t, and possibly the last component which couldn't
 * be found in *tail.  Return NULL if the path is bogus, or if
 * tail==NULL and we couldn't parse the whole name.  (*tail)[0] == '@'
 * means that the last component is a snapshot.
 */
int
dsl_dir_open(const char *name, void *tag, dsl_dir_t **ddp, const char **tailp)
{
	return (dsl_dir_open_spa(NULL, name, tag, ddp, tailp));
}
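
/*
 * Illustrative sketch (not part of the original source): the usual
 * hold/release pattern around dsl_dir_open().  "tank/home" is a
 * hypothetical dataset name; error handling is abbreviated.
 */
#if 0
static int
example_dir_hold(void)
{
	dsl_dir_t *dd;
	const char *tail;
	int err;

	err = dsl_dir_open("tank/home", FTAG, &dd, &tail);
	if (err)
		return (err);
	/* a non-NULL tail names the final component that wasn't found */
	dsl_dir_close(dd, FTAG);
	return (0);
}
#endif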
uint64_t
dsl_dir_create_sync(dsl_pool_t *dp, dsl_dir_t *pds, const char *name,
    dmu_tx_t *tx)
{
	objset_t *mos = dp->dp_meta_objset;
	uint64_t ddobj;
	dsl_dir_phys_t *ddphys;
	dmu_buf_t *dbuf;

	ddobj = dmu_object_alloc(mos, DMU_OT_DSL_DIR, 0,
	    DMU_OT_DSL_DIR, sizeof (dsl_dir_phys_t), tx);
	if (pds) {
		VERIFY(0 == zap_add(mos, pds->dd_phys->dd_child_dir_zapobj,
		    name, sizeof (uint64_t), 1, &ddobj, tx));
	} else {
		/* it's the root dir */
		VERIFY(0 == zap_add(mos, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_ROOT_DATASET, sizeof (uint64_t), 1, &ddobj, tx));
	}
	VERIFY(0 == dmu_bonus_hold(mos, ddobj, FTAG, &dbuf));
	dmu_buf_will_dirty(dbuf, tx);
	ddphys = dbuf->db_data;

	ddphys->dd_creation_time = gethrestime_sec();
	if (pds)
		ddphys->dd_parent_obj = pds->dd_object;
	ddphys->dd_props_zapobj = zap_create(mos,
	    DMU_OT_DSL_PROPS, DMU_OT_NONE, 0, tx);
	ddphys->dd_child_dir_zapobj = zap_create(mos,
	    DMU_OT_DSL_DIR_CHILD_MAP, DMU_OT_NONE, 0, tx);
	if (spa_version(dp->dp_spa) >= SPA_VERSION_USED_BREAKDOWN)
		ddphys->dd_flags |= DD_FLAG_USED_BREAKDOWN;
	dmu_buf_rele(dbuf, FTAG);

	return (ddobj);
}
/* ARGSUSED */
int
dsl_dir_destroy_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	dsl_dir_t *dd = ds->ds_dir;
	dsl_pool_t *dp = dd->dd_pool;
	objset_t *mos = dp->dp_meta_objset;
	int err;
	uint64_t count;

	/*
	 * There should be exactly two holds, both from
	 * dsl_dataset_destroy: one on the dd directory, and one on its
	 * head ds.  Otherwise, someone is trying to lookup something
	 * inside this dir while we want to destroy it.  The
	 * config_rwlock ensures that nobody else opens it after we
	 * check.
	 */
	if (dmu_buf_refcount(dd->dd_dbuf) > 2)
		return (EBUSY);

	err = zap_count(mos, dd->dd_phys->dd_child_dir_zapobj, &count);
	if (err)
		return (err);
	if (count != 0)
		return (EEXIST);

	return (0);
}
void
dsl_dir_destroy_sync(void *arg1, void *tag, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	dsl_dir_t *dd = ds->ds_dir;
	objset_t *mos = dd->dd_pool->dp_meta_objset;
	dsl_prop_setarg_t psa;
	uint64_t value = 0;
	uint64_t obj;
	dd_used_t t;

	ASSERT(RW_WRITE_HELD(&dd->dd_pool->dp_config_rwlock));
	ASSERT(dd->dd_phys->dd_head_dataset_obj == 0);

	/* Remove our reservation. */
	dsl_prop_setarg_init_uint64(&psa, "reservation",
	    (ZPROP_SRC_NONE | ZPROP_SRC_LOCAL | ZPROP_SRC_RECEIVED),
	    &value);
	psa.psa_effective_value = 0;	/* predict default value */

	dsl_dir_set_reservation_sync(ds, &psa, tx);

	ASSERT3U(dd->dd_phys->dd_used_bytes, ==, 0);
	ASSERT3U(dd->dd_phys->dd_reserved, ==, 0);
	for (t = 0; t < DD_USED_NUM; t++)
		ASSERT3U(dd->dd_phys->dd_used_breakdown[t], ==, 0);

	VERIFY(0 == zap_destroy(mos, dd->dd_phys->dd_child_dir_zapobj, tx));
	VERIFY(0 == zap_destroy(mos, dd->dd_phys->dd_props_zapobj, tx));
	VERIFY(0 == dsl_deleg_destroy(mos, dd->dd_phys->dd_deleg_zapobj, tx));
	VERIFY(0 == zap_remove(mos,
	    dd->dd_parent->dd_phys->dd_child_dir_zapobj, dd->dd_myname, tx));

	obj = dd->dd_object;
	dsl_dir_close(dd, tag);
	VERIFY(0 == dmu_object_free(mos, obj, tx));
}
boolean_t
dsl_dir_is_clone(dsl_dir_t *dd)
{
	return (dd->dd_phys->dd_origin_obj &&
	    (dd->dd_pool->dp_origin_snap == NULL ||
	    dd->dd_phys->dd_origin_obj !=
	    dd->dd_pool->dp_origin_snap->ds_object));
}
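
/*
 * Annotation (not in the original source): a dir with an origin is not
 * considered a clone when that origin is the pool-wide $ORIGIN snapshot
 * (dp_origin_snap), which newer pools use as the implicit origin for
 * ordinary filesystems; when dp_origin_snap is NULL (older pools), any
 * origin marks a true clone.
 */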
void
dsl_dir_stats(dsl_dir_t *dd, nvlist_t *nv)
{
	mutex_enter(&dd->dd_lock);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USED,
	    dd->dd_phys->dd_used_bytes);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_QUOTA, dd->dd_phys->dd_quota);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_RESERVATION,
	    dd->dd_phys->dd_reserved);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_COMPRESSRATIO,
	    dd->dd_phys->dd_compressed_bytes == 0 ? 100 :
	    (dd->dd_phys->dd_uncompressed_bytes * 100 /
	    dd->dd_phys->dd_compressed_bytes));
	if (dd->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN) {
		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USEDSNAP,
		    dd->dd_phys->dd_used_breakdown[DD_USED_SNAP]);
		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USEDDS,
		    dd->dd_phys->dd_used_breakdown[DD_USED_HEAD]);
		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USEDREFRESERV,
		    dd->dd_phys->dd_used_breakdown[DD_USED_REFRSRV]);
		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USEDCHILD,
		    dd->dd_phys->dd_used_breakdown[DD_USED_CHILD] +
		    dd->dd_phys->dd_used_breakdown[DD_USED_CHILD_RSRV]);
	}
	mutex_exit(&dd->dd_lock);

	rw_enter(&dd->dd_pool->dp_config_rwlock, RW_READER);
	if (dsl_dir_is_clone(dd)) {
		dsl_dataset_t *ds;
		char buf[MAXNAMELEN];

		VERIFY(0 == dsl_dataset_hold_obj(dd->dd_pool,
		    dd->dd_phys->dd_origin_obj, FTAG, &ds));
		dsl_dataset_name(ds, buf);
		dsl_dataset_rele(ds, FTAG);
		dsl_prop_nvlist_add_string(nv, ZFS_PROP_ORIGIN, buf);
	}
	rw_exit(&dd->dd_pool->dp_config_rwlock);
}
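
/*
 * Worked example (annotation): with dd_uncompressed_bytes = 300 and
 * dd_compressed_bytes = 120, the ratio reported above is
 * 300 * 100 / 120 = 250, i.e. a 2.50x compression ratio.  A value of
 * 100 means no compression (or nothing written yet).
 */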
void
dsl_dir_dirty(dsl_dir_t *dd, dmu_tx_t *tx)
{
	dsl_pool_t *dp = dd->dd_pool;

	ASSERT(dd->dd_phys);

	if (txg_list_add(&dp->dp_dirty_dirs, dd, tx->tx_txg) == 0) {
		/* up the hold count until we can be written out */
		dmu_buf_add_ref(dd->dd_dbuf, dd);
	}
}
static int64_t
parent_delta(dsl_dir_t *dd, uint64_t used, int64_t delta)
{
	uint64_t old_accounted = MAX(used, dd->dd_phys->dd_reserved);
	uint64_t new_accounted = MAX(used + delta, dd->dd_phys->dd_reserved);
	return (new_accounted - old_accounted);
}
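
/*
 * Worked example (annotation): with dd_reserved = 100, used = 80 and
 * delta = +30, old_accounted = MAX(80, 100) = 100 and new_accounted =
 * MAX(110, 100) = 110, so only +10 is charged to the parent; the first
 * 20 bytes were already covered by the reservation.
 */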
void
dsl_dir_sync(dsl_dir_t *dd, dmu_tx_t *tx)
{
	ASSERT(dmu_tx_is_syncing(tx));

	dmu_buf_will_dirty(dd->dd_dbuf, tx);

	mutex_enter(&dd->dd_lock);
	ASSERT3U(dd->dd_tempreserved[tx->tx_txg&TXG_MASK], ==, 0);
	dprintf_dd(dd, "txg=%llu towrite=%lluK\n", tx->tx_txg,
	    dd->dd_space_towrite[tx->tx_txg&TXG_MASK] / 1024);
	dd->dd_space_towrite[tx->tx_txg&TXG_MASK] = 0;
	mutex_exit(&dd->dd_lock);

	/* release the hold from dsl_dir_dirty */
	dmu_buf_rele(dd->dd_dbuf, dd);
}
static uint64_t
dsl_dir_space_towrite(dsl_dir_t *dd)
{
	uint64_t space = 0;
	int i;

	ASSERT(MUTEX_HELD(&dd->dd_lock));

	for (i = 0; i < TXG_SIZE; i++) {
		space += dd->dd_space_towrite[i&TXG_MASK];
		ASSERT3U(dd->dd_space_towrite[i&TXG_MASK], >=, 0);
	}
	return (space);
}
/*
 * How much space would dd have available if ancestor had delta applied
 * to it?  If ondiskonly is set, we're only interested in what's
 * on-disk, not estimated pending changes.
 */
uint64_t
dsl_dir_space_available(dsl_dir_t *dd,
    dsl_dir_t *ancestor, int64_t delta, int ondiskonly)
{
	uint64_t parentspace, myspace, quota, used;

	/*
	 * If there are no restrictions otherwise, assume we have
	 * unlimited space available.
	 */
	quota = UINT64_MAX;
	parentspace = UINT64_MAX;

	if (dd->dd_parent != NULL) {
		parentspace = dsl_dir_space_available(dd->dd_parent,
		    ancestor, delta, ondiskonly);
	}

	mutex_enter(&dd->dd_lock);
	if (dd->dd_phys->dd_quota != 0)
		quota = dd->dd_phys->dd_quota;
	used = dd->dd_phys->dd_used_bytes;
	if (!ondiskonly)
		used += dsl_dir_space_towrite(dd);

	if (dd->dd_parent == NULL) {
		uint64_t poolsize = dsl_pool_adjustedsize(dd->dd_pool, FALSE);
		quota = MIN(quota, poolsize);
	}

	if (dd->dd_phys->dd_reserved > used && parentspace != UINT64_MAX) {
		/*
		 * We have some space reserved, in addition to what our
		 * parent gave us.
		 */
		parentspace += dd->dd_phys->dd_reserved - used;
	}

	if (dd == ancestor) {
		ASSERT(delta <= 0);
		ASSERT(used >= -delta);
		used += delta;
		if (parentspace != UINT64_MAX)
			parentspace -= delta;
	}

	if (used > quota) {
		/* over quota */
		myspace = 0;
	} else {
		/*
		 * the lesser of the space provided by our parent and
		 * the space left in our quota
		 */
		myspace = MIN(parentspace, quota - used);
	}

	mutex_exit(&dd->dd_lock);

	return (myspace);
}
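
/*
 * Worked example (annotation): with no reservation, quota = 100,
 * used = 40 and a parent able to offer 30, the result is
 * myspace = MIN(30, 100 - 40) = 30; the parent, not the quota, is the
 * binding constraint.
 */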
struct tempreserve {
	list_node_t tr_node;
	dsl_pool_t *tr_dp;
	dsl_dir_t *tr_ds;
	uint64_t tr_size;
};

static int
dsl_dir_tempreserve_impl(dsl_dir_t *dd, uint64_t asize, boolean_t netfree,
    boolean_t ignorequota, boolean_t checkrefquota, list_t *tr_list,
    dmu_tx_t *tx, boolean_t first)
{
	uint64_t txg = tx->tx_txg;
	uint64_t est_inflight, used_on_disk, quota, parent_rsrv;
	uint64_t deferred = 0;
	struct tempreserve *tr;
	int retval = EDQUOT;
	int txgidx = txg & TXG_MASK;
	int i;
	uint64_t ref_rsrv = 0;

	ASSERT3U(txg, !=, 0);
	ASSERT3S(asize, >, 0);

	mutex_enter(&dd->dd_lock);

	/*
	 * Check against the dsl_dir's quota.  We don't add in the delta
	 * when checking for over-quota because they get one free hit.
	 */
	est_inflight = dsl_dir_space_towrite(dd);
	for (i = 0; i < TXG_SIZE; i++)
		est_inflight += dd->dd_tempreserved[i];
	used_on_disk = dd->dd_phys->dd_used_bytes;

	/*
	 * On the first iteration, fetch the dataset's used-on-disk and
	 * refreservation values. Also, if checkrefquota is set, test if
	 * allocating this space would exceed the dataset's refquota.
	 */
	if (first && tx->tx_objset) {
		int error;
		dsl_dataset_t *ds = tx->tx_objset->os_dsl_dataset;

		error = dsl_dataset_check_quota(ds, checkrefquota,
		    asize, est_inflight, &used_on_disk, &ref_rsrv);
		if (error) {
			mutex_exit(&dd->dd_lock);
			return (error);
		}
	}

	/*
	 * If this transaction will result in a net free of space,
	 * we want to let it through.
	 */
	if (ignorequota || netfree || dd->dd_phys->dd_quota == 0)
		quota = UINT64_MAX;
	else
		quota = dd->dd_phys->dd_quota;

	/*
	 * Adjust the quota against the actual pool size at the root
	 * minus any outstanding deferred frees.
	 * To ensure that it's possible to remove files from a full
	 * pool without inducing transient overcommits, we throttle
	 * netfree transactions against a quota that is slightly larger,
	 * but still within the pool's allocation slop.  In cases where
	 * we're very close to full, this will allow a steady trickle of
	 * removes to get through.
	 */
	if (dd->dd_parent == NULL) {
		spa_t *spa = dd->dd_pool->dp_spa;
		uint64_t poolsize = dsl_pool_adjustedsize(dd->dd_pool, netfree);
		deferred = metaslab_class_get_deferred(spa_normal_class(spa));
		if (poolsize - deferred < quota) {
			quota = poolsize - deferred;
			retval = ENOSPC;
		}
	}

	/*
	 * If they are requesting more space, and our current estimate
	 * is over quota, they get to try again unless the actual
	 * on-disk is over quota and there are no pending changes (which
	 * may free up space for us).
	 */
	if (used_on_disk + est_inflight >= quota) {
		if (est_inflight > 0 || used_on_disk < quota ||
		    (retval == ENOSPC && used_on_disk < quota + deferred))
			retval = ERESTART;
		dprintf_dd(dd, "failing: used=%lluK inflight = %lluK "
		    "quota=%lluK tr=%lluK err=%d\n",
		    used_on_disk>>10, est_inflight>>10,
		    quota>>10, asize>>10, retval);
		mutex_exit(&dd->dd_lock);
		return (retval);
	}

	/* We need to up our estimated delta before dropping dd_lock */
	dd->dd_tempreserved[txgidx] += asize;

	parent_rsrv = parent_delta(dd, used_on_disk + est_inflight,
	    asize - ref_rsrv);
	mutex_exit(&dd->dd_lock);

	tr = kmem_zalloc(sizeof (struct tempreserve), KM_SLEEP);
	tr->tr_ds = dd;
	tr->tr_size = asize;
	list_insert_tail(tr_list, tr);

	/* see if it's OK with our parent */
	if (dd->dd_parent && parent_rsrv) {
		boolean_t ismos = (dd->dd_phys->dd_head_dataset_obj == 0);

		return (dsl_dir_tempreserve_impl(dd->dd_parent,
		    parent_rsrv, netfree, ismos, TRUE, tr_list, tx, FALSE));
	} else {
		return (0);
	}
}
/*
 * Reserve space in this dsl_dir, to be used in this tx's txg.
 * After the space has been dirtied (and dsl_dir_willuse_space()
 * has been called), the reservation should be canceled, using
 * dsl_dir_tempreserve_clear().
 */
int
dsl_dir_tempreserve_space(dsl_dir_t *dd, uint64_t lsize, uint64_t asize,
    uint64_t fsize, uint64_t usize, void **tr_cookiep, dmu_tx_t *tx)
{
	int err;
	list_t *tr_list;

	if (asize == 0) {
		*tr_cookiep = NULL;
		return (0);
	}

	tr_list = kmem_alloc(sizeof (list_t), KM_SLEEP);
	list_create(tr_list, sizeof (struct tempreserve),
	    offsetof(struct tempreserve, tr_node));
	ASSERT3S(asize, >, 0);
	ASSERT3S(fsize, >=, 0);

	err = arc_tempreserve_space(lsize, tx->tx_txg);
	if (err == 0) {
		struct tempreserve *tr;

		tr = kmem_zalloc(sizeof (struct tempreserve), KM_SLEEP);
		tr->tr_size = lsize;
		list_insert_tail(tr_list, tr);

		err = dsl_pool_tempreserve_space(dd->dd_pool, asize, tx);
	} else {
		if (err == EAGAIN) {
			txg_delay(dd->dd_pool, tx->tx_txg, 1);
			err = ERESTART;
		}
		dsl_pool_memory_pressure(dd->dd_pool);
	}

	if (err == 0) {
		struct tempreserve *tr;

		tr = kmem_zalloc(sizeof (struct tempreserve), KM_SLEEP);
		tr->tr_dp = dd->dd_pool;
		tr->tr_size = asize;
		list_insert_tail(tr_list, tr);

		err = dsl_dir_tempreserve_impl(dd, asize, fsize >= asize,
		    FALSE, asize > usize, tr_list, tx, TRUE);
	}

	if (err)
		dsl_dir_tempreserve_clear(tr_list, tx);
	else
		*tr_cookiep = tr_list;

	return (err);
}
/*
 * Clear a temporary reservation that we previously made with
 * dsl_dir_tempreserve_space().
 */
void
dsl_dir_tempreserve_clear(void *tr_cookie, dmu_tx_t *tx)
{
	int txgidx = tx->tx_txg & TXG_MASK;
	list_t *tr_list = tr_cookie;
	struct tempreserve *tr;

	ASSERT3U(tx->tx_txg, !=, 0);

	if (tr_cookie == NULL)
		return;

	while ((tr = list_head(tr_list))) {
		if (tr->tr_dp) {
			dsl_pool_tempreserve_clear(tr->tr_dp, tr->tr_size, tx);
		} else if (tr->tr_ds) {
			mutex_enter(&tr->tr_ds->dd_lock);
			ASSERT3U(tr->tr_ds->dd_tempreserved[txgidx], >=,
			    tr->tr_size);
			tr->tr_ds->dd_tempreserved[txgidx] -= tr->tr_size;
			mutex_exit(&tr->tr_ds->dd_lock);
		} else {
			arc_tempreserve_clear(tr->tr_size);
		}
		list_remove(tr_list, tr);
		kmem_free(tr, sizeof (struct tempreserve));
	}

	kmem_free(tr_list, sizeof (list_t));
}
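
/*
 * Illustrative sketch (not part of the original source): the intended
 * pairing of dsl_dir_tempreserve_space() and _clear() around dirtying
 * data, per the comment above dsl_dir_tempreserve_space().  The sizes
 * and the helper name example_reserve_then_clear are made up.
 */
#if 0
static int
example_reserve_then_clear(dsl_dir_t *dd, dmu_tx_t *tx)
{
	void *tr_cookie;
	int err;

	err = dsl_dir_tempreserve_space(dd, 8192 /* lsize */,
	    16384 /* asize */, 0 /* fsize */, 0 /* usize */,
	    &tr_cookie, tx);
	if (err)
		return (err);
	/* ... dirty the data, dsl_dir_willuse_space(), etc ... */
	dsl_dir_tempreserve_clear(tr_cookie, tx);
	return (0);
}
#endif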
static void
dsl_dir_willuse_space_impl(dsl_dir_t *dd, int64_t space, dmu_tx_t *tx)
{
	int64_t parent_space;
	uint64_t est_used;

	mutex_enter(&dd->dd_lock);
	if (space > 0)
		dd->dd_space_towrite[tx->tx_txg & TXG_MASK] += space;

	est_used = dsl_dir_space_towrite(dd) + dd->dd_phys->dd_used_bytes;
	parent_space = parent_delta(dd, est_used, space);
	mutex_exit(&dd->dd_lock);

	/* Make sure that we clean up dd_space_to* */
	dsl_dir_dirty(dd, tx);

	/* XXX this is potentially expensive and unnecessary... */
	if (parent_space && dd->dd_parent)
		dsl_dir_willuse_space_impl(dd->dd_parent, parent_space, tx);
}
/*
 * Call in open context when we think we're going to write/free space,
 * eg. when dirtying data.  Be conservative (ie. OK to write less than
 * this or free more than this, but don't write more or free less).
 */
void
dsl_dir_willuse_space(dsl_dir_t *dd, int64_t space, dmu_tx_t *tx)
{
	dsl_pool_willuse_space(dd->dd_pool, space, tx);
	dsl_dir_willuse_space_impl(dd, space, tx);
}
/* call from syncing context when we actually write/free space for this dd */
void
dsl_dir_diduse_space(dsl_dir_t *dd, dd_used_t type,
    int64_t used, int64_t compressed, int64_t uncompressed, dmu_tx_t *tx)
{
	int64_t accounted_delta;
	boolean_t needlock = !MUTEX_HELD(&dd->dd_lock);

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(type < DD_USED_NUM);

	dsl_dir_dirty(dd, tx);

	if (needlock)
		mutex_enter(&dd->dd_lock);
	accounted_delta = parent_delta(dd, dd->dd_phys->dd_used_bytes, used);
	ASSERT(used >= 0 || dd->dd_phys->dd_used_bytes >= -used);
	ASSERT(compressed >= 0 ||
	    dd->dd_phys->dd_compressed_bytes >= -compressed);
	ASSERT(uncompressed >= 0 ||
	    dd->dd_phys->dd_uncompressed_bytes >= -uncompressed);
	dd->dd_phys->dd_used_bytes += used;
	dd->dd_phys->dd_uncompressed_bytes += uncompressed;
	dd->dd_phys->dd_compressed_bytes += compressed;

	if (dd->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN) {
		ASSERT(used > 0 ||
		    dd->dd_phys->dd_used_breakdown[type] >= -used);
		dd->dd_phys->dd_used_breakdown[type] += used;
#ifdef DEBUG
		{
			dd_used_t t;
			uint64_t u = 0;
			for (t = 0; t < DD_USED_NUM; t++)
				u += dd->dd_phys->dd_used_breakdown[t];
			ASSERT3U(u, ==, dd->dd_phys->dd_used_bytes);
		}
#endif
	}
	if (needlock)
		mutex_exit(&dd->dd_lock);

	if (dd->dd_parent != NULL) {
		dsl_dir_diduse_space(dd->dd_parent, DD_USED_CHILD,
		    accounted_delta, compressed, uncompressed, tx);
		dsl_dir_transfer_space(dd->dd_parent,
		    used - accounted_delta,
		    DD_USED_CHILD_RSRV, DD_USED_CHILD, tx);
	}
}
void
dsl_dir_transfer_space(dsl_dir_t *dd, int64_t delta,
    dd_used_t oldtype, dd_used_t newtype, dmu_tx_t *tx)
{
	boolean_t needlock = !MUTEX_HELD(&dd->dd_lock);

	ASSERT(dmu_tx_is_syncing(tx));
	ASSERT(oldtype < DD_USED_NUM);
	ASSERT(newtype < DD_USED_NUM);

	if (delta == 0 || !(dd->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN))
		return;

	dsl_dir_dirty(dd, tx);
	if (needlock)
		mutex_enter(&dd->dd_lock);
	ASSERT(delta > 0 ?
	    dd->dd_phys->dd_used_breakdown[oldtype] >= delta :
	    dd->dd_phys->dd_used_breakdown[newtype] >= -delta);
	ASSERT(dd->dd_phys->dd_used_bytes >= ABS(delta));
	dd->dd_phys->dd_used_breakdown[oldtype] -= delta;
	dd->dd_phys->dd_used_breakdown[newtype] += delta;
	if (needlock)
		mutex_exit(&dd->dd_lock);
}
static int
dsl_dir_set_quota_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	dsl_dir_t *dd = ds->ds_dir;
	dsl_prop_setarg_t *psa = arg2;
	int err;
	uint64_t towrite;

	if ((err = dsl_prop_predict_sync(ds->ds_dir, psa)) != 0)
		return (err);

	if (psa->psa_effective_value == 0)
		return (0);

	mutex_enter(&dd->dd_lock);
	/*
	 * If we are doing the preliminary check in open context, and
	 * there are pending changes, then don't fail it, since the
	 * pending changes could under-estimate the amount of space to be
	 * freed up.
	 */
	towrite = dsl_dir_space_towrite(dd);
	if ((dmu_tx_is_syncing(tx) || towrite == 0) &&
	    (psa->psa_effective_value < dd->dd_phys->dd_reserved ||
	    psa->psa_effective_value < dd->dd_phys->dd_used_bytes + towrite)) {
		err = ENOSPC;
	}
	mutex_exit(&dd->dd_lock);
	return (err);
}
extern dsl_syncfunc_t dsl_prop_set_sync;
static void
dsl_dir_set_quota_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	dsl_dir_t *dd = ds->ds_dir;
	dsl_prop_setarg_t *psa = arg2;
	uint64_t effective_value = psa->psa_effective_value;

	dsl_prop_set_sync(ds, psa, tx);
	DSL_PROP_CHECK_PREDICTION(dd, psa);

	dmu_buf_will_dirty(dd->dd_dbuf, tx);

	mutex_enter(&dd->dd_lock);
	dd->dd_phys->dd_quota = effective_value;
	mutex_exit(&dd->dd_lock);

	spa_history_log_internal(LOG_DS_QUOTA, dd->dd_pool->dp_spa,
	    tx, "%lld dataset = %llu ",
	    (longlong_t)effective_value, dd->dd_phys->dd_head_dataset_obj);
}
int
dsl_dir_set_quota(const char *ddname, zprop_source_t source, uint64_t quota)
{
	dsl_dir_t *dd;
	dsl_dataset_t *ds;
	dsl_prop_setarg_t psa;
	int err;

	dsl_prop_setarg_init_uint64(&psa, "quota", source, &quota);

	err = dsl_dataset_hold(ddname, FTAG, &ds);
	if (err)
		return (err);

	err = dsl_dir_open(ddname, FTAG, &dd, NULL);
	if (err) {
		dsl_dataset_rele(ds, FTAG);
		return (err);
	}

	ASSERT(ds->ds_dir == dd);

	/*
	 * If someone removes a file, then tries to set the quota, we want to
	 * make sure the file freeing takes effect.
	 */
	txg_wait_open(dd->dd_pool, 0);

	err = dsl_sync_task_do(dd->dd_pool, dsl_dir_set_quota_check,
	    dsl_dir_set_quota_sync, ds, &psa, 0);

	dsl_dir_close(dd, FTAG);
	dsl_dataset_rele(ds, FTAG);
	return (err);
}
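
/*
 * Illustrative sketch (not part of the original source): setting a
 * 10 GiB local quota on a hypothetical dataset "tank/home".
 */
#if 0
static int
example_set_quota(void)
{
	return (dsl_dir_set_quota("tank/home", ZPROP_SRC_LOCAL,
	    10ULL << 30));
}
#endif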
static int
dsl_dir_set_reservation_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	dsl_dir_t *dd = ds->ds_dir;
	dsl_prop_setarg_t *psa = arg2;
	uint64_t effective_value;
	uint64_t used, avail;
	int err;

	if ((err = dsl_prop_predict_sync(ds->ds_dir, psa)) != 0)
		return (err);

	effective_value = psa->psa_effective_value;

	/*
	 * If we are doing the preliminary check in open context, the
	 * space estimates may be inaccurate.
	 */
	if (!dmu_tx_is_syncing(tx))
		return (0);

	mutex_enter(&dd->dd_lock);
	used = dd->dd_phys->dd_used_bytes;
	mutex_exit(&dd->dd_lock);

	if (dd->dd_parent) {
		avail = dsl_dir_space_available(dd->dd_parent,
		    NULL, 0, FALSE);
	} else {
		avail = dsl_pool_adjustedsize(dd->dd_pool, B_FALSE) - used;
	}

	if (MAX(used, effective_value) > MAX(used, dd->dd_phys->dd_reserved)) {
		uint64_t delta = MAX(used, effective_value) -
		    MAX(used, dd->dd_phys->dd_reserved);

		if (delta > avail)
			return (ENOSPC);
		if (dd->dd_phys->dd_quota > 0 &&
		    effective_value > dd->dd_phys->dd_quota)
			return (ENOSPC);
	}

	return (0);
}
static void
dsl_dir_set_reservation_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	dsl_dir_t *dd = ds->ds_dir;
	dsl_prop_setarg_t *psa = arg2;
	uint64_t effective_value = psa->psa_effective_value;
	uint64_t used;
	int64_t delta;

	dsl_prop_set_sync(ds, psa, tx);
	DSL_PROP_CHECK_PREDICTION(dd, psa);

	dmu_buf_will_dirty(dd->dd_dbuf, tx);

	mutex_enter(&dd->dd_lock);
	used = dd->dd_phys->dd_used_bytes;
	delta = MAX(used, effective_value) -
	    MAX(used, dd->dd_phys->dd_reserved);
	dd->dd_phys->dd_reserved = effective_value;

	if (dd->dd_parent != NULL) {
		/* Roll up this additional usage into our ancestors */
		dsl_dir_diduse_space(dd->dd_parent, DD_USED_CHILD_RSRV,
		    delta, 0, 0, tx);
	}
	mutex_exit(&dd->dd_lock);

	spa_history_log_internal(LOG_DS_RESERVATION, dd->dd_pool->dp_spa,
	    tx, "%lld dataset = %llu",
	    (longlong_t)effective_value, dd->dd_phys->dd_head_dataset_obj);
}
int
dsl_dir_set_reservation(const char *ddname, zprop_source_t source,
    uint64_t reservation)
{
	dsl_dir_t *dd;
	dsl_dataset_t *ds;
	dsl_prop_setarg_t psa;
	int err;

	dsl_prop_setarg_init_uint64(&psa, "reservation", source, &reservation);

	err = dsl_dataset_hold(ddname, FTAG, &ds);
	if (err)
		return (err);

	err = dsl_dir_open(ddname, FTAG, &dd, NULL);
	if (err) {
		dsl_dataset_rele(ds, FTAG);
		return (err);
	}

	ASSERT(ds->ds_dir == dd);

	err = dsl_sync_task_do(dd->dd_pool, dsl_dir_set_reservation_check,
	    dsl_dir_set_reservation_sync, ds, &psa, 0);

	dsl_dir_close(dd, FTAG);
	dsl_dataset_rele(ds, FTAG);
	return (err);
}
static dsl_dir_t *
closest_common_ancestor(dsl_dir_t *ds1, dsl_dir_t *ds2)
{
	for (; ds1; ds1 = ds1->dd_parent) {
		dsl_dir_t *dd;
		for (dd = ds2; dd; dd = dd->dd_parent) {
			if (ds1 == dd)
				return (dd);
		}
	}
	return (NULL);
}
/*
 * If delta is applied to dd, how much of that delta would be applied to
 * ancestor?  Syncing context only.
 */
static int64_t
would_change(dsl_dir_t *dd, int64_t delta, dsl_dir_t *ancestor)
{
	if (dd == ancestor)
		return (delta);

	mutex_enter(&dd->dd_lock);
	delta = parent_delta(dd, dd->dd_phys->dd_used_bytes, delta);
	mutex_exit(&dd->dd_lock);
	return (would_change(dd->dd_parent, delta, ancestor));
}
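
/*
 * Annotation (not in the original source): would_change() walks up the
 * tree converting a child-level delta into a parent-level delta via
 * parent_delta() at each step, so a change absorbed entirely by a
 * directory's reservation contributes 0 to every ancestor above it.
 */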
struct renamearg {
	dsl_dir_t *newparent;
	const char *mynewname;
};
static int
dsl_dir_rename_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dir_t *dd = arg1;
	struct renamearg *ra = arg2;
	dsl_pool_t *dp = dd->dd_pool;
	objset_t *mos = dp->dp_meta_objset;
	int err;
	uint64_t val;

	/*
	 * There should only be one reference, from dmu_objset_rename().
	 * Fleeting holds are also possible (eg, from "zfs list" getting
	 * stats), but any that are present in open context will likely
	 * be gone by syncing context, so only fail from syncing
	 * context.
	 */
	if (dmu_tx_is_syncing(tx) && dmu_buf_refcount(dd->dd_dbuf) > 1)
		return (EBUSY);

	/* check for existing name */
	err = zap_lookup(mos, ra->newparent->dd_phys->dd_child_dir_zapobj,
	    ra->mynewname, 8, 1, &val);
	if (err == 0)
		return (EEXIST);
	if (err != ENOENT)
		return (err);

	if (ra->newparent != dd->dd_parent) {
		/* is there enough space? */
		uint64_t myspace =
		    MAX(dd->dd_phys->dd_used_bytes, dd->dd_phys->dd_reserved);

		/* no rename into our descendant */
		if (closest_common_ancestor(dd, ra->newparent) == dd)
			return (EINVAL);

		if ((err = dsl_dir_transfer_possible(dd->dd_parent,
		    ra->newparent, myspace)))
			return (err);
	}

	return (0);
}
static void
dsl_dir_rename_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dir_t *dd = arg1;
	struct renamearg *ra = arg2;
	dsl_pool_t *dp = dd->dd_pool;
	objset_t *mos = dp->dp_meta_objset;
	int err;

	ASSERT(dmu_buf_refcount(dd->dd_dbuf) <= 2);

	if (ra->newparent != dd->dd_parent) {
		dsl_dir_diduse_space(dd->dd_parent, DD_USED_CHILD,
		    -dd->dd_phys->dd_used_bytes,
		    -dd->dd_phys->dd_compressed_bytes,
		    -dd->dd_phys->dd_uncompressed_bytes, tx);
		dsl_dir_diduse_space(ra->newparent, DD_USED_CHILD,
		    dd->dd_phys->dd_used_bytes,
		    dd->dd_phys->dd_compressed_bytes,
		    dd->dd_phys->dd_uncompressed_bytes, tx);

		if (dd->dd_phys->dd_reserved > dd->dd_phys->dd_used_bytes) {
			uint64_t unused_rsrv = dd->dd_phys->dd_reserved -
			    dd->dd_phys->dd_used_bytes;

			dsl_dir_diduse_space(dd->dd_parent, DD_USED_CHILD_RSRV,
			    -unused_rsrv, 0, 0, tx);
			dsl_dir_diduse_space(ra->newparent, DD_USED_CHILD_RSRV,
			    unused_rsrv, 0, 0, tx);
		}
	}

	dmu_buf_will_dirty(dd->dd_dbuf, tx);

	/* remove from old parent zapobj */
	err = zap_remove(mos, dd->dd_parent->dd_phys->dd_child_dir_zapobj,
	    dd->dd_myname, tx);
	ASSERT3U(err, ==, 0);

	(void) strcpy(dd->dd_myname, ra->mynewname);
	dsl_dir_close(dd->dd_parent, dd);
	dd->dd_phys->dd_parent_obj = ra->newparent->dd_object;
	VERIFY(0 == dsl_dir_open_obj(dd->dd_pool,
	    ra->newparent->dd_object, NULL, dd, &dd->dd_parent));

	/* add to new parent zapobj */
	err = zap_add(mos, ra->newparent->dd_phys->dd_child_dir_zapobj,
	    dd->dd_myname, 8, 1, &dd->dd_object, tx);
	ASSERT3U(err, ==, 0);

	spa_history_log_internal(LOG_DS_RENAME, dd->dd_pool->dp_spa,
	    tx, "dataset = %llu", dd->dd_phys->dd_head_dataset_obj);
}
int
dsl_dir_rename(dsl_dir_t *dd, const char *newname)
{
	struct renamearg ra;
	int err;

	/* new parent should exist */
	err = dsl_dir_open(newname, FTAG, &ra.newparent, &ra.mynewname);
	if (err)
		return (err);

	/* can't rename to different pool */
	if (dd->dd_pool != ra.newparent->dd_pool) {
		err = ENXIO;
		goto out;
	}

	/* new name should not already exist */
	if (ra.mynewname == NULL) {
		err = EEXIST;
		goto out;
	}

	err = dsl_sync_task_do(dd->dd_pool,
	    dsl_dir_rename_check, dsl_dir_rename_sync, dd, &ra, 3);

out:
	dsl_dir_close(ra.newparent, FTAG);
	return (err);
}
int
dsl_dir_transfer_possible(dsl_dir_t *sdd, dsl_dir_t *tdd, uint64_t space)
{
	dsl_dir_t *ancestor;
	int64_t adelta;
	uint64_t avail;

	ancestor = closest_common_ancestor(sdd, tdd);
	adelta = would_change(sdd, -space, ancestor);
	avail = dsl_dir_space_available(tdd, ancestor, adelta, FALSE);
	if (avail < space)
		return (ENOSPC);

	return (0);
}
timestruc_t
dsl_dir_snap_cmtime(dsl_dir_t *dd)
{
	timestruc_t t;

	mutex_enter(&dd->dd_lock);
	t = dd->dd_snap_cmtime;
	mutex_exit(&dd->dd_lock);

	return (t);
}
void
dsl_dir_snap_cmtime_update(dsl_dir_t *dd)
{
	timestruc_t t;

	gethrestime(&t);
	mutex_enter(&dd->dd_lock);
	dd->dd_snap_cmtime = t;
	mutex_exit(&dd->dd_lock);
}