/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2016 by Delphix. All rights reserved.
 * Copyright (c) 2017 Datto Inc.
 */
#include <sys/bpobj.h>
#include <sys/zfs_context.h>
#include <sys/refcount.h>
#include <sys/dsl_pool.h>
#include <sys/zfeature.h>
#include <sys/zap.h>
/*
 * Return an empty bpobj, preferably the empty dummy one (dp_empty_bpobj).
 */
uint64_t
bpobj_alloc_empty(objset_t *os, int blocksize, dmu_tx_t *tx)
{
	spa_t *spa = dmu_objset_spa(os);
	dsl_pool_t *dp = dmu_objset_pool(os);

	if (spa_feature_is_enabled(spa, SPA_FEATURE_EMPTY_BPOBJ)) {
		if (!spa_feature_is_active(spa, SPA_FEATURE_EMPTY_BPOBJ)) {
			ASSERT0(dp->dp_empty_bpobj);
			dp->dp_empty_bpobj =
			    bpobj_alloc(os, SPA_OLD_MAXBLOCKSIZE, tx);
			VERIFY(zap_add(os,
			    DMU_POOL_DIRECTORY_OBJECT,
			    DMU_POOL_EMPTY_BPOBJ, sizeof (uint64_t), 1,
			    &dp->dp_empty_bpobj, tx) == 0);
		}
		spa_feature_incr(spa, SPA_FEATURE_EMPTY_BPOBJ, tx);
		ASSERT(dp->dp_empty_bpobj != 0);
		return (dp->dp_empty_bpobj);
	} else {
		return (bpobj_alloc(os, blocksize, tx));
	}
}
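/*
 * Hedged usage sketch (not code from this file; "os" and "tx" are
 * assumed context): deadlist-style callers allocate per-txg bpobjs
 * this way so that empty ones can all share dp_empty_bpobj:
 *
 *	uint64_t obj = bpobj_alloc_empty(os, SPA_OLD_MAXBLOCKSIZE, tx);
 *
 * When SPA_FEATURE_EMPTY_BPOBJ is enabled, obj aliases dp_empty_bpobj
 * and the feature refcount was bumped; drop that reference with
 * bpobj_decr_empty(os, tx) rather than bpobj_free().
 */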
void
bpobj_decr_empty(objset_t *os, dmu_tx_t *tx)
{
	dsl_pool_t *dp = dmu_objset_pool(os);

	spa_feature_decr(dmu_objset_spa(os), SPA_FEATURE_EMPTY_BPOBJ, tx);
	if (!spa_feature_is_active(dmu_objset_spa(os),
	    SPA_FEATURE_EMPTY_BPOBJ)) {
		VERIFY3U(0, ==, zap_remove(dp->dp_meta_objset,
		    DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_EMPTY_BPOBJ, tx));
		VERIFY3U(0, ==, dmu_object_free(os, dp->dp_empty_bpobj, tx));
		dp->dp_empty_bpobj = 0;
	}
}
uint64_t
bpobj_alloc(objset_t *os, int blocksize, dmu_tx_t *tx)
{
	int size;

	if (spa_version(dmu_objset_spa(os)) < SPA_VERSION_BPOBJ_ACCOUNT)
		size = BPOBJ_SIZE_V0;
	else if (spa_version(dmu_objset_spa(os)) < SPA_VERSION_DEADLISTS)
		size = BPOBJ_SIZE_V1;
	else
		size = sizeof (bpobj_phys_t);

	return (dmu_object_alloc(os, DMU_OT_BPOBJ, blocksize,
	    DMU_OT_BPOBJ_HDR, size, tx));
}
void
bpobj_free(objset_t *os, uint64_t obj, dmu_tx_t *tx)
{
	int64_t i;
	bpobj_t bpo;
	dmu_object_info_t doi;
	int epb;
	dmu_buf_t *dbuf = NULL;

	ASSERT(obj != dmu_objset_pool(os)->dp_empty_bpobj);
	VERIFY3U(0, ==, bpobj_open(&bpo, os, obj));

	mutex_enter(&bpo.bpo_lock);

	if (!bpo.bpo_havesubobj || bpo.bpo_phys->bpo_subobjs == 0)
		goto out;

	VERIFY3U(0, ==, dmu_object_info(os, bpo.bpo_phys->bpo_subobjs, &doi));
	epb = doi.doi_data_block_size / sizeof (uint64_t);

	for (i = bpo.bpo_phys->bpo_num_subobjs - 1; i >= 0; i--) {
		uint64_t *objarray;
		uint64_t offset, blkoff;

		offset = i * sizeof (uint64_t);
		blkoff = P2PHASE(i, epb);

		if (dbuf == NULL || dbuf->db_offset > offset) {
			if (dbuf)
				dmu_buf_rele(dbuf, FTAG);
			VERIFY3U(0, ==, dmu_buf_hold(os,
			    bpo.bpo_phys->bpo_subobjs, offset, FTAG, &dbuf, 0));
		}

		ASSERT3U(offset, >=, dbuf->db_offset);
		ASSERT3U(offset, <, dbuf->db_offset + dbuf->db_size);

		objarray = dbuf->db_data;
		bpobj_free(os, objarray[blkoff], tx);
	}
	if (dbuf) {
		dmu_buf_rele(dbuf, FTAG);
		dbuf = NULL;
	}
	VERIFY3U(0, ==, dmu_object_free(os, bpo.bpo_phys->bpo_subobjs, tx));

out:
	mutex_exit(&bpo.bpo_lock);
	bpobj_close(&bpo);

	VERIFY3U(0, ==, dmu_object_free(os, obj, tx));
}
int
bpobj_open(bpobj_t *bpo, objset_t *os, uint64_t object)
{
	dmu_object_info_t doi;
	int err;

	err = dmu_object_info(os, object, &doi);
	if (err)
		return (err);

	bzero(bpo, sizeof (*bpo));
	mutex_init(&bpo->bpo_lock, NULL, MUTEX_DEFAULT, NULL);

	ASSERT(bpo->bpo_dbuf == NULL);
	ASSERT(bpo->bpo_phys == NULL);
	ASSERT(object != 0);
	ASSERT3U(doi.doi_type, ==, DMU_OT_BPOBJ);
	ASSERT3U(doi.doi_bonus_type, ==, DMU_OT_BPOBJ_HDR);

	err = dmu_bonus_hold(os, object, bpo, &bpo->bpo_dbuf);
	if (err)
		return (err);

	bpo->bpo_os = os;
	bpo->bpo_object = object;
	bpo->bpo_epb = doi.doi_data_block_size >> SPA_BLKPTRSHIFT;
	bpo->bpo_havecomp = (doi.doi_bonus_size > BPOBJ_SIZE_V0);
	bpo->bpo_havesubobj = (doi.doi_bonus_size > BPOBJ_SIZE_V1);
	bpo->bpo_phys = bpo->bpo_dbuf->db_data;
	return (0);
}
void
bpobj_close(bpobj_t *bpo)
{
	/* Lame workaround for closing a bpobj that was never opened. */
	if (bpo->bpo_object == 0)
		return;

	dmu_buf_rele(bpo->bpo_dbuf, bpo);
	if (bpo->bpo_cached_dbuf != NULL)
		dmu_buf_rele(bpo->bpo_cached_dbuf, bpo);
	bpo->bpo_dbuf = NULL;
	bpo->bpo_phys = NULL;
	bpo->bpo_cached_dbuf = NULL;
	bpo->bpo_object = 0;

	mutex_destroy(&bpo->bpo_lock);
}
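/*
 * Minimal open/close sketch (hypothetical caller; "os", "obj", and
 * the error handling are assumptions, not code from this file):
 *
 *	bpobj_t bpo;
 *	uint64_t used, comp, uncomp;
 *	int err;
 *
 *	err = bpobj_open(&bpo, os, obj);
 *	if (err == 0) {
 *		err = bpobj_space(&bpo, &used, &comp, &uncomp);
 *		bpobj_close(&bpo);
 *	}
 */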
boolean_t
bpobj_hasentries(bpobj_t *bpo)
{
	return (bpo->bpo_phys->bpo_num_blkptrs != 0 ||
	    (bpo->bpo_havesubobj && bpo->bpo_phys->bpo_num_subobjs != 0));
}
static int
bpobj_iterate_impl(bpobj_t *bpo, bpobj_itor_t func, void *arg, dmu_tx_t *tx,
    boolean_t free)
{
	dmu_object_info_t doi;
	int epb;
	int64_t i;
	int err = 0;
	dmu_buf_t *dbuf = NULL;

	mutex_enter(&bpo->bpo_lock);

	if (!bpobj_hasentries(bpo))
		goto out;

	if (free)
		dmu_buf_will_dirty(bpo->bpo_dbuf, tx);

	for (i = bpo->bpo_phys->bpo_num_blkptrs - 1; i >= 0; i--) {
		blkptr_t *bparray;
		blkptr_t *bp;
		uint64_t offset, blkoff;

		offset = i * sizeof (blkptr_t);
		blkoff = P2PHASE(i, bpo->bpo_epb);

		if (dbuf == NULL || dbuf->db_offset > offset) {
			if (dbuf)
				dmu_buf_rele(dbuf, FTAG);
			err = dmu_buf_hold(bpo->bpo_os, bpo->bpo_object, offset,
			    FTAG, &dbuf, 0);
			if (err)
				break;
		}

		ASSERT3U(offset, >=, dbuf->db_offset);
		ASSERT3U(offset, <, dbuf->db_offset + dbuf->db_size);

		bparray = dbuf->db_data;
		bp = &bparray[blkoff];
		err = func(arg, bp, tx);
		if (err)
			break;
		if (free) {
			bpo->bpo_phys->bpo_bytes -=
			    bp_get_dsize_sync(dmu_objset_spa(bpo->bpo_os), bp);
			ASSERT3S(bpo->bpo_phys->bpo_bytes, >=, 0);
			if (bpo->bpo_havecomp) {
				bpo->bpo_phys->bpo_comp -= BP_GET_PSIZE(bp);
				bpo->bpo_phys->bpo_uncomp -= BP_GET_UCSIZE(bp);
			}
			bpo->bpo_phys->bpo_num_blkptrs--;
			ASSERT3S(bpo->bpo_phys->bpo_num_blkptrs, >=, 0);
		}
	}
	if (dbuf) {
		dmu_buf_rele(dbuf, FTAG);
		dbuf = NULL;
	}
	if (free) {
		VERIFY3U(0, ==, dmu_free_range(bpo->bpo_os, bpo->bpo_object,
		    (i + 1) * sizeof (blkptr_t), -1ULL, tx));
	}
	if (err || !bpo->bpo_havesubobj || bpo->bpo_phys->bpo_subobjs == 0)
		goto out;

	ASSERT(bpo->bpo_havecomp);
	err = dmu_object_info(bpo->bpo_os, bpo->bpo_phys->bpo_subobjs, &doi);
	if (err) {
		mutex_exit(&bpo->bpo_lock);
		return (err);
	}
	ASSERT3U(doi.doi_type, ==, DMU_OT_BPOBJ_SUBOBJ);
	epb = doi.doi_data_block_size / sizeof (uint64_t);

	for (i = bpo->bpo_phys->bpo_num_subobjs - 1; i >= 0; i--) {
		uint64_t *objarray;
		uint64_t offset, blkoff;
		bpobj_t sublist;
		uint64_t used_before, comp_before, uncomp_before;
		uint64_t used_after, comp_after, uncomp_after;

		offset = i * sizeof (uint64_t);
		blkoff = P2PHASE(i, epb);

		if (dbuf == NULL || dbuf->db_offset > offset) {
			if (dbuf)
				dmu_buf_rele(dbuf, FTAG);
			err = dmu_buf_hold(bpo->bpo_os,
			    bpo->bpo_phys->bpo_subobjs, offset, FTAG, &dbuf, 0);
			if (err)
				break;
		}

		ASSERT3U(offset, >=, dbuf->db_offset);
		ASSERT3U(offset, <, dbuf->db_offset + dbuf->db_size);

		objarray = dbuf->db_data;
		err = bpobj_open(&sublist, bpo->bpo_os, objarray[blkoff]);
		if (err)
			break;
		if (free) {
			err = bpobj_space(&sublist,
			    &used_before, &comp_before, &uncomp_before);
			if (err != 0) {
				bpobj_close(&sublist);
				break;
			}
		}
		err = bpobj_iterate_impl(&sublist, func, arg, tx, free);
		if (free) {
			VERIFY3U(0, ==, bpobj_space(&sublist,
			    &used_after, &comp_after, &uncomp_after));
			bpo->bpo_phys->bpo_bytes -= used_before - used_after;
			ASSERT3S(bpo->bpo_phys->bpo_bytes, >=, 0);
			bpo->bpo_phys->bpo_comp -= comp_before - comp_after;
			bpo->bpo_phys->bpo_uncomp -=
			    uncomp_before - uncomp_after;
		}

		bpobj_close(&sublist);
		if (err)
			break;
		if (free) {
			err = dmu_object_free(bpo->bpo_os,
			    objarray[blkoff], tx);
			if (err)
				break;
			bpo->bpo_phys->bpo_num_subobjs--;
			ASSERT3S(bpo->bpo_phys->bpo_num_subobjs, >=, 0);
		}
	}
	if (dbuf) {
		dmu_buf_rele(dbuf, FTAG);
		dbuf = NULL;
	}
	if (free) {
		VERIFY3U(0, ==, dmu_free_range(bpo->bpo_os,
		    bpo->bpo_phys->bpo_subobjs,
		    (i + 1) * sizeof (uint64_t), -1ULL, tx));
	}

out:
	/* If there are no entries, there should be no bytes. */
	if (!bpobj_hasentries(bpo)) {
		ASSERT0(bpo->bpo_phys->bpo_bytes);
		ASSERT0(bpo->bpo_phys->bpo_comp);
		ASSERT0(bpo->bpo_phys->bpo_uncomp);
	}

	mutex_exit(&bpo->bpo_lock);
	return (err);
}
/*
 * Iterate and remove the entries.  If func returns nonzero, iteration
 * will stop and that entry will not be removed.
 */
int
bpobj_iterate(bpobj_t *bpo, bpobj_itor_t func, void *arg, dmu_tx_t *tx)
{
	return (bpobj_iterate_impl(bpo, func, arg, tx, B_TRUE));
}

/*
 * Iterate the entries.  If func returns nonzero, iteration will stop.
 */
int
bpobj_iterate_nofree(bpobj_t *bpo, bpobj_itor_t func, void *arg, dmu_tx_t *tx)
{
	return (bpobj_iterate_impl(bpo, func, arg, tx, B_FALSE));
}
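/*
 * Sketch of a bpobj_itor_t callback (hypothetical example;
 * space_range_cb below is the in-file instance).  Entries are visited
 * newest-first, and a nonzero return stops the iteration:
 *
 *	static int
 *	count_bps_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
 *	{
 *		uint64_t *count = arg;
 *
 *		(*count)++;
 *		return (0);
 *	}
 *
 *	err = bpobj_iterate_nofree(bpo, count_bps_cb, &count, NULL);
 */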
void
bpobj_enqueue_subobj(bpobj_t *bpo, uint64_t subobj, dmu_tx_t *tx)
{
	bpobj_t subbpo;
	uint64_t used, comp, uncomp, subsubobjs;
	ASSERTV(dmu_object_info_t doi);

	ASSERT(bpo->bpo_havesubobj);
	ASSERT(bpo->bpo_havecomp);
	ASSERT(bpo->bpo_object != dmu_objset_pool(bpo->bpo_os)->dp_empty_bpobj);

	if (subobj == dmu_objset_pool(bpo->bpo_os)->dp_empty_bpobj) {
		bpobj_decr_empty(bpo->bpo_os, tx);
		return;
	}

	VERIFY3U(0, ==, bpobj_open(&subbpo, bpo->bpo_os, subobj));
	VERIFY3U(0, ==, bpobj_space(&subbpo, &used, &comp, &uncomp));

	if (!bpobj_hasentries(&subbpo)) {
		/* No point in having an empty subobj. */
		bpobj_close(&subbpo);
		bpobj_free(bpo->bpo_os, subobj, tx);
		return;
	}

	mutex_enter(&bpo->bpo_lock);
	dmu_buf_will_dirty(bpo->bpo_dbuf, tx);
	if (bpo->bpo_phys->bpo_subobjs == 0) {
		bpo->bpo_phys->bpo_subobjs = dmu_object_alloc(bpo->bpo_os,
		    DMU_OT_BPOBJ_SUBOBJ, SPA_OLD_MAXBLOCKSIZE,
		    DMU_OT_NONE, 0, tx);
	}

	ASSERT0(dmu_object_info(bpo->bpo_os, bpo->bpo_phys->bpo_subobjs, &doi));
	ASSERT3U(doi.doi_type, ==, DMU_OT_BPOBJ_SUBOBJ);

	dmu_write(bpo->bpo_os, bpo->bpo_phys->bpo_subobjs,
	    bpo->bpo_phys->bpo_num_subobjs * sizeof (subobj),
	    sizeof (subobj), &subobj, tx);
	bpo->bpo_phys->bpo_num_subobjs++;

	/*
	 * If subobj has only one block of subobjs, then move subobj's
	 * subobjs to bpo's subobj list directly.  This reduces
	 * recursion in bpobj_iterate due to nested subobjs.
	 */
	subsubobjs = subbpo.bpo_phys->bpo_subobjs;
	if (subsubobjs != 0) {
		dmu_object_info_t doi;

		VERIFY3U(0, ==, dmu_object_info(bpo->bpo_os, subsubobjs, &doi));
		if (doi.doi_max_offset == doi.doi_data_block_size) {
			dmu_buf_t *subdb;
			uint64_t numsubsub = subbpo.bpo_phys->bpo_num_subobjs;

			VERIFY3U(0, ==, dmu_buf_hold(bpo->bpo_os, subsubobjs,
			    0, FTAG, &subdb, 0));
			/*
			 * Make sure that we are not asking dmu_write()
			 * to write more data than we have in our buffer.
			 */
			VERIFY3U(subdb->db_size, >=,
			    numsubsub * sizeof (subobj));
			dmu_write(bpo->bpo_os, bpo->bpo_phys->bpo_subobjs,
			    bpo->bpo_phys->bpo_num_subobjs * sizeof (subobj),
			    numsubsub * sizeof (subobj), subdb->db_data, tx);
			dmu_buf_rele(subdb, FTAG);
			bpo->bpo_phys->bpo_num_subobjs += numsubsub;

			dmu_buf_will_dirty(subbpo.bpo_dbuf, tx);
			subbpo.bpo_phys->bpo_subobjs = 0;
			VERIFY3U(0, ==, dmu_object_free(bpo->bpo_os,
			    subsubobjs, tx));
		}
	}
	bpo->bpo_phys->bpo_bytes += used;
	bpo->bpo_phys->bpo_comp += comp;
	bpo->bpo_phys->bpo_uncomp += uncomp;
	mutex_exit(&bpo->bpo_lock);

	bpobj_close(&subbpo);
}
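/*
 * Hedged caller-side sketch (names are assumptions): the deadlist
 * code hands whole bpobjs off this way, e.g. when folding a per-txg
 * bpobj into a pool-wide list:
 *
 *	bpobj_enqueue_subobj(&dl->dl_bpobj, obj, tx);
 *
 * Ownership of "obj" passes to the parent bpobj, which will free it
 * during iteration; the caller must not bpobj_free() it afterward.
 */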
void
bpobj_enqueue(bpobj_t *bpo, const blkptr_t *bp, dmu_tx_t *tx)
{
	blkptr_t stored_bp = *bp;
	uint64_t offset;
	int blkoff;
	blkptr_t *bparray;

	ASSERT(!BP_IS_HOLE(bp));
	ASSERT(bpo->bpo_object != dmu_objset_pool(bpo->bpo_os)->dp_empty_bpobj);

	if (BP_IS_EMBEDDED(bp)) {
		/*
		 * The bpobj will compress better without the payload.
		 *
		 * Note that we store EMBEDDED bp's because they have an
		 * uncompressed size, which must be accounted for.  An
		 * alternative would be to add their size to bpo_uncomp
		 * without storing the bp, but that would create additional
		 * complications: bpo_uncomp would be inconsistent with the
		 * set of BP's stored, and bpobj_iterate() wouldn't visit
		 * all the space accounted for in the bpobj.
		 */
		bzero(&stored_bp, sizeof (stored_bp));
		stored_bp.blk_prop = bp->blk_prop;
		stored_bp.blk_birth = bp->blk_birth;
	} else if (!BP_GET_DEDUP(bp)) {
		/* The bpobj will compress better without the checksum */
		bzero(&stored_bp.blk_cksum, sizeof (stored_bp.blk_cksum));
	}

	/* We never need the fill count. */
	stored_bp.blk_fill = 0;

	mutex_enter(&bpo->bpo_lock);

	offset = bpo->bpo_phys->bpo_num_blkptrs * sizeof (stored_bp);
	blkoff = P2PHASE(bpo->bpo_phys->bpo_num_blkptrs, bpo->bpo_epb);

	if (bpo->bpo_cached_dbuf == NULL ||
	    offset < bpo->bpo_cached_dbuf->db_offset ||
	    offset >= bpo->bpo_cached_dbuf->db_offset +
	    bpo->bpo_cached_dbuf->db_size) {
		if (bpo->bpo_cached_dbuf)
			dmu_buf_rele(bpo->bpo_cached_dbuf, bpo);
		VERIFY3U(0, ==, dmu_buf_hold(bpo->bpo_os, bpo->bpo_object,
		    offset, bpo, &bpo->bpo_cached_dbuf, 0));
	}

	dmu_buf_will_dirty(bpo->bpo_cached_dbuf, tx);
	bparray = bpo->bpo_cached_dbuf->db_data;
	bparray[blkoff] = stored_bp;

	dmu_buf_will_dirty(bpo->bpo_dbuf, tx);
	bpo->bpo_phys->bpo_num_blkptrs++;
	bpo->bpo_phys->bpo_bytes +=
	    bp_get_dsize_sync(dmu_objset_spa(bpo->bpo_os), bp);
	if (bpo->bpo_havecomp) {
		bpo->bpo_phys->bpo_comp += BP_GET_PSIZE(bp);
		bpo->bpo_phys->bpo_uncomp += BP_GET_UCSIZE(bp);
	}
	mutex_exit(&bpo->bpo_lock);
}
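/*
 * Hedged caller-side sketch (assumed context, not code from this
 * file): block pointers are appended from syncing context with an
 * assigned transaction, skipping holes:
 *
 *	if (!BP_IS_HOLE(bp))
 *		bpobj_enqueue(bpo, bp, tx);
 *
 * The stripped copy (no checksum, no fill count, no payload for
 * embedded BPs) is what lands on disk, so the object compresses well.
 */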
struct space_range_arg {
	spa_t *spa;
	uint64_t mintxg;
	uint64_t maxtxg;
	uint64_t used;
	uint64_t comp;
	uint64_t uncomp;
};

/* ARGSUSED */
static int
space_range_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
	struct space_range_arg *sra = arg;

	if (bp->blk_birth > sra->mintxg && bp->blk_birth <= sra->maxtxg) {
		if (dsl_pool_sync_context(spa_get_dsl(sra->spa)))
			sra->used += bp_get_dsize_sync(sra->spa, bp);
		else
			sra->used += bp_get_dsize(sra->spa, bp);
		sra->comp += BP_GET_PSIZE(bp);
		sra->uncomp += BP_GET_UCSIZE(bp);
	}
	return (0);
}
int
bpobj_space(bpobj_t *bpo, uint64_t *usedp, uint64_t *compp, uint64_t *uncompp)
{
	mutex_enter(&bpo->bpo_lock);

	*usedp = bpo->bpo_phys->bpo_bytes;
	if (bpo->bpo_havecomp) {
		*compp = bpo->bpo_phys->bpo_comp;
		*uncompp = bpo->bpo_phys->bpo_uncomp;
		mutex_exit(&bpo->bpo_lock);
		return (0);
	} else {
		mutex_exit(&bpo->bpo_lock);
		return (bpobj_space_range(bpo, 0, UINT64_MAX,
		    usedp, compp, uncompp));
	}
}
/*
 * Return the amount of space in the bpobj which is:
 * mintxg < blk_birth <= maxtxg
 */
int
bpobj_space_range(bpobj_t *bpo, uint64_t mintxg, uint64_t maxtxg,
    uint64_t *usedp, uint64_t *compp, uint64_t *uncompp)
{
	struct space_range_arg sra = { 0 };
	int err;

	/*
	 * As an optimization, if they want the whole txg range, just
	 * get bpo_bytes rather than iterating over the bps.
	 */
	if (mintxg < TXG_INITIAL && maxtxg == UINT64_MAX && bpo->bpo_havecomp)
		return (bpobj_space(bpo, usedp, compp, uncompp));

	sra.spa = dmu_objset_spa(bpo->bpo_os);
	sra.mintxg = mintxg;
	sra.maxtxg = maxtxg;

	err = bpobj_iterate_nofree(bpo, space_range_cb, &sra, NULL);
	*usedp = sra.used;
	*compp = sra.comp;
	*uncompp = sra.uncomp;
	return (err);
}