/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2012 by Delphix. All rights reserved.
 */
#include <sys/bptree.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_pool.h>
#include <sys/dnode.h>
#include <sys/refcount.h>
/*
 * A bptree is a queue of root block pointers from destroyed datasets. When a
 * dataset is destroyed its root block pointer is put on the end of the pool's
 * bptree queue so the dataset's blocks can be freed asynchronously by
 * dsl_scan_sync. This allows the delete operation to finish without traversing
 * all the dataset's blocks.
 *
 * Note that while bt_begin and bt_end are only ever incremented in this code
 * they are effectively reset to 0 every time the entire bptree is freed because
 * the bptree's object is destroyed and re-created.
 */
52 bptree_phys_t
*ba_phys
; /* data in bonus buffer, dirtied if freeing */
53 boolean_t ba_free
; /* true if freeing during traversal */
55 bptree_itor_t
*ba_func
; /* function to call for each blockpointer */
56 void *ba_arg
; /* caller supplied argument to ba_func */
57 dmu_tx_t
*ba_tx
; /* caller supplied tx, NULL if not freeing */
61 bptree_alloc(objset_t
*os
, dmu_tx_t
*tx
)
67 obj
= dmu_object_alloc(os
, DMU_OTN_UINT64_METADATA
,
68 SPA_MAXBLOCKSIZE
, DMU_OTN_UINT64_METADATA
,
69 sizeof (bptree_phys_t
), tx
);
72 * Bonus buffer contents are already initialized to 0, but for
73 * readability we make it explicit.
75 VERIFY3U(0, ==, dmu_bonus_hold(os
, obj
, FTAG
, &db
));
76 dmu_buf_will_dirty(db
, tx
);
83 dmu_buf_rele(db
, FTAG
);
89 bptree_free(objset_t
*os
, uint64_t obj
, dmu_tx_t
*tx
)
94 VERIFY3U(0, ==, dmu_bonus_hold(os
, obj
, FTAG
, &db
));
96 ASSERT3U(bt
->bt_begin
, ==, bt
->bt_end
);
97 ASSERT0(bt
->bt_bytes
);
99 ASSERT0(bt
->bt_uncomp
);
100 dmu_buf_rele(db
, FTAG
);
102 return (dmu_object_free(os
, obj
, tx
));
106 bptree_add(objset_t
*os
, uint64_t obj
, blkptr_t
*bp
, uint64_t birth_txg
,
107 uint64_t bytes
, uint64_t comp
, uint64_t uncomp
, dmu_tx_t
*tx
)
111 bptree_entry_phys_t bte
;
114 * bptree objects are in the pool mos, therefore they can only be
115 * modified in syncing context. Furthermore, this is only modified
116 * by the sync thread, so no locking is necessary.
118 ASSERT(dmu_tx_is_syncing(tx
));
120 VERIFY3U(0, ==, dmu_bonus_hold(os
, obj
, FTAG
, &db
));
123 bte
.be_birth_txg
= birth_txg
;
125 bzero(&bte
.be_zb
, sizeof (bte
.be_zb
));
126 dmu_write(os
, obj
, bt
->bt_end
* sizeof (bte
), sizeof (bte
), &bte
, tx
);
128 dmu_buf_will_dirty(db
, tx
);
130 bt
->bt_bytes
+= bytes
;
132 bt
->bt_uncomp
+= uncomp
;
133 dmu_buf_rele(db
, FTAG
);
138 bptree_visit_cb(spa_t
*spa
, zilog_t
*zilog
, const blkptr_t
*bp
, arc_buf_t
*pbuf
,
139 const zbookmark_t
*zb
, const dnode_phys_t
*dnp
, void *arg
)
142 struct bptree_args
*ba
= arg
;
147 err
= ba
->ba_func(ba
->ba_arg
, bp
, ba
->ba_tx
);
148 if (err
== 0 && ba
->ba_free
) {
149 ba
->ba_phys
->bt_bytes
-= bp_get_dsize_sync(spa
, bp
);
150 ba
->ba_phys
->bt_comp
-= BP_GET_PSIZE(bp
);
151 ba
->ba_phys
->bt_uncomp
-= BP_GET_UCSIZE(bp
);
157 bptree_iterate(objset_t
*os
, uint64_t obj
, boolean_t free
, bptree_itor_t func
,
158 void *arg
, dmu_tx_t
*tx
)
163 struct bptree_args ba
;
165 ASSERT(!free
|| dmu_tx_is_syncing(tx
));
167 err
= dmu_bonus_hold(os
, obj
, FTAG
, &db
);
172 dmu_buf_will_dirty(db
, tx
);
174 ba
.ba_phys
= db
->db_data
;
181 for (i
= ba
.ba_phys
->bt_begin
; i
< ba
.ba_phys
->bt_end
; i
++) {
182 bptree_entry_phys_t bte
;
184 ASSERT(!free
|| i
== ba
.ba_phys
->bt_begin
);
186 err
= dmu_read(os
, obj
, i
* sizeof (bte
), sizeof (bte
),
187 &bte
, DMU_READ_NO_PREFETCH
);
191 err
= traverse_dataset_destroyed(os
->os_spa
, &bte
.be_bp
,
192 bte
.be_birth_txg
, &bte
.be_zb
, TRAVERSE_POST
,
193 bptree_visit_cb
, &ba
);
195 ASSERT(err
== 0 || err
== ERESTART
);
197 /* save bookmark for future resume */
198 ASSERT3U(bte
.be_zb
.zb_objset
, ==,
199 ZB_DESTROYED_OBJSET
);
200 ASSERT0(bte
.be_zb
.zb_level
);
201 dmu_write(os
, obj
, i
* sizeof (bte
),
202 sizeof (bte
), &bte
, tx
);
205 ba
.ba_phys
->bt_begin
++;
206 (void) dmu_free_range(os
, obj
,
207 i
* sizeof (bte
), sizeof (bte
), tx
);
212 ASSERT(!free
|| err
!= 0 || ba
.ba_phys
->bt_begin
== ba
.ba_phys
->bt_end
);
214 /* if all blocks are free there should be no used space */
215 if (ba
.ba_phys
->bt_begin
== ba
.ba_phys
->bt_end
) {
216 ASSERT0(ba
.ba_phys
->bt_bytes
);
217 ASSERT0(ba
.ba_phys
->bt_comp
);
218 ASSERT0(ba
.ba_phys
->bt_uncomp
);
221 dmu_buf_rele(db
, FTAG
);