/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012 by Delphix. All rights reserved.
 */

#include <sys/dsl_dataset.h>
#include <sys/dmu.h>
#include <sys/refcount.h>
#include <sys/zap.h>
#include <sys/zfs_context.h>
#include <sys/dsl_pool.h>

/*
 * Deadlist concurrency:
 *
 * Deadlists can only be modified from the syncing thread.
 *
 * Except for dsl_deadlist_insert(), a deadlist can only be modified with the
 * dp_config_rwlock held with RW_WRITER.
 *
 * The accessors (dsl_deadlist_space() and dsl_deadlist_space_range()) can
 * be called concurrently, from open context, with the dp_config_rwlock held
 * with RW_READER.
 *
 * Therefore, we only need to provide locking between dsl_deadlist_insert() and
 * the accessors, protecting:
 *     dl_phys->dl_used,comp,uncomp
 *     and protecting the dl_tree from being loaded.
 * The locking is provided by dl_lock.  Note that the bpobj_t
 * provides its own locking, and dl_oldfmt is immutable.
 */

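/*
 * AVL comparator: order deadlist entries by dle_mintxg.
 */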
static int
dsl_deadlist_compare(const void *arg1, const void *arg2)
{
	const dsl_deadlist_entry_t *dle1 = arg1;
	const dsl_deadlist_entry_t *dle2 = arg2;

	if (dle1->dle_mintxg < dle2->dle_mintxg)
		return (-1);
	else if (dle1->dle_mintxg > dle2->dle_mintxg)
		return (+1);
	else
		return (0);
}

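/*
 * Lazily construct the in-core AVL tree from the on-disk ZAP: each ZAP
 * entry maps a mintxg (stored as the entry name) to a bpobj object number.
 */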
static void
dsl_deadlist_load_tree(dsl_deadlist_t *dl)
{
	zap_cursor_t zc;
	zap_attribute_t za;

	ASSERT(!dl->dl_oldfmt);
	if (dl->dl_havetree)
		return;

	avl_create(&dl->dl_tree, dsl_deadlist_compare,
	    sizeof (dsl_deadlist_entry_t),
	    offsetof(dsl_deadlist_entry_t, dle_node));
	for (zap_cursor_init(&zc, dl->dl_os, dl->dl_object);
	    zap_cursor_retrieve(&zc, &za) == 0;
	    zap_cursor_advance(&zc)) {
		dsl_deadlist_entry_t *dle;

		dle = kmem_alloc(sizeof (*dle), KM_SLEEP);
		dle->dle_mintxg = strtonum(za.za_name, NULL);
		VERIFY3U(0, ==, bpobj_open(&dle->dle_bpobj, dl->dl_os,
		    za.za_first_integer));
		avl_add(&dl->dl_tree, dle);
	}
	zap_cursor_fini(&zc);
	dl->dl_havetree = B_TRUE;
}

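/*
 * Open a deadlist and hold its bonus buffer.  If the object is a plain
 * bpobj (the pre-SPA_VERSION_DEADLISTS format), set dl_oldfmt and operate
 * on it directly.
 */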
void
dsl_deadlist_open(dsl_deadlist_t *dl, objset_t *os, uint64_t object)
{
	dmu_object_info_t doi;

	mutex_init(&dl->dl_lock, NULL, MUTEX_DEFAULT, NULL);
	dl->dl_os = os;
	dl->dl_object = object;
	VERIFY3U(0, ==, dmu_bonus_hold(os, object, dl, &dl->dl_dbuf));
	dmu_object_info_from_db(dl->dl_dbuf, &doi);
	if (doi.doi_type == DMU_OT_BPOBJ) {
		dmu_buf_rele(dl->dl_dbuf, dl);
		dl->dl_dbuf = NULL;
		dl->dl_oldfmt = B_TRUE;
		VERIFY3U(0, ==, bpobj_open(&dl->dl_bpobj, os, object));
		return;
	}

	dl->dl_oldfmt = B_FALSE;
	dl->dl_phys = dl->dl_dbuf->db_data;
	dl->dl_havetree = B_FALSE;
}

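/*
 * Tear down the in-core state created by dsl_deadlist_open(): close the
 * per-entry bpobjs, free the AVL tree, and release the bonus buffer.
 */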
void
dsl_deadlist_close(dsl_deadlist_t *dl)
{
	void *cookie = NULL;
	dsl_deadlist_entry_t *dle;

	if (dl->dl_oldfmt) {
		dl->dl_oldfmt = B_FALSE;
		bpobj_close(&dl->dl_bpobj);
		return;
	}

	if (dl->dl_havetree) {
		while ((dle = avl_destroy_nodes(&dl->dl_tree, &cookie))
		    != NULL) {
			bpobj_close(&dle->dle_bpobj);
			kmem_free(dle, sizeof (*dle));
		}
		avl_destroy(&dl->dl_tree);
	}
	dmu_buf_rele(dl->dl_dbuf, dl);
	mutex_destroy(&dl->dl_lock);
	dl->dl_dbuf = NULL;
	dl->dl_phys = NULL;
}

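/*
 * Allocate the on-disk deadlist object: a plain bpobj on pools that
 * predate SPA_VERSION_DEADLISTS, otherwise a ZAP with a
 * dsl_deadlist_phys_t header in its bonus buffer.
 */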
uint64_t
dsl_deadlist_alloc(objset_t *os, dmu_tx_t *tx)
{
	if (spa_version(dmu_objset_spa(os)) < SPA_VERSION_DEADLISTS)
		return (bpobj_alloc(os, SPA_MAXBLOCKSIZE, tx));
	return (zap_create(os, DMU_OT_DEADLIST, DMU_OT_DEADLIST_HDR,
	    sizeof (dsl_deadlist_phys_t), tx));
}

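/*
 * Free the deadlist object and every sub-bpobj it references.  The
 * pool-wide empty bpobj is handled by bpobj_decr_empty() rather than
 * freed directly.
 */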
void
dsl_deadlist_free(objset_t *os, uint64_t dlobj, dmu_tx_t *tx)
{
	dmu_object_info_t doi;
	zap_cursor_t zc;
	zap_attribute_t za;

	VERIFY3U(0, ==, dmu_object_info(os, dlobj, &doi));
	if (doi.doi_type == DMU_OT_BPOBJ) {
		bpobj_free(os, dlobj, tx);
		return;
	}

	for (zap_cursor_init(&zc, os, dlobj);
	    zap_cursor_retrieve(&zc, &za) == 0;
	    zap_cursor_advance(&zc)) {
		uint64_t obj = za.za_first_integer;
		if (obj == dmu_objset_pool(os)->dp_empty_bpobj)
			bpobj_decr_empty(os, tx);
		else
			bpobj_free(os, obj, tx);
	}
	zap_cursor_fini(&zc);
	VERIFY3U(0, ==, dmu_object_free(os, dlobj, tx));
}

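/*
 * Append bp to the entry's bpobj.  If the entry still points at the pool's
 * shared empty bpobj, give it a private bpobj first and record the new
 * object in the deadlist ZAP.
 */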
static void
dle_enqueue(dsl_deadlist_t *dl, dsl_deadlist_entry_t *dle,
    const blkptr_t *bp, dmu_tx_t *tx)
{
	if (dle->dle_bpobj.bpo_object ==
	    dmu_objset_pool(dl->dl_os)->dp_empty_bpobj) {
		uint64_t obj = bpobj_alloc(dl->dl_os, SPA_MAXBLOCKSIZE, tx);
		bpobj_close(&dle->dle_bpobj);
		bpobj_decr_empty(dl->dl_os, tx);
		VERIFY3U(0, ==, bpobj_open(&dle->dle_bpobj, dl->dl_os, obj));
		VERIFY3U(0, ==, zap_update_int_key(dl->dl_os, dl->dl_object,
		    dle->dle_mintxg, obj, tx));
	}
	bpobj_enqueue(&dle->dle_bpobj, bp, tx);
}

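/*
 * Attach an entire bpobj (by object number) to the entry.  If the entry
 * still points at the shared empty bpobj, adopt 'obj' as the entry's bpobj
 * directly instead of nesting it.
 */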
static void
dle_enqueue_subobj(dsl_deadlist_t *dl, dsl_deadlist_entry_t *dle,
    uint64_t obj, dmu_tx_t *tx)
{
	if (dle->dle_bpobj.bpo_object !=
	    dmu_objset_pool(dl->dl_os)->dp_empty_bpobj) {
		bpobj_enqueue_subobj(&dle->dle_bpobj, obj, tx);
	} else {
		bpobj_close(&dle->dle_bpobj);
		bpobj_decr_empty(dl->dl_os, tx);
		VERIFY3U(0, ==, bpobj_open(&dle->dle_bpobj, dl->dl_os, obj));
		VERIFY3U(0, ==, zap_update_int_key(dl->dl_os, dl->dl_object,
		    dle->dle_mintxg, obj, tx));
	}
}

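/*
 * Insert a dead block pointer: update the cached space totals under
 * dl_lock, then enqueue the bp on the entry with the largest mintxg that
 * is still less than the bp's birth txg.
 */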
void
dsl_deadlist_insert(dsl_deadlist_t *dl, const blkptr_t *bp, dmu_tx_t *tx)
{
	dsl_deadlist_entry_t dle_tofind;
	dsl_deadlist_entry_t *dle;
	avl_index_t where;

	if (dl->dl_oldfmt) {
		bpobj_enqueue(&dl->dl_bpobj, bp, tx);
		return;
	}

	dsl_deadlist_load_tree(dl);

	dmu_buf_will_dirty(dl->dl_dbuf, tx);
	mutex_enter(&dl->dl_lock);
	dl->dl_phys->dl_used +=
	    bp_get_dsize_sync(dmu_objset_spa(dl->dl_os), bp);
	dl->dl_phys->dl_comp += BP_GET_PSIZE(bp);
	dl->dl_phys->dl_uncomp += BP_GET_UCSIZE(bp);
	mutex_exit(&dl->dl_lock);

	dle_tofind.dle_mintxg = bp->blk_birth;
	dle = avl_find(&dl->dl_tree, &dle_tofind, &where);
	if (dle == NULL)
		dle = avl_nearest(&dl->dl_tree, where, AVL_BEFORE);
	else
		dle = AVL_PREV(&dl->dl_tree, dle);
	dle_enqueue(dl, dle, bp, tx);
}

/*
 * Insert new key in deadlist, which must be > all current entries.
 * mintxg is not inclusive.
 */
void
dsl_deadlist_add_key(dsl_deadlist_t *dl, uint64_t mintxg, dmu_tx_t *tx)
{
	uint64_t obj;
	dsl_deadlist_entry_t *dle;

	if (dl->dl_oldfmt)
		return;

	dsl_deadlist_load_tree(dl);

	dle = kmem_alloc(sizeof (*dle), KM_SLEEP);
	dle->dle_mintxg = mintxg;
	obj = bpobj_alloc_empty(dl->dl_os, SPA_MAXBLOCKSIZE, tx);
	VERIFY3U(0, ==, bpobj_open(&dle->dle_bpobj, dl->dl_os, obj));
	avl_add(&dl->dl_tree, dle);

	VERIFY3U(0, ==, zap_add_int_key(dl->dl_os, dl->dl_object,
	    mintxg, obj, tx));
}

/*
 * Remove this key, merging its entries into the previous key.
 */
void
dsl_deadlist_remove_key(dsl_deadlist_t *dl, uint64_t mintxg, dmu_tx_t *tx)
{
	dsl_deadlist_entry_t dle_tofind;
	dsl_deadlist_entry_t *dle, *dle_prev;

	if (dl->dl_oldfmt)
		return;

	dsl_deadlist_load_tree(dl);

	dle_tofind.dle_mintxg = mintxg;
	dle = avl_find(&dl->dl_tree, &dle_tofind, NULL);
	dle_prev = AVL_PREV(&dl->dl_tree, dle);

	dle_enqueue_subobj(dl, dle_prev, dle->dle_bpobj.bpo_object, tx);

	avl_remove(&dl->dl_tree, dle);
	bpobj_close(&dle->dle_bpobj);
	kmem_free(dle, sizeof (*dle));

	VERIFY3U(0, ==, zap_remove_int(dl->dl_os, dl->dl_object, mintxg, tx));
}

/*
 * Walk ds's snapshots to regenerate the ZAP & AVL.
 */
static void
dsl_deadlist_regenerate(objset_t *os, uint64_t dlobj,
    uint64_t mrs_obj, dmu_tx_t *tx)
{
	dsl_deadlist_t dl;
	dsl_pool_t *dp = dmu_objset_pool(os);

	dsl_deadlist_open(&dl, os, dlobj);
	if (dl.dl_oldfmt) {
		dsl_deadlist_close(&dl);
		return;
	}

	while (mrs_obj != 0) {
		dsl_dataset_t *ds;
		VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, mrs_obj, FTAG, &ds));
		dsl_deadlist_add_key(&dl,
		    dsl_dataset_phys(ds)->ds_prev_snap_txg, tx);
		mrs_obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
		dsl_dataset_rele(ds, FTAG);
	}
	dsl_deadlist_close(&dl);
}

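/*
 * Create a new deadlist with the same keys as dl (up to, but not
 * including, maxtxg), each pointing at a new empty bpobj.  For old-format
 * deadlists, regenerate the keys from the snapshot chain rooted at
 * mrs_obj instead.
 */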
uint64_t
dsl_deadlist_clone(dsl_deadlist_t *dl, uint64_t maxtxg,
    uint64_t mrs_obj, dmu_tx_t *tx)
{
	dsl_deadlist_entry_t *dle;
	uint64_t newobj;

	newobj = dsl_deadlist_alloc(dl->dl_os, tx);

	if (dl->dl_oldfmt) {
		dsl_deadlist_regenerate(dl->dl_os, newobj, mrs_obj, tx);
		return (newobj);
	}

	dsl_deadlist_load_tree(dl);

	for (dle = avl_first(&dl->dl_tree); dle;
	    dle = AVL_NEXT(&dl->dl_tree, dle)) {
		uint64_t obj;

		if (dle->dle_mintxg >= maxtxg)
			break;

		obj = bpobj_alloc_empty(dl->dl_os, SPA_MAXBLOCKSIZE, tx);
		VERIFY3U(0, ==, zap_add_int_key(dl->dl_os, newobj,
		    dle->dle_mintxg, obj, tx));
	}
	return (newobj);
}

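/*
 * Return the total used, compressed, and uncompressed space in the
 * deadlist, from the cached totals in the header (old-format deadlists
 * get theirs from bpobj_space()).
 */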
void
dsl_deadlist_space(dsl_deadlist_t *dl,
    uint64_t *usedp, uint64_t *compp, uint64_t *uncompp)
{
	if (dl->dl_oldfmt) {
		VERIFY3U(0, ==, bpobj_space(&dl->dl_bpobj,
		    usedp, compp, uncompp));
		return;
	}

	mutex_enter(&dl->dl_lock);
	*usedp = dl->dl_phys->dl_used;
	*compp = dl->dl_phys->dl_comp;
	*uncompp = dl->dl_phys->dl_uncomp;
	mutex_exit(&dl->dl_lock);
}

/*
 * Return space used in the range (mintxg, maxtxg].
 * Includes maxtxg, does not include mintxg.
 * mintxg and maxtxg must both be keys in the deadlist (unless maxtxg is
 * larger than any bp in the deadlist (eg. UINT64_MAX)).
 */
void
dsl_deadlist_space_range(dsl_deadlist_t *dl, uint64_t mintxg, uint64_t maxtxg,
    uint64_t *usedp, uint64_t *compp, uint64_t *uncompp)
{
	dsl_deadlist_entry_t *dle;
	dsl_deadlist_entry_t dle_tofind;
	avl_index_t where;

	if (dl->dl_oldfmt) {
		VERIFY3U(0, ==, bpobj_space_range(&dl->dl_bpobj,
		    mintxg, maxtxg, usedp, compp, uncompp));
		return;
	}

	*usedp = *compp = *uncompp = 0;

	mutex_enter(&dl->dl_lock);
	dsl_deadlist_load_tree(dl);
	dle_tofind.dle_mintxg = mintxg;
	dle = avl_find(&dl->dl_tree, &dle_tofind, &where);
	/*
	 * If we don't find this mintxg, there shouldn't be anything
	 * after it either.
	 */
	ASSERT(dle != NULL ||
	    avl_nearest(&dl->dl_tree, where, AVL_AFTER) == NULL);

	for (; dle && dle->dle_mintxg < maxtxg;
	    dle = AVL_NEXT(&dl->dl_tree, dle)) {
		uint64_t used, comp, uncomp;

		VERIFY3U(0, ==, bpobj_space(&dle->dle_bpobj,
		    &used, &comp, &uncomp));

		*usedp += used;
		*compp += comp;
		*uncompp += uncomp;
	}
	mutex_exit(&dl->dl_lock);
}

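/*
 * Fold an existing bpobj into the deadlist: add its space to the cached
 * totals and attach it to the entry that covers 'birth' (the entry with
 * the largest mintxg <= birth).
 */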
static void
dsl_deadlist_insert_bpobj(dsl_deadlist_t *dl, uint64_t obj, uint64_t birth,
    dmu_tx_t *tx)
{
	dsl_deadlist_entry_t dle_tofind;
	dsl_deadlist_entry_t *dle;
	avl_index_t where;
	uint64_t used, comp, uncomp;
	bpobj_t bpo;

	VERIFY3U(0, ==, bpobj_open(&bpo, dl->dl_os, obj));
	VERIFY3U(0, ==, bpobj_space(&bpo, &used, &comp, &uncomp));
	bpobj_close(&bpo);

	dsl_deadlist_load_tree(dl);

	dmu_buf_will_dirty(dl->dl_dbuf, tx);
	mutex_enter(&dl->dl_lock);
	dl->dl_phys->dl_used += used;
	dl->dl_phys->dl_comp += comp;
	dl->dl_phys->dl_uncomp += uncomp;
	mutex_exit(&dl->dl_lock);

	dle_tofind.dle_mintxg = birth;
	dle = avl_find(&dl->dl_tree, &dle_tofind, &where);
	if (dle == NULL)
		dle = avl_nearest(&dl->dl_tree, where, AVL_BEFORE);
	dle_enqueue_subobj(dl, dle, obj, tx);
}

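/*
 * bpobj_iterate() callback used by dsl_deadlist_merge() to re-insert each
 * bp from an old-format (plain bpobj) deadlist.
 */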
static int
dsl_deadlist_insert_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
	dsl_deadlist_t *dl = arg;
	dsl_deadlist_insert(dl, bp, tx);
	return (0);
}

/*
 * Merge the deadlist pointed to by 'obj' into dl.  obj will be left as
 * an empty deadlist.
 */
void
dsl_deadlist_merge(dsl_deadlist_t *dl, uint64_t obj, dmu_tx_t *tx)
{
	zap_cursor_t zc;
	zap_attribute_t za;
	dmu_buf_t *bonus;
	dsl_deadlist_phys_t *dlp;
	dmu_object_info_t doi;

	VERIFY3U(0, ==, dmu_object_info(dl->dl_os, obj, &doi));
	if (doi.doi_type == DMU_OT_BPOBJ) {
		bpobj_t bpo;
		VERIFY3U(0, ==, bpobj_open(&bpo, dl->dl_os, obj));
		VERIFY3U(0, ==, bpobj_iterate(&bpo,
		    dsl_deadlist_insert_cb, dl, tx));
		bpobj_close(&bpo);
		return;
	}

	for (zap_cursor_init(&zc, dl->dl_os, obj);
	    zap_cursor_retrieve(&zc, &za) == 0;
	    zap_cursor_advance(&zc)) {
		uint64_t mintxg = strtonum(za.za_name, NULL);
		dsl_deadlist_insert_bpobj(dl, za.za_first_integer, mintxg, tx);
		VERIFY3U(0, ==, zap_remove_int(dl->dl_os, obj, mintxg, tx));
	}
	zap_cursor_fini(&zc);

	VERIFY3U(0, ==, dmu_bonus_hold(dl->dl_os, obj, FTAG, &bonus));
	dlp = bonus->db_data;
	dmu_buf_will_dirty(bonus, tx);
	bzero(dlp, sizeof (*dlp));
	dmu_buf_rele(bonus, FTAG);
}

/*
 * Remove entries on dl that are >= mintxg, and put them on the bpobj.
 */
void
dsl_deadlist_move_bpobj(dsl_deadlist_t *dl, bpobj_t *bpo, uint64_t mintxg,
    dmu_tx_t *tx)
{
	dsl_deadlist_entry_t dle_tofind;
	dsl_deadlist_entry_t *dle;
	avl_index_t where;

	ASSERT(!dl->dl_oldfmt);
	dmu_buf_will_dirty(dl->dl_dbuf, tx);
	dsl_deadlist_load_tree(dl);

	dle_tofind.dle_mintxg = mintxg;
	dle = avl_find(&dl->dl_tree, &dle_tofind, &where);
	if (dle == NULL)
		dle = avl_nearest(&dl->dl_tree, where, AVL_AFTER);
	while (dle) {
		uint64_t used, comp, uncomp;
		dsl_deadlist_entry_t *dle_next;

		bpobj_enqueue_subobj(bpo, dle->dle_bpobj.bpo_object, tx);

		VERIFY3U(0, ==, bpobj_space(&dle->dle_bpobj,
		    &used, &comp, &uncomp));
		mutex_enter(&dl->dl_lock);
		ASSERT3U(dl->dl_phys->dl_used, >=, used);
		ASSERT3U(dl->dl_phys->dl_comp, >=, comp);
		ASSERT3U(dl->dl_phys->dl_uncomp, >=, uncomp);
		dl->dl_phys->dl_used -= used;
		dl->dl_phys->dl_comp -= comp;
		dl->dl_phys->dl_uncomp -= uncomp;
		mutex_exit(&dl->dl_lock);

		VERIFY3U(0, ==, zap_remove_int(dl->dl_os, dl->dl_object,
		    dle->dle_mintxg, tx));

		dle_next = AVL_NEXT(&dl->dl_tree, dle);
		avl_remove(&dl->dl_tree, dle);
		bpobj_close(&dle->dle_bpobj);
		kmem_free(dle, sizeof (*dle));
		dle = dle_next;
	}
}