/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012 by Delphix. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 */

#include <sys/dsl_dataset.h>
#include <sys/dmu.h>
#include <sys/refcount.h>
#include <sys/zap.h>
#include <sys/zfs_context.h>
#include <sys/dsl_pool.h>

/*
 * Deadlist concurrency:
 *
 * Deadlists can only be modified from the syncing thread.
 *
 * Except for dsl_deadlist_insert(), it can only be modified with the
 * dp_config_rwlock held with RW_WRITER.
 *
 * The accessors (dsl_deadlist_space() and dsl_deadlist_space_range()) can
 * be called concurrently, from open context, with the dp_config_rwlock held
 * with RW_READER.
 *
 * Therefore, we only need to provide locking between dsl_deadlist_insert() and
 * the accessors, protecting:
 *	dl_phys->dl_used,comp,uncomp
 *	and protecting the dl_tree from being loaded.
 * The locking is provided by dl_lock.  Note that the bpobj_t
 * provides its own locking, and dl_oldfmt is immutable.
 */

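/*
 * Illustrative sketch: an open-context caller is expected to hold the pool
 * config lock as reader around the accessors, along the lines of:
 *
 *	dsl_pool_config_enter(dp, FTAG);
 *	dsl_deadlist_space(&ds->ds_deadlist, &used, &comp, &uncomp);
 *	dsl_pool_config_exit(dp, FTAG);
 *
 * (The caller and variable names above are hypothetical.)
 */
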
static int
dsl_deadlist_compare(const void *arg1, const void *arg2)
{
	const dsl_deadlist_entry_t *dle1 = arg1;
	const dsl_deadlist_entry_t *dle2 = arg2;

	if (dle1->dle_mintxg < dle2->dle_mintxg)
		return (-1);
	else if (dle1->dle_mintxg > dle2->dle_mintxg)
		return (+1);
	else
		return (0);
}

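/*
 * Load the on-disk ZAP (mintxg -> bpobj object number) into the in-core
 * AVL tree, if it has not been loaded already.
 */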
static void
dsl_deadlist_load_tree(dsl_deadlist_t *dl)
{
	zap_cursor_t zc;
	zap_attribute_t za;

	ASSERT(!dl->dl_oldfmt);
	if (dl->dl_havetree)
		return;

	avl_create(&dl->dl_tree, dsl_deadlist_compare,
	    sizeof (dsl_deadlist_entry_t),
	    offsetof(dsl_deadlist_entry_t, dle_node));
	for (zap_cursor_init(&zc, dl->dl_os, dl->dl_object);
	    zap_cursor_retrieve(&zc, &za) == 0;
	    zap_cursor_advance(&zc)) {
		dsl_deadlist_entry_t *dle;

		dle = kmem_alloc(sizeof (*dle), KM_SLEEP);
		dle->dle_mintxg = strtonum(za.za_name, NULL);
		VERIFY3U(0, ==, bpobj_open(&dle->dle_bpobj, dl->dl_os,
		    za.za_first_integer));
		avl_add(&dl->dl_tree, dle);
	}
	zap_cursor_fini(&zc);
	dl->dl_havetree = B_TRUE;
}

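/*
 * Open a deadlist.  If the object is an old-format deadlist (a plain bpobj,
 * from before SPA_VERSION_DEADLISTS), set dl_oldfmt and use dl_bpobj
 * directly; otherwise hold the bonus buffer containing the header.
 */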
void
dsl_deadlist_open(dsl_deadlist_t *dl, objset_t *os, uint64_t object)
{
	dmu_object_info_t doi;

	mutex_init(&dl->dl_lock, NULL, MUTEX_DEFAULT, NULL);
	dl->dl_os = os;
	dl->dl_object = object;
	VERIFY3U(0, ==, dmu_bonus_hold(os, object, dl, &dl->dl_dbuf));
	dmu_object_info_from_db(dl->dl_dbuf, &doi);
	if (doi.doi_type == DMU_OT_BPOBJ) {
		dmu_buf_rele(dl->dl_dbuf, dl);
		dl->dl_dbuf = NULL;
		dl->dl_oldfmt = B_TRUE;
		VERIFY3U(0, ==, bpobj_open(&dl->dl_bpobj, os, object));
		return;
	}

	dl->dl_oldfmt = B_FALSE;
	dl->dl_phys = dl->dl_dbuf->db_data;
	dl->dl_havetree = B_FALSE;
}

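/*
 * Close a deadlist: close its bpobj(s), tear down the in-core AVL tree if
 * it was loaded, and release the bonus buffer held on the header
 * (new-format deadlists only).
 */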
void
dsl_deadlist_close(dsl_deadlist_t *dl)
{
	void *cookie = NULL;
	dsl_deadlist_entry_t *dle;

	dl->dl_os = NULL;

	if (dl->dl_oldfmt) {
		dl->dl_oldfmt = B_FALSE;
		bpobj_close(&dl->dl_bpobj);
		return;
	}

	if (dl->dl_havetree) {
		while ((dle = avl_destroy_nodes(&dl->dl_tree, &cookie))
		    != NULL) {
			bpobj_close(&dle->dle_bpobj);
			kmem_free(dle, sizeof (*dle));
		}
		avl_destroy(&dl->dl_tree);
	}
	dmu_buf_rele(dl->dl_dbuf, dl);
	mutex_destroy(&dl->dl_lock);
	dl->dl_dbuf = NULL;
	dl->dl_phys = NULL;
}

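/*
 * Allocate a new deadlist object.  On pools that predate
 * SPA_VERSION_DEADLISTS this falls back to allocating a plain bpobj.
 */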
uint64_t
dsl_deadlist_alloc(objset_t *os, dmu_tx_t *tx)
{
	if (spa_version(dmu_objset_spa(os)) < SPA_VERSION_DEADLISTS)
		return (bpobj_alloc(os, SPA_OLD_MAXBLOCKSIZE, tx));
	return (zap_create(os, DMU_OT_DEADLIST, DMU_OT_DEADLIST_HDR,
	    sizeof (dsl_deadlist_phys_t), tx));
}

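/*
 * Free a deadlist object and every bpobj it references, decrementing the
 * reference on the pool-wide empty bpobj for entries that point to it.
 */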
void
dsl_deadlist_free(objset_t *os, uint64_t dlobj, dmu_tx_t *tx)
{
	dmu_object_info_t doi;
	zap_cursor_t zc;
	zap_attribute_t za;

	VERIFY3U(0, ==, dmu_object_info(os, dlobj, &doi));
	if (doi.doi_type == DMU_OT_BPOBJ) {
		bpobj_free(os, dlobj, tx);
		return;
	}

	for (zap_cursor_init(&zc, os, dlobj);
	    zap_cursor_retrieve(&zc, &za) == 0;
	    zap_cursor_advance(&zc)) {
		uint64_t obj = za.za_first_integer;
		if (obj == dmu_objset_pool(os)->dp_empty_bpobj)
			bpobj_decr_empty(os, tx);
		else
			bpobj_free(os, obj, tx);
	}
	zap_cursor_fini(&zc);
	VERIFY3U(0, ==, dmu_object_free(os, dlobj, tx));
}

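/*
 * Append a bp (dle_enqueue) or an entire bpobj (dle_enqueue_subobj) to a
 * deadlist entry.  If the entry still points at the pool-wide empty bpobj,
 * first give it its own bpobj and update the ZAP entry to match.
 */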
static void
dle_enqueue(dsl_deadlist_t *dl, dsl_deadlist_entry_t *dle,
    const blkptr_t *bp, dmu_tx_t *tx)
{
	if (dle->dle_bpobj.bpo_object ==
	    dmu_objset_pool(dl->dl_os)->dp_empty_bpobj) {
		uint64_t obj = bpobj_alloc(dl->dl_os, SPA_OLD_MAXBLOCKSIZE, tx);
		bpobj_close(&dle->dle_bpobj);
		bpobj_decr_empty(dl->dl_os, tx);
		VERIFY3U(0, ==, bpobj_open(&dle->dle_bpobj, dl->dl_os, obj));
		VERIFY3U(0, ==, zap_update_int_key(dl->dl_os, dl->dl_object,
		    dle->dle_mintxg, obj, tx));
	}
	bpobj_enqueue(&dle->dle_bpobj, bp, tx);
}

static void
dle_enqueue_subobj(dsl_deadlist_t *dl, dsl_deadlist_entry_t *dle,
    uint64_t obj, dmu_tx_t *tx)
{
	if (dle->dle_bpobj.bpo_object !=
	    dmu_objset_pool(dl->dl_os)->dp_empty_bpobj) {
		bpobj_enqueue_subobj(&dle->dle_bpobj, obj, tx);
	} else {
		bpobj_close(&dle->dle_bpobj);
		bpobj_decr_empty(dl->dl_os, tx);
		VERIFY3U(0, ==, bpobj_open(&dle->dle_bpobj, dl->dl_os, obj));
		VERIFY3U(0, ==, zap_update_int_key(dl->dl_os, dl->dl_object,
		    dle->dle_mintxg, obj, tx));
	}
}

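/*
 * Insert a dead block pointer into the deadlist: charge its space to the
 * header and enqueue it on the entry whose range covers the bp's birth txg.
 */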
void
dsl_deadlist_insert(dsl_deadlist_t *dl, const blkptr_t *bp, dmu_tx_t *tx)
{
	dsl_deadlist_entry_t dle_tofind;
	dsl_deadlist_entry_t *dle;
	avl_index_t where;

	if (dl->dl_oldfmt) {
		bpobj_enqueue(&dl->dl_bpobj, bp, tx);
		return;
	}

	dsl_deadlist_load_tree(dl);

	dmu_buf_will_dirty(dl->dl_dbuf, tx);
	mutex_enter(&dl->dl_lock);
	dl->dl_phys->dl_used +=
	    bp_get_dsize_sync(dmu_objset_spa(dl->dl_os), bp);
	dl->dl_phys->dl_comp += BP_GET_PSIZE(bp);
	dl->dl_phys->dl_uncomp += BP_GET_UCSIZE(bp);
	mutex_exit(&dl->dl_lock);

	dle_tofind.dle_mintxg = bp->blk_birth;
	dle = avl_find(&dl->dl_tree, &dle_tofind, &where);
	if (dle == NULL)
		dle = avl_nearest(&dl->dl_tree, where, AVL_BEFORE);
	else
		dle = AVL_PREV(&dl->dl_tree, dle);
	dle_enqueue(dl, dle, bp, tx);
}

/*
 * Insert a new key in the deadlist, which must be > all current entries.
 * mintxg is not inclusive.
 */
void
dsl_deadlist_add_key(dsl_deadlist_t *dl, uint64_t mintxg, dmu_tx_t *tx)
{
	uint64_t obj;
	dsl_deadlist_entry_t *dle;

	if (dl->dl_oldfmt)
		return;

	dsl_deadlist_load_tree(dl);

	dle = kmem_alloc(sizeof (*dle), KM_SLEEP);
	dle->dle_mintxg = mintxg;
	obj = bpobj_alloc_empty(dl->dl_os, SPA_OLD_MAXBLOCKSIZE, tx);
	VERIFY3U(0, ==, bpobj_open(&dle->dle_bpobj, dl->dl_os, obj));
	avl_add(&dl->dl_tree, dle);

	VERIFY3U(0, ==, zap_add_int_key(dl->dl_os, dl->dl_object,
	    mintxg, obj, tx));
}

/*
 * Remove this key, merging its entries into the previous key.
 */
void
dsl_deadlist_remove_key(dsl_deadlist_t *dl, uint64_t mintxg, dmu_tx_t *tx)
{
	dsl_deadlist_entry_t dle_tofind;
	dsl_deadlist_entry_t *dle, *dle_prev;

	if (dl->dl_oldfmt)
		return;

	dsl_deadlist_load_tree(dl);

	dle_tofind.dle_mintxg = mintxg;
	dle = avl_find(&dl->dl_tree, &dle_tofind, NULL);
	dle_prev = AVL_PREV(&dl->dl_tree, dle);

	dle_enqueue_subobj(dl, dle_prev, dle->dle_bpobj.bpo_object, tx);

	avl_remove(&dl->dl_tree, dle);
	bpobj_close(&dle->dle_bpobj);
	kmem_free(dle, sizeof (*dle));

	VERIFY3U(0, ==, zap_remove_int(dl->dl_os, dl->dl_object, mintxg, tx));
}

/*
 * Walk ds's snapshots to regenerate the deadlist's ZAP & AVL.
 */
static void
dsl_deadlist_regenerate(objset_t *os, uint64_t dlobj,
    uint64_t mrs_obj, dmu_tx_t *tx)
{
	dsl_deadlist_t dl;
	dsl_pool_t *dp = dmu_objset_pool(os);

	dsl_deadlist_open(&dl, os, dlobj);
	if (dl.dl_oldfmt) {
		dsl_deadlist_close(&dl);
		return;
	}

	while (mrs_obj != 0) {
		dsl_dataset_t *ds;
		VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, mrs_obj, FTAG, &ds));
		dsl_deadlist_add_key(&dl,
		    dsl_dataset_phys(ds)->ds_prev_snap_txg, tx);
		mrs_obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
		dsl_dataset_rele(ds, FTAG);
	}
	dsl_deadlist_close(&dl);
}

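/*
 * Create a new deadlist object containing the keys of dl that are < maxtxg,
 * each pointing at a new, empty bpobj; the blkptrs themselves are not
 * copied.  If dl is an old-format deadlist, the new deadlist's keys are
 * instead regenerated from the snapshot chain rooted at mrs_obj.  Returns
 * the new object number.
 */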
uint64_t
dsl_deadlist_clone(dsl_deadlist_t *dl, uint64_t maxtxg,
    uint64_t mrs_obj, dmu_tx_t *tx)
{
	dsl_deadlist_entry_t *dle;
	uint64_t newobj;

	newobj = dsl_deadlist_alloc(dl->dl_os, tx);

	if (dl->dl_oldfmt) {
		dsl_deadlist_regenerate(dl->dl_os, newobj, mrs_obj, tx);
		return (newobj);
	}

	dsl_deadlist_load_tree(dl);

	for (dle = avl_first(&dl->dl_tree); dle;
	    dle = AVL_NEXT(&dl->dl_tree, dle)) {
		uint64_t obj;

		if (dle->dle_mintxg >= maxtxg)
			break;

		obj = bpobj_alloc_empty(dl->dl_os, SPA_OLD_MAXBLOCKSIZE, tx);
		VERIFY3U(0, ==, zap_add_int_key(dl->dl_os, newobj,
		    dle->dle_mintxg, obj, tx));
	}
	return (newobj);
}

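/*
 * Return the total used, compressed, and uncompressed space referenced by
 * the deadlist, as recorded in its header (or computed from the bpobj for
 * old-format deadlists).
 */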
void
dsl_deadlist_space(dsl_deadlist_t *dl,
    uint64_t *usedp, uint64_t *compp, uint64_t *uncompp)
{
	if (dl->dl_oldfmt) {
		VERIFY3U(0, ==, bpobj_space(&dl->dl_bpobj,
		    usedp, compp, uncompp));
		return;
	}

	mutex_enter(&dl->dl_lock);
	*usedp = dl->dl_phys->dl_used;
	*compp = dl->dl_phys->dl_comp;
	*uncompp = dl->dl_phys->dl_uncomp;
	mutex_exit(&dl->dl_lock);
}

/*
 * Return the space used in the range (mintxg, maxtxg].
 * Includes maxtxg, does not include mintxg.
 * mintxg and maxtxg must both be keys in the deadlist (unless maxtxg is
 * larger than any bp in the deadlist (e.g. UINT64_MAX)).
 */
void
dsl_deadlist_space_range(dsl_deadlist_t *dl, uint64_t mintxg, uint64_t maxtxg,
    uint64_t *usedp, uint64_t *compp, uint64_t *uncompp)
{
	dsl_deadlist_entry_t *dle;
	dsl_deadlist_entry_t dle_tofind;
	avl_index_t where;

	if (dl->dl_oldfmt) {
		VERIFY3U(0, ==, bpobj_space_range(&dl->dl_bpobj,
		    mintxg, maxtxg, usedp, compp, uncompp));
		return;
	}

	*usedp = *compp = *uncompp = 0;

	mutex_enter(&dl->dl_lock);
	dsl_deadlist_load_tree(dl);
	dle_tofind.dle_mintxg = mintxg;
	dle = avl_find(&dl->dl_tree, &dle_tofind, &where);
	/*
	 * If we don't find this mintxg, there shouldn't be anything
	 * after it either.
	 */
	ASSERT(dle != NULL ||
	    avl_nearest(&dl->dl_tree, where, AVL_AFTER) == NULL);

	for (; dle && dle->dle_mintxg < maxtxg;
	    dle = AVL_NEXT(&dl->dl_tree, dle)) {
		uint64_t used, comp, uncomp;

		VERIFY3U(0, ==, bpobj_space(&dle->dle_bpobj,
		    &used, &comp, &uncomp));

		*usedp += used;
		*compp += comp;
		*uncompp += uncomp;
	}
	mutex_exit(&dl->dl_lock);
}

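/*
 * Fold an entire bpobj, and its space accounting, into the deadlist entry
 * that covers 'birth'.
 */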
static void
dsl_deadlist_insert_bpobj(dsl_deadlist_t *dl, uint64_t obj, uint64_t birth,
    dmu_tx_t *tx)
{
	dsl_deadlist_entry_t dle_tofind;
	dsl_deadlist_entry_t *dle;
	avl_index_t where;
	uint64_t used, comp, uncomp;
	bpobj_t bpo;

	VERIFY3U(0, ==, bpobj_open(&bpo, dl->dl_os, obj));
	VERIFY3U(0, ==, bpobj_space(&bpo, &used, &comp, &uncomp));
	bpobj_close(&bpo);

	dsl_deadlist_load_tree(dl);

	dmu_buf_will_dirty(dl->dl_dbuf, tx);
	mutex_enter(&dl->dl_lock);
	dl->dl_phys->dl_used += used;
	dl->dl_phys->dl_comp += comp;
	dl->dl_phys->dl_uncomp += uncomp;
	mutex_exit(&dl->dl_lock);

	dle_tofind.dle_mintxg = birth;
	dle = avl_find(&dl->dl_tree, &dle_tofind, &where);
	if (dle == NULL)
		dle = avl_nearest(&dl->dl_tree, where, AVL_BEFORE);
	dle_enqueue_subobj(dl, dle, obj, tx);
}

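/*
 * bpobj_iterate() callback used by dsl_deadlist_merge() when the source
 * deadlist is an old-format (plain bpobj) deadlist.
 */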
static int
dsl_deadlist_insert_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
	dsl_deadlist_t *dl = arg;
	dsl_deadlist_insert(dl, bp, tx);
	return (0);
}

/*
 * Merge the deadlist pointed to by 'obj' into dl.  obj will be left as
 * an empty deadlist.
 */
void
dsl_deadlist_merge(dsl_deadlist_t *dl, uint64_t obj, dmu_tx_t *tx)
{
	zap_cursor_t zc;
	zap_attribute_t za;
	dmu_buf_t *bonus;
	dsl_deadlist_phys_t *dlp;
	dmu_object_info_t doi;

	VERIFY3U(0, ==, dmu_object_info(dl->dl_os, obj, &doi));
	if (doi.doi_type == DMU_OT_BPOBJ) {
		bpobj_t bpo;
		VERIFY3U(0, ==, bpobj_open(&bpo, dl->dl_os, obj));
		VERIFY3U(0, ==, bpobj_iterate(&bpo,
		    dsl_deadlist_insert_cb, dl, tx));
		bpobj_close(&bpo);
		return;
	}

	for (zap_cursor_init(&zc, dl->dl_os, obj);
	    zap_cursor_retrieve(&zc, &za) == 0;
	    zap_cursor_advance(&zc)) {
		uint64_t mintxg = strtonum(za.za_name, NULL);
		dsl_deadlist_insert_bpobj(dl, za.za_first_integer, mintxg, tx);
		VERIFY3U(0, ==, zap_remove_int(dl->dl_os, obj, mintxg, tx));
	}
	zap_cursor_fini(&zc);

	VERIFY3U(0, ==, dmu_bonus_hold(dl->dl_os, obj, FTAG, &bonus));
	dlp = bonus->db_data;
	dmu_buf_will_dirty(bonus, tx);
	bzero(dlp, sizeof (*dlp));
	dmu_buf_rele(bonus, FTAG);
}

/*
 * Remove entries on dl that are >= mintxg, and put them on the bpobj.
 */
void
dsl_deadlist_move_bpobj(dsl_deadlist_t *dl, bpobj_t *bpo, uint64_t mintxg,
    dmu_tx_t *tx)
{
	dsl_deadlist_entry_t dle_tofind;
	dsl_deadlist_entry_t *dle;
	avl_index_t where;

	ASSERT(!dl->dl_oldfmt);
	dmu_buf_will_dirty(dl->dl_dbuf, tx);
	dsl_deadlist_load_tree(dl);

	dle_tofind.dle_mintxg = mintxg;
	dle = avl_find(&dl->dl_tree, &dle_tofind, &where);
	if (dle == NULL)
		dle = avl_nearest(&dl->dl_tree, where, AVL_AFTER);
	while (dle) {
		uint64_t used, comp, uncomp;
		dsl_deadlist_entry_t *dle_next;

		bpobj_enqueue_subobj(bpo, dle->dle_bpobj.bpo_object, tx);

		VERIFY3U(0, ==, bpobj_space(&dle->dle_bpobj,
		    &used, &comp, &uncomp));
		mutex_enter(&dl->dl_lock);
		ASSERT3U(dl->dl_phys->dl_used, >=, used);
		ASSERT3U(dl->dl_phys->dl_comp, >=, comp);
		ASSERT3U(dl->dl_phys->dl_uncomp, >=, uncomp);
		dl->dl_phys->dl_used -= used;
		dl->dl_phys->dl_comp -= comp;
		dl->dl_phys->dl_uncomp -= uncomp;
		mutex_exit(&dl->dl_lock);

		VERIFY3U(0, ==, zap_remove_int(dl->dl_os, dl->dl_object,
		    dle->dle_mintxg, tx));

		dle_next = AVL_NEXT(&dl->dl_tree, dle);
		avl_remove(&dl->dl_tree, dle);
		bpobj_close(&dle->dle_bpobj);
		kmem_free(dle, sizeof (*dle));
		dle = dle_next;
	}
}