/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2017, 2018 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/txg.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_traverse.h>
#include <sys/dmu_redact.h>
#include <sys/bqueue.h>
#include <sys/objlist.h>
#include <sys/dmu_tx.h>
#ifdef _KERNEL
#include <sys/zfs_vfsops.h>
#include <sys/zap.h>
#include <sys/zfs_znode.h>
#endif

/*
 * This controls the number of entries in the buffer the redaction_list_update
 * synctask uses to buffer writes to the redaction list.
 */
static const int redact_sync_bufsize = 1024;

/*
 * Controls how often to update the redaction list when creating a redaction
 * list.
 */
static const uint64_t redaction_list_update_interval_ns =
	1000 * 1000 * 1000ULL; /* 1s */

/*
 * This tunable controls the length of the queues that zfs redact worker threads
 * use to communicate. If the dmu_redact_snap thread is blocking on these
 * queues, this variable may need to be increased. If there is a significant
 * slowdown at the start of a redact operation as these threads consume all the
 * available IO resources, or the queues are consuming too much memory, this
 * variable may need to be decreased.
 */
static const int zfs_redact_queue_length = 1024 * 1024;

/*
 * This tunable controls the fill fraction of the queues by zfs redact. The
 * fill fraction controls the frequency with which threads have to be
 * cv_signaled. If a lot of cpu time is being spent on cv_signal, then this
 * should be tuned down. If the queues empty before the signaled thread can
 * catch up, then this should be tuned up.
 */
static const uint64_t zfs_redact_queue_ff = 20;

struct redact_record {
	bqueue_node_t ln;
	boolean_t eos_marker; /* Marks the end of the stream */
	uint64_t start_object;
	uint64_t start_blkid;
	uint64_t end_object;
	uint64_t end_blkid;
	uint8_t indblkshift;
	uint32_t datablksz;
};

struct redact_thread_arg {
	bqueue_t q;
	objset_t *os;		/* Objset to traverse */
	dsl_dataset_t *ds;	/* Dataset to traverse */
	struct redact_record *current_record;
	int error_code;
	boolean_t cancel;
	zbookmark_phys_t resume;
	objlist_t *deleted_objs;
	uint64_t *num_blocks_visited;
	uint64_t ignore_object;	/* ignore further callbacks on this */
	uint64_t txg;		/* txg to traverse since */
};

/*
 * The redaction node is a wrapper around the redaction record that is used
 * by the redaction merging thread to sort the records and determine overlaps.
 *
 * It contains two nodes; one sorts the records by their start_zb, and the
 * other sorts the records by their end_zb.
 */
struct redact_node {
	avl_node_t avl_node_start;
	avl_node_t avl_node_end;
	struct redact_record *record;
	struct redact_thread_arg *rt_arg;
	uint32_t thread_num;
};

struct merge_data {
	list_t md_redact_block_pending;
	redact_block_phys_t md_coalesce_block;
	uint64_t md_last_time;
	redact_block_phys_t md_furthest[TXG_SIZE];
	/* Lists of struct redact_block_list_node. */
	list_t md_blocks[TXG_SIZE];
	boolean_t md_synctask_txg[TXG_SIZE];
	uint64_t md_latest_synctask_txg;
	redaction_list_t *md_redaction_list;
};

/*
 * A wrapper around struct redact_block so it can be stored in a list_t.
 */
struct redact_block_list_node {
	redact_block_phys_t block;
	list_node_t node;
};

/*
 * We've found a new redaction candidate. In order to improve performance, we
 * coalesce these blocks when they're adjacent to each other. This function
 * handles that. If the new candidate block range is immediately after the
 * range we're building, coalesce it into the range we're building. Otherwise,
 * put the record we're building on the queue, and update the build pointer to
 * point to the new record.
 */
static void
record_merge_enqueue(bqueue_t *q, struct redact_record **build,
    struct redact_record *new)
{
	if (new->eos_marker) {
		if (*build != NULL)
			bqueue_enqueue(q, *build, sizeof (**build));
		bqueue_enqueue_flush(q, new, sizeof (*new));
		return;
	}
	if (*build == NULL) {
		*build = new;
		return;
	}
	struct redact_record *curbuild = *build;
	if ((curbuild->end_object == new->start_object &&
	    curbuild->end_blkid + 1 == new->start_blkid &&
	    curbuild->end_blkid != UINT64_MAX) ||
	    (curbuild->end_object + 1 == new->start_object &&
	    curbuild->end_blkid == UINT64_MAX && new->start_blkid == 0)) {
		curbuild->end_object = new->end_object;
		curbuild->end_blkid = new->end_blkid;
		kmem_free(new, sizeof (*new));
	} else {
		bqueue_enqueue(q, curbuild, sizeof (*curbuild));
		*build = new;
	}
}
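
/*
 * Coalescing example (hypothetical values): if *build covers object 5,
 * blkids 10-12 and the new record covers object 5, blkids 13-20, the new
 * range begins exactly one block after the old one ends, so *build simply
 * grows to cover blkids 10-20 and the new record is freed. If the new
 * record instead started at blkid 30, *build would be enqueued and the new
 * record would become the range under construction.
 */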
#ifdef _KERNEL
struct objnode {
	avl_node_t node;
	uint64_t obj;
};

static int
objnode_compare(const void *o1, const void *o2)
{
	const struct objnode *obj1 = o1;
	const struct objnode *obj2 = o2;
	if (obj1->obj < obj2->obj)
		return (-1);
	if (obj1->obj > obj2->obj)
		return (1);
	return (0);
}

static objlist_t *
zfs_get_deleteq(objset_t *os)
{
	objlist_t *deleteq_objlist = objlist_create();
	uint64_t deleteq_obj;
	zap_cursor_t zc;
	zap_attribute_t za;
	dmu_object_info_t doi;

	ASSERT3U(os->os_phys->os_type, ==, DMU_OST_ZFS);
	VERIFY0(dmu_object_info(os, MASTER_NODE_OBJ, &doi));
	ASSERT3U(doi.doi_type, ==, DMU_OT_MASTER_NODE);

	VERIFY0(zap_lookup(os, MASTER_NODE_OBJ,
	    ZFS_UNLINKED_SET, sizeof (uint64_t), 1, &deleteq_obj));

	/*
	 * In order to insert objects into the objlist, they must be in sorted
	 * order. We don't know what order we'll get them out of the ZAP in, so
	 * we insert them into and remove them from an avl_tree_t to sort them.
	 */
	avl_tree_t at;
	avl_create(&at, objnode_compare, sizeof (struct objnode),
	    offsetof(struct objnode, node));

	for (zap_cursor_init(&zc, os, deleteq_obj);
	    zap_cursor_retrieve(&zc, &za) == 0; zap_cursor_advance(&zc)) {
		struct objnode *obj = kmem_zalloc(sizeof (*obj), KM_SLEEP);
		obj->obj = za.za_first_integer;
		avl_add(&at, obj);
	}
	zap_cursor_fini(&zc);

	struct objnode *next, *found = avl_first(&at);
	while (found != NULL) {
		next = AVL_NEXT(&at, found);
		objlist_insert(deleteq_objlist, found->obj);
		found = next;
	}

	void *cookie = NULL;
	while ((found = avl_destroy_nodes(&at, &cookie)) != NULL)
		kmem_free(found, sizeof (*found));
	avl_destroy(&at);
	return (deleteq_objlist);
}
#endif

/*
 * This is the callback function to traverse_dataset for the redaction threads
 * for dmu_redact_snap. This thread is responsible for creating redaction
 * records for all the data that is modified by the snapshots we're redacting
 * with respect to. Redaction records represent ranges of data that have been
 * modified by one of the redaction snapshots, and are stored in the
 * redact_record struct. We need to create redaction records for three
 * cases:
 *
 * First, if there's a normal write, we need to create a redaction record for
 * that block.
 *
 * Second, if there's a hole, we need to create a redaction record that covers
 * the whole range of the hole. If the hole is in the meta-dnode, it must cover
 * every block in all of the objects in the hole.
 *
 * Third, if there is a deleted object, we need to create a redaction record for
 * all of the blocks in that object.
 */
static int
redact_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_phys_t *zb, const struct dnode_phys *dnp, void *arg)
{
	(void) spa, (void) zilog;
	struct redact_thread_arg *rta = arg;
	struct redact_record *record;

	ASSERT(zb->zb_object == DMU_META_DNODE_OBJECT ||
	    zb->zb_object >= rta->resume.zb_object);

	if (rta->cancel)
		return (SET_ERROR(EINTR));

	if (rta->ignore_object == zb->zb_object)
		return (0);

	/*
	 * If we're visiting a dnode, we need to handle the case where the
	 * object has been deleted.
	 */
	if (zb->zb_level == ZB_DNODE_LEVEL) {
		ASSERT3U(zb->zb_level, ==, ZB_DNODE_LEVEL);

		if (zb->zb_object == 0)
			return (0);

		/*
		 * If the object has been deleted, redact all of the blocks in
		 * it.
		 */
		if (dnp->dn_type == DMU_OT_NONE ||
		    objlist_exists(rta->deleted_objs, zb->zb_object)) {
			rta->ignore_object = zb->zb_object;
			record = kmem_zalloc(sizeof (struct redact_record),
			    KM_SLEEP);

			record->eos_marker = B_FALSE;
			record->start_object = record->end_object =
			    zb->zb_object;
			record->start_blkid = 0;
			record->end_blkid = UINT64_MAX;
			record_merge_enqueue(&rta->q,
			    &rta->current_record, record);
		}
		return (0);
	} else if (zb->zb_level < 0) {
		return (0);
	} else if (zb->zb_level > 0 && !BP_IS_HOLE(bp)) {
		/*
		 * If this is an indirect block, but not a hole, it doesn't
		 * provide any useful information for redaction, so ignore it.
		 */
		return (0);
	}

	/*
	 * At this point, there are two options left for the type of block we're
	 * looking at. Either this is a hole (which could be in the dnode or
	 * the meta-dnode), or it's a level 0 block of some sort. If it's a
	 * hole, we create a redaction record that covers the whole range. If
	 * the hole is in a dnode, we need to redact all the blocks in that
	 * hole. If the hole is in the meta-dnode, we instead need to redact
	 * all blocks in every object covered by that hole. If it's a level 0
	 * block, we only need to redact that single block.
	 */
	record = kmem_zalloc(sizeof (struct redact_record), KM_SLEEP);
	record->eos_marker = B_FALSE;

	record->start_object = record->end_object = zb->zb_object;
	if (BP_IS_HOLE(bp)) {
		record->start_blkid = zb->zb_blkid *
		    bp_span_in_blocks(dnp->dn_indblkshift, zb->zb_level);

		record->end_blkid = ((zb->zb_blkid + 1) *
		    bp_span_in_blocks(dnp->dn_indblkshift, zb->zb_level)) - 1;

		if (zb->zb_object == DMU_META_DNODE_OBJECT) {
			record->start_object = record->start_blkid *
			    ((SPA_MINBLOCKSIZE * dnp->dn_datablkszsec) /
			    sizeof (dnode_phys_t));
			record->start_blkid = 0;
			record->end_object = ((record->end_blkid +
			    1) * ((SPA_MINBLOCKSIZE * dnp->dn_datablkszsec) /
			    sizeof (dnode_phys_t))) - 1;
			record->end_blkid = UINT64_MAX;
		}
	} else if (zb->zb_level != 0 ||
	    zb->zb_object == DMU_META_DNODE_OBJECT) {
		kmem_free(record, sizeof (*record));
		return (0);
	} else {
		record->start_blkid = record->end_blkid = zb->zb_blkid;
	}
	record->indblkshift = dnp->dn_indblkshift;
	record->datablksz = dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT;
	record_merge_enqueue(&rta->q, &rta->current_record, record);

	return (0);
}
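
/*
 * Worked example for the meta-dnode math above (hypothetical sizes): with
 * dn_datablkszsec == 32, a meta-dnode block is 32 * SPA_MINBLOCKSIZE == 16K
 * and holds 16K / sizeof (dnode_phys_t) == 32 dnodes. A hole spanning
 * blkids 4-7 of the meta-dnode therefore covers objects 4 * 32 == 128
 * through ((7 + 1) * 32) - 1 == 255, and the resulting record covers every
 * block (0 through UINT64_MAX) of each of those objects.
 */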

static __attribute__((noreturn)) void
redact_traverse_thread(void *arg)
{
	struct redact_thread_arg *rt_arg = arg;
	int err;
	struct redact_record *data;
#ifdef _KERNEL
	if (rt_arg->os->os_phys->os_type == DMU_OST_ZFS)
		rt_arg->deleted_objs = zfs_get_deleteq(rt_arg->os);
	else
		rt_arg->deleted_objs = objlist_create();
#else
	rt_arg->deleted_objs = objlist_create();
#endif

	err = traverse_dataset_resume(rt_arg->ds, rt_arg->txg,
	    &rt_arg->resume, TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA,
	    redact_cb, rt_arg);

	if (err != EINTR)
		rt_arg->error_code = err;
	objlist_destroy(rt_arg->deleted_objs);
	data = kmem_zalloc(sizeof (*data), KM_SLEEP);
	data->eos_marker = B_TRUE;
	record_merge_enqueue(&rt_arg->q, &rt_arg->current_record, data);
	thread_exit();
}

static inline void
create_zbookmark_from_obj_off(zbookmark_phys_t *zb, uint64_t object,
    uint64_t blkid)
{
	zb->zb_object = object;
	zb->zb_level = 0;
	zb->zb_blkid = blkid;
}

/*
 * This is a utility function that can do the comparison for the starts or
 * ends of the ranges in a redact_record.
 */
static int
redact_range_compare(uint64_t obj1, uint64_t off1, uint32_t dbss1,
    uint64_t obj2, uint64_t off2, uint32_t dbss2)
{
	zbookmark_phys_t z1, z2;
	create_zbookmark_from_obj_off(&z1, obj1, off1);
	create_zbookmark_from_obj_off(&z2, obj2, off2);

	return (zbookmark_compare(dbss1 >> SPA_MINBLOCKSHIFT, 0,
	    dbss2 >> SPA_MINBLOCKSHIFT, 0, &z1, &z2));
}
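
/*
 * For example (hypothetical sizes): within a single object, blkid 2 of a
 * 128K-recordsize range sorts after blkid 10 of a 4K-recordsize range,
 * because zbookmark_compare() effectively orders level-0 bookmarks by byte
 * offset (256K vs. 40K) rather than by raw block id; objects are always
 * compared first.
 */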

/*
 * Compare two redaction records by their range's start location. Also makes
 * eos records always compare last. We use the thread number in the redact_node
 * to ensure that records do not compare equal (which is not allowed in our avl
 * trees).
 */
static int
redact_node_compare_start(const void *arg1, const void *arg2)
{
	const struct redact_node *rn1 = arg1;
	const struct redact_node *rn2 = arg2;
	const struct redact_record *rr1 = rn1->record;
	const struct redact_record *rr2 = rn2->record;
	if (rr1->eos_marker)
		return (1);
	if (rr2->eos_marker)
		return (-1);

	int cmp = redact_range_compare(rr1->start_object, rr1->start_blkid,
	    rr1->datablksz, rr2->start_object, rr2->start_blkid,
	    rr2->datablksz);
	if (cmp == 0)
		cmp = (rn1->thread_num < rn2->thread_num ? -1 : 1);
	return (cmp);
}

/*
 * Compare two redaction records by their range's end location. Also makes
 * eos records always compare last. We use the thread number in the redact_node
 * to ensure that records do not compare equal (which is not allowed in our avl
 * trees).
 */
static int
redact_node_compare_end(const void *arg1, const void *arg2)
{
	const struct redact_node *rn1 = arg1;
	const struct redact_node *rn2 = arg2;
	const struct redact_record *srr1 = rn1->record;
	const struct redact_record *srr2 = rn2->record;
	if (srr1->eos_marker)
		return (1);
	if (srr2->eos_marker)
		return (-1);

	int cmp = redact_range_compare(srr1->end_object, srr1->end_blkid,
	    srr1->datablksz, srr2->end_object, srr2->end_blkid,
	    srr2->datablksz);
	if (cmp == 0)
		cmp = (rn1->thread_num < rn2->thread_num ? -1 : 1);
	return (cmp);
}

/*
 * Utility function that compares two redaction records to determine if any
 * part of the "from" record is before any part of the "to" record. Also
 * causes End of Stream redaction records to compare after all others, so
 * that the redaction merging logic can stay simple.
 */
static boolean_t
redact_record_before(const struct redact_record *from,
    const struct redact_record *to)
{
	if (from->eos_marker == B_TRUE)
		return (B_FALSE);
	else if (to->eos_marker == B_TRUE)
		return (B_TRUE);
	return (redact_range_compare(from->start_object, from->start_blkid,
	    from->datablksz, to->end_object, to->end_blkid,
	    to->datablksz) <= 0);
}
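
/*
 * Note that overlap counts as "before": a record starting at (object 5,
 * blkid 10) is "before" a record ending at (object 5, blkid 10), since the
 * two ranges share a block. perform_thread_merge() relies on this to detect
 * ranges that are covered by every thread.
 */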

/*
 * Pop a new redaction record off the queue, check that the records are in the
 * right order, and free the old data.
 */
static struct redact_record *
get_next_redact_record(bqueue_t *bq, struct redact_record *prev)
{
	struct redact_record *next = bqueue_dequeue(bq);
	ASSERT(redact_record_before(prev, next));
	kmem_free(prev, sizeof (*prev));
	return (next);
}

/*
 * Remove the given redaction node from both trees, pull a new redaction record
 * off the queue, free the old redaction record, update the redaction node, and
 * reinsert the node into the trees.
 */
static int
update_avl_trees(avl_tree_t *start_tree, avl_tree_t *end_tree,
    struct redact_node *redact_node)
{
	avl_remove(start_tree, redact_node);
	avl_remove(end_tree, redact_node);
	redact_node->record = get_next_redact_record(&redact_node->rt_arg->q,
	    redact_node->record);
	avl_add(end_tree, redact_node);
	avl_add(start_tree, redact_node);
	return (redact_node->rt_arg->error_code);
}

/*
 * Synctask for updating redaction lists. We first take this txg's list of
 * redacted blocks and append those to the redaction list. We then update the
 * redaction list's bonus buffer. We store the furthest blocks we visited and
 * the list of snapshots that we're redacting with respect to. We need these so
 * that redacted sends and receives can be correctly resumed.
 */
static void
redaction_list_update_sync(void *arg, dmu_tx_t *tx)
{
	struct merge_data *md = arg;
	uint64_t txg = dmu_tx_get_txg(tx);
	list_t *list = &md->md_blocks[txg & TXG_MASK];
	redact_block_phys_t *furthest_visited =
	    &md->md_furthest[txg & TXG_MASK];
	objset_t *mos = tx->tx_pool->dp_meta_objset;
	redaction_list_t *rl = md->md_redaction_list;
	int bufsize = redact_sync_bufsize;
	redact_block_phys_t *buf = kmem_alloc(bufsize * sizeof (*buf),
	    KM_SLEEP);
	int index = 0;

	dmu_buf_will_dirty(rl->rl_dbuf, tx);

	for (struct redact_block_list_node *rbln = list_remove_head(list);
	    rbln != NULL; rbln = list_remove_head(list)) {
		ASSERT3U(rbln->block.rbp_object, <=,
		    furthest_visited->rbp_object);
		ASSERT(rbln->block.rbp_object < furthest_visited->rbp_object ||
		    rbln->block.rbp_blkid <= furthest_visited->rbp_blkid);
		buf[index] = rbln->block;
		index++;
		if (index == bufsize) {
			dmu_write(mos, rl->rl_object,
			    rl->rl_phys->rlp_num_entries * sizeof (*buf),
			    bufsize * sizeof (*buf), buf, tx);
			rl->rl_phys->rlp_num_entries += bufsize;
			index = 0;
		}
		kmem_free(rbln, sizeof (*rbln));
	}
	if (index > 0) {
		dmu_write(mos, rl->rl_object, rl->rl_phys->rlp_num_entries *
		    sizeof (*buf), index * sizeof (*buf), buf, tx);
		rl->rl_phys->rlp_num_entries += index;
	}
	kmem_free(buf, bufsize * sizeof (*buf));

	md->md_synctask_txg[txg & TXG_MASK] = B_FALSE;
	rl->rl_phys->rlp_last_object = furthest_visited->rbp_object;
	rl->rl_phys->rlp_last_blkid = furthest_visited->rbp_blkid;
}
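
/*
 * Note that rlp_num_entries doubles as the append cursor: each dmu_write()
 * above lands at byte offset rlp_num_entries * sizeof (redact_block_phys_t),
 * so bumping the count also advances where the next batch is written.
 */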

static void
commit_rl_updates(objset_t *os, struct merge_data *md, uint64_t object,
    uint64_t blkid)
{
	dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(os->os_spa)->dp_mos_dir);
	dmu_tx_hold_space(tx, sizeof (struct redact_block_list_node));
	VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
	uint64_t txg = dmu_tx_get_txg(tx);
	if (!md->md_synctask_txg[txg & TXG_MASK]) {
		dsl_sync_task_nowait(dmu_tx_pool(tx),
		    redaction_list_update_sync, md, tx);
		md->md_synctask_txg[txg & TXG_MASK] = B_TRUE;
		md->md_latest_synctask_txg = txg;
	}
	md->md_furthest[txg & TXG_MASK].rbp_object = object;
	md->md_furthest[txg & TXG_MASK].rbp_blkid = blkid;
	list_move_tail(&md->md_blocks[txg & TXG_MASK],
	    &md->md_redact_block_pending);
	dmu_tx_commit(tx);
	md->md_last_time = gethrtime();
}
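
/*
 * Only one redaction_list_update_sync synctask is registered per open txg;
 * later calls that land in the same txg just append their pending blocks to
 * md_blocks and advance md_furthest before that synctask runs.
 */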

/*
 * We want to store the list of blocks that we're redacting in the bookmark's
 * redaction list. However, this list is stored in the MOS, which means it can
 * only be written to in syncing context. To get around this, we create a
 * synctask that will write to the mos for us. We tell it what to write by
 * appending to a linked list for each currently open transaction group; every
 * time we decide to redact a block, we append it to the transaction group that
 * is currently in open context. We also update some progress information that
 * the synctask will store to enable resumable redacted sends.
 */
static void
update_redaction_list(struct merge_data *md, objset_t *os,
    uint64_t object, uint64_t blkid, uint64_t endblkid, uint32_t blksz)
{
	boolean_t enqueue = B_FALSE;
	redact_block_phys_t cur = {0};
	uint64_t count = endblkid - blkid + 1;
	while (count > REDACT_BLOCK_MAX_COUNT) {
		update_redaction_list(md, os, object, blkid,
		    blkid + REDACT_BLOCK_MAX_COUNT - 1, blksz);
		blkid += REDACT_BLOCK_MAX_COUNT;
		count -= REDACT_BLOCK_MAX_COUNT;
	}
	redact_block_phys_t *coalesce = &md->md_coalesce_block;
	boolean_t new;
	if (coalesce->rbp_size_count == 0) {
		new = B_TRUE;
		enqueue = B_FALSE;
	} else {
		uint64_t old_count = redact_block_get_count(coalesce);
		if (coalesce->rbp_object == object &&
		    coalesce->rbp_blkid + old_count == blkid &&
		    old_count + count <= REDACT_BLOCK_MAX_COUNT) {
			ASSERT3U(redact_block_get_size(coalesce), ==, blksz);
			redact_block_set_count(coalesce, old_count + count);
			new = B_FALSE;
			enqueue = B_FALSE;
		} else {
			new = B_TRUE;
			enqueue = B_TRUE;
		}
	}

	if (new) {
		cur = *coalesce;
		coalesce->rbp_blkid = blkid;
		coalesce->rbp_object = object;

		redact_block_set_count(coalesce, count);
		redact_block_set_size(coalesce, blksz);
	}

	if (enqueue && redact_block_get_size(&cur) != 0) {
		struct redact_block_list_node *rbln =
		    kmem_alloc(sizeof (struct redact_block_list_node),
		    KM_SLEEP);
		rbln->block = cur;
		list_insert_tail(&md->md_redact_block_pending, rbln);
	}

	if (gethrtime() > md->md_last_time +
	    redaction_list_update_interval_ns) {
		commit_rl_updates(os, md, object, blkid);
	}
}
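
/*
 * Note on the recursion above: a range longer than REDACT_BLOCK_MAX_COUNT
 * blocks cannot be represented by a single redact_block_phys_t, so
 * update_redaction_list() peels off maximum-sized chunks first, and only the
 * remainder (at most REDACT_BLOCK_MAX_COUNT blocks) reaches the coalescing
 * logic.
 */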

/*
 * This thread merges all the redaction records provided by the worker threads,
 * and determines which blocks are redacted by all the snapshots. The algorithm
 * for doing so is similar to performing a merge in mergesort with n sub-lists
 * instead of 2, with some added complexity due to the fact that the entries are
 * ranges, not just single blocks. This algorithm relies on the fact that the
 * queues are sorted, which is ensured by the fact that traverse_dataset
 * traverses the dataset in a consistent order. We pull one entry off the front
 * of the queues of each redaction traversal thread. Then we repeat the
 * following: each record represents a range of blocks modified by one of the
 * redaction snapshots, and each block in that range may need to be redacted in
 * the send stream. Find the record with the latest start of its range, and the
 * record with the earliest end of its range. If the last start is before the
 * first end, then we know that the blocks in the range [last_start, first_end]
 * are covered by all of the ranges at the front of the queues, which means
 * every thread redacts that whole range. For example, let's say the ranges on
 * each queue look like this:
 *
 * Block Id    1  2  3  4  5  6  7  8  9 10 11
 * Thread 1 |    [====================]
 * Thread 2 |       [========]
 * Thread 3 |             [=================]
 *
 * Thread 3 has the last start (5), and Thread 2 has the first end (6). All
 * three threads modified the range [5,6], so that data should not be sent over
 * the wire. After we've determined whether or not to redact anything, we take
 * the record with the first end. We discard that record, and pull a new one
 * off the front of the queue it came from. In the above example, we would
 * discard Thread 2's record, and pull a new one. Let's say the next record we
 * pulled from Thread 2 covered range [10,11]. The new layout would look like
 * this:
 *
 * Block Id    1  2  3  4  5  6  7  8  9 10 11
 * Thread 1 |    [====================]
 * Thread 2 |                            [==]
 * Thread 3 |             [=================]
 *
 * When we compare the last start (10, from Thread 2) and the first end (9, from
 * Thread 1), we see that the last start is greater than the first end.
 * Therefore, we do not redact anything from these records. We'll iterate by
 * replacing the record from Thread 1.
 *
 * We iterate by replacing the record with the lowest end because we know
 * that the record with the lowest end has helped us as much as it can. All the
 * ranges before it that we will ever redact have been redacted. In addition,
 * by replacing the one with the lowest end, we guarantee we catch all ranges
 * that need to be redacted. For example, if in the case above we had replaced
 * the record from Thread 1 instead, we might have ended up with the following:
 *
 * Block Id    1  2  3  4  5  6  7  8  9 10 11 12
 * Thread 1 |                                [==]
 * Thread 2 |       [========]
 * Thread 3 |             [=================]
 *
 * If the next record from Thread 2 had been [8,10], for example, we should have
 * redacted part of that range, but because we updated Thread 1's record, we
 * missed it.
 *
 * We implement this algorithm by using two trees. The first sorts the
 * redaction records by their start_zb, and the second sorts them by their
 * end_zb. We use these to find the record with the last start and the record
 * with the first end. We create a record with that start and end, and send it
 * on. The overall runtime of this implementation is O(n log m), where n is the
 * total number of redaction records from all the different redaction snapshots,
 * and m is the number of redaction snapshots.
 *
 * If we redact with respect to zero snapshots, we create a redaction record
 * with the start object set to 1 (object 0 can never be redacted) and the
 * start blkid set to 0, and the end object and blkid set to UINT64_MAX. This
 * will result in us redacting every block.
 */
static int
perform_thread_merge(bqueue_t *q, uint32_t num_threads,
    struct redact_thread_arg *thread_args, boolean_t *cancel)
{
	struct redact_node *redact_nodes = NULL;
	avl_tree_t start_tree, end_tree;
	struct redact_record *record;
	struct redact_record *current_record = NULL;
	int err = 0;
	struct merge_data md = { {0} };
	list_create(&md.md_redact_block_pending,
	    sizeof (struct redact_block_list_node),
	    offsetof(struct redact_block_list_node, node));

	/*
	 * If we're redacting with respect to zero snapshots, then no data is
	 * permitted to be sent. We enqueue a record that redacts all blocks,
	 * and an eos marker.
	 */
	if (num_threads == 0) {
		record = kmem_zalloc(sizeof (struct redact_record),
		    KM_SLEEP);
		/* We can't redact object 0, so don't try. */
		record->start_object = 1;
		record->start_blkid = 0;
		record->end_object = record->end_blkid = UINT64_MAX;
		bqueue_enqueue(q, record, sizeof (*record));
		return (0);
	}
	redact_nodes = vmem_zalloc(num_threads *
	    sizeof (*redact_nodes), KM_SLEEP);

	avl_create(&start_tree, redact_node_compare_start,
	    sizeof (struct redact_node),
	    offsetof(struct redact_node, avl_node_start));
	avl_create(&end_tree, redact_node_compare_end,
	    sizeof (struct redact_node),
	    offsetof(struct redact_node, avl_node_end));

	for (int i = 0; i < num_threads; i++) {
		struct redact_node *node = &redact_nodes[i];
		struct redact_thread_arg *targ = &thread_args[i];
		node->record = bqueue_dequeue(&targ->q);
		node->rt_arg = targ;
		node->thread_num = i;
		avl_add(&start_tree, node);
		avl_add(&end_tree, node);
	}

	/*
	 * Once the first record in the end tree is an EOS record, every record
	 * must be an EOS record, so we should stop.
	 */
	while (err == 0 && !((struct redact_node *)avl_first(&end_tree))->
	    record->eos_marker) {
		if (*cancel) {
			err = EINTR;
			break;
		}
		struct redact_node *last_start = avl_last(&start_tree);
		struct redact_node *first_end = avl_first(&end_tree);

		/*
		 * If the last start record is before the first end record,
		 * then we have blocks that are redacted by all threads.
		 * Therefore, we should redact them. Copy the record, and send
		 * it to the main thread.
		 */
		if (redact_record_before(last_start->record,
		    first_end->record)) {
			record = kmem_zalloc(sizeof (struct redact_record),
			    KM_SLEEP);
			*record = *first_end->record;
			record->start_object = last_start->record->start_object;
			record->start_blkid = last_start->record->start_blkid;
			record_merge_enqueue(q, &current_record,
			    record);
		}
		err = update_avl_trees(&start_tree, &end_tree, first_end);
	}

	/*
	 * We're done; if we were cancelled, we need to cancel our workers and
	 * clear out their queues. Either way, we need to remove every thread's
	 * redact_node struct from the avl trees.
	 */
	for (int i = 0; i < num_threads; i++) {
		if (err != 0) {
			thread_args[i].cancel = B_TRUE;
			while (!redact_nodes[i].record->eos_marker) {
				(void) update_avl_trees(&start_tree, &end_tree,
				    &redact_nodes[i]);
			}
		}
		avl_remove(&start_tree, &redact_nodes[i]);
		avl_remove(&end_tree, &redact_nodes[i]);
		kmem_free(redact_nodes[i].record,
		    sizeof (struct redact_record));
		bqueue_destroy(&thread_args[i].q);
	}

	avl_destroy(&start_tree);
	avl_destroy(&end_tree);
	vmem_free(redact_nodes, num_threads * sizeof (*redact_nodes));
	if (current_record != NULL)
		bqueue_enqueue(q, current_record, sizeof (*current_record));
	return (err);
}

struct redact_merge_thread_arg {
	bqueue_t q;
	spa_t *spa;
	int numsnaps;
	struct redact_thread_arg *thr_args;
	boolean_t cancel;
	int error_code;
};

static __attribute__((noreturn)) void
redact_merge_thread(void *arg)
{
	struct redact_merge_thread_arg *rmta = arg;
	rmta->error_code = perform_thread_merge(&rmta->q,
	    rmta->numsnaps, rmta->thr_args, &rmta->cancel);
	struct redact_record *rec = kmem_zalloc(sizeof (*rec), KM_SLEEP);
	rec->eos_marker = B_TRUE;
	bqueue_enqueue_flush(&rmta->q, rec, 1);
	thread_exit();
}

/*
 * Find the next object in or after the redaction range passed in, and hold
 * its dnode with the provided tag. Also update *object to contain the new
 * object number.
 */
static int
hold_next_object(objset_t *os, struct redact_record *rec, const void *tag,
    uint64_t *object, dnode_t **dn)
{
	int err = 0;
	if (*dn != NULL)
		dnode_rele(*dn, tag);
	*dn = NULL;
	if (*object < rec->start_object) {
		*object = rec->start_object - 1;
	}
	err = dmu_object_next(os, object, B_FALSE, 0);
	if (err != 0)
		return (err);

	err = dnode_hold(os, *object, tag, dn);
	while (err == 0 && (*object < rec->start_object ||
	    DMU_OT_IS_METADATA((*dn)->dn_type))) {
		dnode_rele(*dn, tag);
		*dn = NULL;
		err = dmu_object_next(os, object, B_FALSE, 0);
		if (err != 0)
			break;
		err = dnode_hold(os, *object, tag, dn);
	}
	return (err);
}
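
/*
 * Note: dmu_object_next() returns ESRCH once the object set is exhausted;
 * perform_redaction() below treats that as a normal end-of-objset condition
 * rather than an error.
 */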

static int
perform_redaction(objset_t *os, redaction_list_t *rl,
    struct redact_merge_thread_arg *rmta)
{
	int err = 0;
	bqueue_t *q = &rmta->q;
	struct redact_record *rec = NULL;
	struct merge_data md = { {0} };

	list_create(&md.md_redact_block_pending,
	    sizeof (struct redact_block_list_node),
	    offsetof(struct redact_block_list_node, node));
	md.md_redaction_list = rl;

	for (int i = 0; i < TXG_SIZE; i++) {
		list_create(&md.md_blocks[i],
		    sizeof (struct redact_block_list_node),
		    offsetof(struct redact_block_list_node, node));
	}
	dnode_t *dn = NULL;
	uint64_t prev_obj = 0;
	for (rec = bqueue_dequeue(q); !rec->eos_marker && err == 0;
	    rec = get_next_redact_record(q, rec)) {
		ASSERT3U(rec->start_object, !=, 0);
		uint64_t object;
		if (prev_obj != rec->start_object) {
			object = rec->start_object - 1;
			err = hold_next_object(os, rec, FTAG, &object, &dn);
		} else {
			object = prev_obj;
		}
		while (err == 0 && object <= rec->end_object) {
			if (issig(JUSTLOOKING) && issig(FORREAL)) {
				err = EINTR;
				break;
			}
			/*
			 * Part of the current object is contained somewhere in
			 * the range covered by rec.
			 */
			uint64_t startblkid;
			uint64_t endblkid;
			uint64_t maxblkid = dn->dn_phys->dn_maxblkid;

			if (rec->start_object < object)
				startblkid = 0;
			else if (rec->start_blkid > maxblkid)
				break;
			else
				startblkid = rec->start_blkid;

			if (rec->end_object > object || rec->end_blkid >
			    maxblkid) {
				endblkid = maxblkid;
			} else {
				endblkid = rec->end_blkid;
			}
			update_redaction_list(&md, os, object, startblkid,
			    endblkid, dn->dn_datablksz);

			if (object == rec->end_object)
				break;
			err = hold_next_object(os, rec, FTAG, &object, &dn);
		}
		if (err == ESRCH)
			err = 0;
		if (dn != NULL)
			prev_obj = object;
	}
	if (err == 0 && dn != NULL)
		dnode_rele(dn, FTAG);

	if (err == ESRCH)
		err = 0;
	rmta->cancel = B_TRUE;
	while (!rec->eos_marker)
		rec = get_next_redact_record(q, rec);
	kmem_free(rec, sizeof (*rec));

	/*
	 * There may be a block that's being coalesced, sync that out before we
	 * return.
	 */
	if (err == 0 && md.md_coalesce_block.rbp_size_count != 0) {
		struct redact_block_list_node *rbln =
		    kmem_alloc(sizeof (struct redact_block_list_node),
		    KM_SLEEP);
		rbln->block = md.md_coalesce_block;
		list_insert_tail(&md.md_redact_block_pending, rbln);
	}
	commit_rl_updates(os, &md, UINT64_MAX, UINT64_MAX);

	/*
	 * Wait for all the redaction info to sync out before we return, so that
	 * anyone who attempts to resume this redaction will have all the data
	 * they need.
	 */
	dsl_pool_t *dp = spa_get_dsl(os->os_spa);
	if (md.md_latest_synctask_txg != 0)
		txg_wait_synced(dp, md.md_latest_synctask_txg);
	for (int i = 0; i < TXG_SIZE; i++)
		list_destroy(&md.md_blocks[i]);
	return (err);
}

static boolean_t
redact_snaps_contains(uint64_t *snaps, uint64_t num_snaps, uint64_t guid)
{
	for (int i = 0; i < num_snaps; i++) {
		if (snaps[i] == guid)
			return (B_TRUE);
	}
	return (B_FALSE);
}

int
dmu_redact_snap(const char *snapname, nvlist_t *redactnvl,
    const char *redactbook)
{
	int err = 0;
	dsl_pool_t *dp = NULL;
	dsl_dataset_t *ds = NULL;
	int numsnaps = 0;
	objset_t *os;
	struct redact_thread_arg *args = NULL;
	redaction_list_t *new_rl = NULL;
	char *newredactbook;

	if ((err = dsl_pool_hold(snapname, FTAG, &dp)) != 0)
		return (err);

	newredactbook = kmem_zalloc(sizeof (char) * ZFS_MAX_DATASET_NAME_LEN,
	    KM_SLEEP);

	if ((err = dsl_dataset_hold_flags(dp, snapname, DS_HOLD_FLAG_DECRYPT,
	    FTAG, &ds)) != 0) {
		goto out;
	}
	dsl_dataset_long_hold(ds, FTAG);
	if (!ds->ds_is_snapshot || dmu_objset_from_ds(ds, &os) != 0) {
		err = EINVAL;
		goto out;
	}
	if (dsl_dataset_feature_is_active(ds, SPA_FEATURE_REDACTED_DATASETS)) {
		err = EALREADY;
		goto out;
	}

	numsnaps = fnvlist_num_pairs(redactnvl);
	if (numsnaps > 0)
		args = vmem_zalloc(numsnaps * sizeof (*args), KM_SLEEP);

	nvpair_t *pair = NULL;
	for (int i = 0; i < numsnaps; i++) {
		pair = nvlist_next_nvpair(redactnvl, pair);
		const char *name = nvpair_name(pair);
		struct redact_thread_arg *rta = &args[i];
		err = dsl_dataset_hold_flags(dp, name, DS_HOLD_FLAG_DECRYPT,
		    FTAG, &rta->ds);
		if (err != 0)
			break;
		/*
		 * We want to do the long hold before we can get any other
		 * errors, because the cleanup code will release the long
		 * hold if rta->ds is filled in.
		 */
		dsl_dataset_long_hold(rta->ds, FTAG);

		err = dmu_objset_from_ds(rta->ds, &rta->os);
		if (err != 0)
			break;
		if (!dsl_dataset_is_before(rta->ds, ds, 0)) {
			err = EINVAL;
			break;
		}
		if (dsl_dataset_feature_is_active(rta->ds,
		    SPA_FEATURE_REDACTED_DATASETS)) {
			err = EALREADY;
			break;
		}
	}
	if (err != 0)
		goto out;
	VERIFY3P(nvlist_next_nvpair(redactnvl, pair), ==, NULL);

	boolean_t resuming = B_FALSE;
	zfs_bookmark_phys_t bookmark;

	(void) strlcpy(newredactbook, snapname, ZFS_MAX_DATASET_NAME_LEN);
	char *c = strchr(newredactbook, '@');
	ASSERT3P(c, !=, NULL);
	int n = snprintf(c, ZFS_MAX_DATASET_NAME_LEN - (c - newredactbook),
	    "#%s", redactbook);
	if (n >= ZFS_MAX_DATASET_NAME_LEN - (c - newredactbook)) {
		dsl_pool_rele(dp, FTAG);
		kmem_free(newredactbook,
		    sizeof (char) * ZFS_MAX_DATASET_NAME_LEN);
		if (args != NULL)
			vmem_free(args, numsnaps * sizeof (*args));
		return (SET_ERROR(ENAMETOOLONG));
	}
	err = dsl_bookmark_lookup(dp, newredactbook, NULL, &bookmark);
	if (err == 0) {
		resuming = B_TRUE;
		if (bookmark.zbm_redaction_obj == 0) {
			err = EEXIST;
			goto out;
		}
		err = dsl_redaction_list_hold_obj(dp,
		    bookmark.zbm_redaction_obj, FTAG, &new_rl);
		if (err != 0) {
			err = EIO;
			goto out;
		}
		dsl_redaction_list_long_hold(dp, new_rl, FTAG);
		if (new_rl->rl_phys->rlp_num_snaps != numsnaps) {
			err = ESRCH;
			goto out;
		}
		for (int i = 0; i < numsnaps; i++) {
			struct redact_thread_arg *rta = &args[i];
			if (!redact_snaps_contains(new_rl->rl_phys->rlp_snaps,
			    new_rl->rl_phys->rlp_num_snaps,
			    dsl_dataset_phys(rta->ds)->ds_guid)) {
				err = ESRCH;
				goto out;
			}
		}
		if (new_rl->rl_phys->rlp_last_blkid == UINT64_MAX &&
		    new_rl->rl_phys->rlp_last_object == UINT64_MAX) {
			err = EEXIST;
			goto out;
		}
		dsl_pool_rele(dp, FTAG);
		dp = NULL;
	} else {
		uint64_t *guids = NULL;
		if (numsnaps > 0) {
			guids = vmem_zalloc(numsnaps * sizeof (uint64_t),
			    KM_SLEEP);
		}
		for (int i = 0; i < numsnaps; i++) {
			struct redact_thread_arg *rta = &args[i];
			guids[i] = dsl_dataset_phys(rta->ds)->ds_guid;
		}

		dsl_pool_rele(dp, FTAG);
		dp = NULL;
		err = dsl_bookmark_create_redacted(newredactbook, snapname,
		    numsnaps, guids, FTAG, &new_rl);
		vmem_free(guids, numsnaps * sizeof (uint64_t));
		if (err != 0)
			goto out;
	}

	for (int i = 0; i < numsnaps; i++) {
		struct redact_thread_arg *rta = &args[i];
		(void) bqueue_init(&rta->q, zfs_redact_queue_ff,
		    zfs_redact_queue_length,
		    offsetof(struct redact_record, ln));
		if (resuming) {
			rta->resume.zb_blkid =
			    new_rl->rl_phys->rlp_last_blkid;
			rta->resume.zb_object =
			    new_rl->rl_phys->rlp_last_object;
		}
		rta->txg = dsl_dataset_phys(ds)->ds_creation_txg;
		(void) thread_create(NULL, 0, redact_traverse_thread, rta,
		    0, curproc, TS_RUN, minclsyspri);
	}

	struct redact_merge_thread_arg *rmta;
	rmta = kmem_zalloc(sizeof (struct redact_merge_thread_arg), KM_SLEEP);

	(void) bqueue_init(&rmta->q, zfs_redact_queue_ff,
	    zfs_redact_queue_length, offsetof(struct redact_record, ln));
	rmta->numsnaps = numsnaps;
	rmta->spa = os->os_spa;
	rmta->thr_args = args;
	(void) thread_create(NULL, 0, redact_merge_thread, rmta, 0, curproc,
	    TS_RUN, minclsyspri);
	err = perform_redaction(os, new_rl, rmta);
	bqueue_destroy(&rmta->q);
	kmem_free(rmta, sizeof (struct redact_merge_thread_arg));

out:
	kmem_free(newredactbook, sizeof (char) * ZFS_MAX_DATASET_NAME_LEN);

	if (new_rl != NULL) {
		dsl_redaction_list_long_rele(new_rl, FTAG);
		dsl_redaction_list_rele(new_rl, FTAG);
	}
	for (int i = 0; i < numsnaps; i++) {
		struct redact_thread_arg *rta = &args[i];
		/*
		 * rta->ds may be NULL if we got an error while filling
		 * it in.
		 */
		if (rta->ds != NULL) {
			dsl_dataset_long_rele(rta->ds, FTAG);
			dsl_dataset_rele_flags(rta->ds,
			    DS_HOLD_FLAG_DECRYPT, FTAG);
		}
	}

	if (args != NULL)
		vmem_free(args, numsnaps * sizeof (*args));
	if (dp != NULL)
		dsl_pool_rele(dp, FTAG);
	if (ds != NULL) {
		dsl_dataset_long_rele(ds, FTAG);
		dsl_dataset_rele_flags(ds, DS_HOLD_FLAG_DECRYPT, FTAG);
	}
	return (SET_ERROR(err));
}