/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2017, 2018 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/txg.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_traverse.h>
#include <sys/dmu_redact.h>
#include <sys/bqueue.h>
#include <sys/objlist.h>
#include <sys/dmu_tx.h>
#ifdef _KERNEL
#include <sys/zfs_vfsops.h>
#endif

/*
 * This controls the number of entries in the buffer the redaction_list_update
 * synctask uses to buffer writes to the redaction list.
 */
int redact_sync_bufsize = 1024;

/*
 * Controls how often to update the redaction list when creating a redaction
 * list.
 */
uint64_t redaction_list_update_interval_ns = 1000 * 1000 * 1000ULL; /* NS */

/*
 * This tunable controls the length of the queues that zfs redact worker
 * threads use to communicate.  If the dmu_redact_snap thread is blocking on
 * these queues, this variable may need to be increased.  If there is a
 * significant slowdown at the start of a redact operation as these threads
 * consume all the available IO resources, or the queues are consuming too
 * much memory, this variable may need to be decreased.
 */
int zfs_redact_queue_length = 1024 * 1024;

/*
 * These tunables control the fill fraction of the queues by zfs redact.  The
 * fill fraction controls the frequency with which threads have to be
 * cv_signaled.  If a lot of cpu time is being spent on cv_signal, then these
 * should be tuned down.  If the queues empty before the signalled thread can
 * catch up, then these should be tuned up.
 */
uint64_t zfs_redact_queue_ff = 20;

struct redact_record {
	bqueue_node_t ln;
	boolean_t eos_marker; /* Marks the end of the stream */
	uint64_t start_object;
	uint64_t start_blkid;
	uint64_t end_object;
	uint64_t end_blkid;
	uint8_t indblkshift;
	uint32_t datablksz;
};

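/*
 * Per-snapshot state for one of the redaction worker threads.  Each worker
 * traverses a single redaction snapshot and feeds redact_records through its
 * bqueue to the merge thread.
 */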
struct redact_thread_arg {
	bqueue_t q;
	dsl_dataset_t *ds;		/* Dataset to traverse */
	struct redact_record *current_record;
	int error_code;
	boolean_t cancel;
	zbookmark_phys_t resume;
	objlist_t *deleted_objs;
	uint64_t *num_blocks_visited;
	uint64_t ignore_object;		/* ignore further callbacks on this */
	uint64_t txg;			/* txg to traverse since */
};

/*
 * The redaction node is a wrapper around the redaction record that is used
 * by the redaction merging thread to sort the records and determine overlaps.
 *
 * It contains two nodes; one sorts the records by their start_zb, and the
 * other sorts the records by their end_zb.
 */
struct redact_node {
	avl_node_t avl_node_start;
	avl_node_t avl_node_end;
	struct redact_record *record;
	struct redact_thread_arg *rt_arg;
	uint32_t thread_num;
};

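/*
 * State shared between the record-merging logic and the synctasks that write
 * the results to the redaction list.  Blocks are appended to
 * md_redact_block_pending in open context, then handed off to the per-txg
 * md_blocks lists when a synctask is dispatched.
 */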
struct merge_data {
	list_t md_redact_block_pending;
	redact_block_phys_t md_coalesce_block;
	uint64_t md_last_time;
	redact_block_phys_t md_furthest[TXG_SIZE];
	/* Lists of struct redact_block_list_node. */
	list_t md_blocks[TXG_SIZE];
	boolean_t md_synctask_txg[TXG_SIZE];
	uint64_t md_latest_synctask_txg;
	redaction_list_t *md_redaction_list;
};

/*
 * A wrapper around struct redact_block so it can be stored in a list_t.
 */
struct redact_block_list_node {
	redact_block_phys_t block;
	list_node_t node;
};

/*
 * We've found a new redaction candidate.  In order to improve performance, we
 * coalesce these blocks when they're adjacent to each other.  This function
 * handles that.  If the new candidate block range is immediately after the
 * range we're building, coalesce it into the range we're building.  Otherwise,
 * put the record we're building on the queue, and update the build pointer to
 * point to the new record.
 */
static void
record_merge_enqueue(bqueue_t *q, struct redact_record **build,
    struct redact_record *new)
{
	if (new->eos_marker) {
		if (*build != NULL)
			bqueue_enqueue(q, *build, sizeof (**build));
		bqueue_enqueue_flush(q, new, sizeof (*new));
		return;
	}
	if (*build == NULL) {
		*build = new;
		return;
	}
	struct redact_record *curbuild = *build;
	if ((curbuild->end_object == new->start_object &&
	    curbuild->end_blkid + 1 == new->start_blkid &&
	    curbuild->end_blkid != UINT64_MAX) ||
	    (curbuild->end_object + 1 == new->start_object &&
	    curbuild->end_blkid == UINT64_MAX && new->start_blkid == 0)) {
		curbuild->end_object = new->end_object;
		curbuild->end_blkid = new->end_blkid;
		kmem_free(new, sizeof (*new));
	} else {
		bqueue_enqueue(q, curbuild, sizeof (*curbuild));
		*build = new;
	}
}

/*
 * This is the callback function to traverse_dataset for the redaction threads
 * for dmu_redact_snap.  This thread is responsible for creating redaction
 * records for all the data that is modified by the snapshots we're redacting
 * with respect to.  Redaction records represent ranges of data that have been
 * modified by one of the redaction snapshots, and are stored in the
 * redact_record struct.  We need to create redaction records for three
 * cases:
 *
 * First, if there's a normal write, we need to create a redaction record for
 * that block.
 *
 * Second, if there's a hole, we need to create a redaction record that covers
 * the whole range of the hole.  If the hole is in the meta-dnode, it must
 * cover every block in all of the objects in the hole.
 *
 * Third, if there is a deleted object, we need to create a redaction record
 * for all of the blocks in that object.
 */
/*ARGSUSED*/
static int
redact_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_phys_t *zb, const struct dnode_phys *dnp, void *arg)
{
	struct redact_thread_arg *rta = arg;
	struct redact_record *record;

	ASSERT(zb->zb_object == DMU_META_DNODE_OBJECT ||
	    zb->zb_object >= rta->resume.zb_object);

	if (rta->cancel)
		return (SET_ERROR(EINTR));

	if (rta->ignore_object == zb->zb_object)
		return (0);

	/*
	 * If we're visiting a dnode, we need to handle the case where the
	 * object has been deleted.
	 */
	if (zb->zb_level == ZB_DNODE_LEVEL) {
		if (zb->zb_object == 0)
			return (0);

		/*
		 * If the object has been deleted, redact all of the blocks in
		 * it.
		 */
		if (dnp->dn_type == DMU_OT_NONE ||
		    objlist_exists(rta->deleted_objs, zb->zb_object)) {
			rta->ignore_object = zb->zb_object;
			record = kmem_zalloc(sizeof (struct redact_record),
			    KM_SLEEP);

			record->eos_marker = B_FALSE;
			record->start_object = record->end_object =
			    zb->zb_object;
			record->start_blkid = 0;
			record->end_blkid = UINT64_MAX;
			record_merge_enqueue(&rta->q,
			    &rta->current_record, record);
		}
		return (0);
	} else if (zb->zb_level < 0) {
		return (0);
	} else if (zb->zb_level > 0 && !BP_IS_HOLE(bp)) {
		/*
		 * If this is an indirect block, but not a hole, it doesn't
		 * provide any useful information for redaction, so ignore it.
		 */
		return (0);
	}

	/*
	 * At this point, there are two options left for the type of block
	 * we're looking at.  Either this is a hole (which could be in the
	 * dnode or the meta-dnode), or it's a level 0 block of some sort.  If
	 * it's a hole, we create a redaction record that covers the whole
	 * range.  If the hole is in a dnode, we need to redact all the blocks
	 * in that hole.  If the hole is in the meta-dnode, we instead need to
	 * redact all blocks in every object covered by that hole.  If it's a
	 * level 0 block, we only need to redact that single block.
	 */
	record = kmem_zalloc(sizeof (struct redact_record), KM_SLEEP);
	record->eos_marker = B_FALSE;

	record->start_object = record->end_object = zb->zb_object;
	if (BP_IS_HOLE(bp)) {
		record->start_blkid = zb->zb_blkid *
		    bp_span_in_blocks(dnp->dn_indblkshift, zb->zb_level);

		record->end_blkid = ((zb->zb_blkid + 1) *
		    bp_span_in_blocks(dnp->dn_indblkshift, zb->zb_level)) - 1;

		if (zb->zb_object == DMU_META_DNODE_OBJECT) {
			record->start_object = record->start_blkid *
			    ((SPA_MINBLOCKSIZE * dnp->dn_datablkszsec) /
			    sizeof (dnode_phys_t));
			record->start_blkid = 0;
			record->end_object = ((record->end_blkid +
			    1) * ((SPA_MINBLOCKSIZE * dnp->dn_datablkszsec) /
			    sizeof (dnode_phys_t))) - 1;
			record->end_blkid = UINT64_MAX;
		}
	} else if (zb->zb_level != 0 ||
	    zb->zb_object == DMU_META_DNODE_OBJECT) {
		kmem_free(record, sizeof (*record));
		return (0);
	} else {
		record->start_blkid = record->end_blkid = zb->zb_blkid;
	}
	record->indblkshift = dnp->dn_indblkshift;
	record->datablksz = dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT;
	record_merge_enqueue(&rta->q, &rta->current_record, record);

	return (0);
}

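/*
 * Body of each traversal worker thread: walk one redaction snapshot with
 * traverse_dataset_resume(), feeding redact_cb, then enqueue an end-of-stream
 * record so the merge thread knows this stream is finished.
 */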
static void
redact_traverse_thread(void *arg)
{
	struct redact_thread_arg *rt_arg = arg;
	int err;
	struct redact_record *data;
	objset_t *os;
	VERIFY0(dmu_objset_from_ds(rt_arg->ds, &os));
#ifdef _KERNEL
	if (os->os_phys->os_type == DMU_OST_ZFS)
		rt_arg->deleted_objs = zfs_get_deleteq(os);
	else
		rt_arg->deleted_objs = objlist_create();
#else
	rt_arg->deleted_objs = objlist_create();
#endif

	err = traverse_dataset_resume(rt_arg->ds, rt_arg->txg,
	    &rt_arg->resume, TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA,
	    redact_cb, rt_arg);

	if (err != EINTR)
		rt_arg->error_code = err;
	objlist_destroy(rt_arg->deleted_objs);
	data = kmem_zalloc(sizeof (*data), KM_SLEEP);
	data->eos_marker = B_TRUE;
	record_merge_enqueue(&rt_arg->q, &rt_arg->current_record, data);
	thread_exit();
}

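/*
 * Construct a level-0 zbookmark for the given object/block id pair, so two
 * object/offset positions can be compared with zbookmark_compare().
 */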
static inline void
create_zbookmark_from_obj_off(zbookmark_phys_t *zb, uint64_t object,
    uint64_t blkid)
{
	zb->zb_object = object;
	zb->zb_level = 0;
	zb->zb_blkid = blkid;
}

/*
 * This is a utility function that can do the comparison for the starts or
 * ends of the ranges in a redact_record.
 */
static int
redact_range_compare(uint64_t obj1, uint64_t off1, uint32_t dbss1,
    uint64_t obj2, uint64_t off2, uint32_t dbss2)
{
	zbookmark_phys_t z1, z2;
	create_zbookmark_from_obj_off(&z1, obj1, off1);
	create_zbookmark_from_obj_off(&z2, obj2, off2);

	return (zbookmark_compare(dbss1 >> SPA_MINBLOCKSHIFT, 0,
	    dbss2 >> SPA_MINBLOCKSHIFT, 0, &z1, &z2));
}

/*
 * Compare two redaction records by their range's start location.  Also makes
 * eos records always compare last.  We use the thread number in the
 * redact_node to ensure that records do not compare equal (which is not
 * allowed in our avl trees).
 */
static int
redact_node_compare_start(const void *arg1, const void *arg2)
{
	const struct redact_node *rn1 = arg1;
	const struct redact_node *rn2 = arg2;
	const struct redact_record *rr1 = rn1->record;
	const struct redact_record *rr2 = rn2->record;
	if (rr1->eos_marker)
		return (1);
	if (rr2->eos_marker)
		return (-1);

	int cmp = redact_range_compare(rr1->start_object, rr1->start_blkid,
	    rr1->datablksz, rr2->start_object, rr2->start_blkid,
	    rr2->datablksz);
	if (cmp == 0)
		cmp = (rn1->thread_num < rn2->thread_num ? -1 : 1);
	return (cmp);
}

/*
 * Compare two redaction records by their range's end location.  Also makes
 * eos records always compare last.  We use the thread number in the
 * redact_node to ensure that records do not compare equal (which is not
 * allowed in our avl trees).
 */
static int
redact_node_compare_end(const void *arg1, const void *arg2)
{
	const struct redact_node *rn1 = arg1;
	const struct redact_node *rn2 = arg2;
	const struct redact_record *srr1 = rn1->record;
	const struct redact_record *srr2 = rn2->record;
	if (srr1->eos_marker)
		return (1);
	if (srr2->eos_marker)
		return (-1);

	int cmp = redact_range_compare(srr1->end_object, srr1->end_blkid,
	    srr1->datablksz, srr2->end_object, srr2->end_blkid,
	    srr2->datablksz);
	if (cmp == 0)
		cmp = (rn1->thread_num < rn2->thread_num ? -1 : 1);
	return (cmp);
}

/*
 * Utility function that compares two redaction records to determine if any
 * part of the "from" record is before any part of the "to" record.  Also
 * causes End of Stream redaction records to compare after all others, so that
 * the redaction merging logic can stay simple.
 */
static boolean_t
redact_record_before(const struct redact_record *from,
    const struct redact_record *to)
{
	if (from->eos_marker == B_TRUE)
		return (B_FALSE);
	else if (to->eos_marker == B_TRUE)
		return (B_TRUE);
	return (redact_range_compare(from->start_object, from->start_blkid,
	    from->datablksz, to->end_object, to->end_blkid,
	    to->datablksz) <= 0);
}

/*
 * Pop a new redaction record off the queue, check that the records are in the
 * right order, and free the old data.
 */
static struct redact_record *
get_next_redact_record(bqueue_t *bq, struct redact_record *prev)
{
	struct redact_record *next = bqueue_dequeue(bq);
	ASSERT(redact_record_before(prev, next));
	kmem_free(prev, sizeof (*prev));
	return (next);
}

/*
 * Remove the given redaction node from both trees, pull a new redaction
 * record off the queue, free the old redaction record, update the redaction
 * node, and reinsert the node into the trees.
 */
static int
update_avl_trees(avl_tree_t *start_tree, avl_tree_t *end_tree,
    struct redact_node *redact_node)
{
	avl_remove(start_tree, redact_node);
	avl_remove(end_tree, redact_node);
	redact_node->record = get_next_redact_record(&redact_node->rt_arg->q,
	    redact_node->record);
	avl_add(end_tree, redact_node);
	avl_add(start_tree, redact_node);
	return (redact_node->rt_arg->error_code);
}

/*
 * Synctask for updating redaction lists.  We first take this txg's list of
 * redacted blocks and append those to the redaction list.  We then update the
 * redaction list's bonus buffer.  We store the furthest blocks we visited and
 * the list of snapshots that we're redacting with respect to.  We need these
 * so that redacted sends and receives can be correctly resumed.
 */
static void
redaction_list_update_sync(void *arg, dmu_tx_t *tx)
{
	struct merge_data *md = arg;
	uint64_t txg = dmu_tx_get_txg(tx);
	list_t *list = &md->md_blocks[txg & TXG_MASK];
	redact_block_phys_t *furthest_visited =
	    &md->md_furthest[txg & TXG_MASK];
	objset_t *mos = tx->tx_pool->dp_meta_objset;
	redaction_list_t *rl = md->md_redaction_list;
	int bufsize = redact_sync_bufsize;
	redact_block_phys_t *buf = kmem_alloc(bufsize * sizeof (*buf),
	    KM_SLEEP);
	int index = 0;

	dmu_buf_will_dirty(rl->rl_dbuf, tx);

	for (struct redact_block_list_node *rbln = list_remove_head(list);
	    rbln != NULL; rbln = list_remove_head(list)) {
		ASSERT3U(rbln->block.rbp_object, <=,
		    furthest_visited->rbp_object);
		ASSERT(rbln->block.rbp_object < furthest_visited->rbp_object ||
		    rbln->block.rbp_blkid <= furthest_visited->rbp_blkid);
		buf[index] = rbln->block;
		index++;
		if (index == bufsize) {
			dmu_write(mos, rl->rl_object,
			    rl->rl_phys->rlp_num_entries * sizeof (*buf),
			    bufsize * sizeof (*buf), buf, tx);
			rl->rl_phys->rlp_num_entries += bufsize;
			index = 0;
		}
		kmem_free(rbln, sizeof (*rbln));
	}
	if (index > 0) {
		dmu_write(mos, rl->rl_object, rl->rl_phys->rlp_num_entries *
		    sizeof (*buf), index * sizeof (*buf), buf, tx);
		rl->rl_phys->rlp_num_entries += index;
	}
	kmem_free(buf, bufsize * sizeof (*buf));

	md->md_synctask_txg[txg & TXG_MASK] = B_FALSE;
	rl->rl_phys->rlp_last_object = furthest_visited->rbp_object;
	rl->rl_phys->rlp_last_blkid = furthest_visited->rbp_blkid;
}

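/*
 * Dispatch a redaction_list_update_sync synctask for the current txg (if one
 * is not already pending), record how far the traversal has progressed, and
 * hand the pending block list off to that txg.
 */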
void
commit_rl_updates(objset_t *os, struct merge_data *md, uint64_t object,
    uint64_t blkid)
{
	dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(os->os_spa)->dp_mos_dir);
	dmu_tx_hold_space(tx, sizeof (struct redact_block_list_node));
	VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
	uint64_t txg = dmu_tx_get_txg(tx);
	if (!md->md_synctask_txg[txg & TXG_MASK]) {
		dsl_sync_task_nowait(dmu_tx_pool(tx),
		    redaction_list_update_sync, md, 5, ZFS_SPACE_CHECK_NONE,
		    tx);
		md->md_synctask_txg[txg & TXG_MASK] = B_TRUE;
		md->md_latest_synctask_txg = txg;
	}
	md->md_furthest[txg & TXG_MASK].rbp_object = object;
	md->md_furthest[txg & TXG_MASK].rbp_blkid = blkid;
	list_move_tail(&md->md_blocks[txg & TXG_MASK],
	    &md->md_redact_block_pending);
	dmu_tx_commit(tx);
	md->md_last_time = gethrtime();
}

/*
 * We want to store the list of blocks that we're redacting in the bookmark's
 * redaction list.  However, this list is stored in the MOS, which means it can
 * only be written to in syncing context.  To get around this, we create a
 * synctask that will write to the mos for us.  We tell it what to write by
 * maintaining a linked list for each transaction group; every time we decide
 * to redact a block, we append it to the list for the transaction group that
 * is currently open.  We also update some progress information that the
 * synctask will store to enable resumable redacted sends.
 */
static void
update_redaction_list(struct merge_data *md, objset_t *os,
    uint64_t object, uint64_t blkid, uint64_t endblkid, uint32_t blksz)
{
	boolean_t enqueue = B_FALSE;
	redact_block_phys_t cur = {0};
	uint64_t count = endblkid - blkid + 1;
	while (count > REDACT_BLOCK_MAX_COUNT) {
		update_redaction_list(md, os, object, blkid,
		    blkid + REDACT_BLOCK_MAX_COUNT - 1, blksz);
		blkid += REDACT_BLOCK_MAX_COUNT;
		count -= REDACT_BLOCK_MAX_COUNT;
	}
	redact_block_phys_t *coalesce = &md->md_coalesce_block;
	boolean_t new;
	if (coalesce->rbp_size_count == 0) {
		new = B_TRUE;
		enqueue = B_FALSE;
	} else {
		uint64_t old_count = redact_block_get_count(coalesce);
		if (coalesce->rbp_object == object &&
		    coalesce->rbp_blkid + old_count == blkid &&
		    old_count + count <= REDACT_BLOCK_MAX_COUNT) {
			ASSERT3U(redact_block_get_size(coalesce), ==, blksz);
			redact_block_set_count(coalesce, old_count + count);
			new = B_FALSE;
			enqueue = B_FALSE;
		} else {
			new = B_TRUE;
			enqueue = B_TRUE;
		}
	}

	if (new) {
		cur = *coalesce;
		coalesce->rbp_blkid = blkid;
		coalesce->rbp_object = object;

		redact_block_set_count(coalesce, count);
		redact_block_set_size(coalesce, blksz);
	}

	if (enqueue && redact_block_get_size(&cur) != 0) {
		struct redact_block_list_node *rbln =
		    kmem_alloc(sizeof (struct redact_block_list_node),
		    KM_SLEEP);
		rbln->block = cur;
		list_insert_tail(&md->md_redact_block_pending, rbln);
	}

	if (gethrtime() > md->md_last_time +
	    redaction_list_update_interval_ns) {
		commit_rl_updates(os, md, object, blkid);
	}
}

/*
 * This thread merges all the redaction records provided by the worker
 * threads, and determines which blocks are redacted by all the snapshots.
 * The algorithm for doing so is similar to performing a merge in mergesort
 * with n sub-lists instead of 2, with some added complexity due to the fact
 * that the entries are ranges, not just single blocks.  This algorithm relies
 * on the fact that the queues are sorted, which is ensured by the fact that
 * traverse_dataset traverses the dataset in a consistent order.  We pull one
 * entry off the front of the queues of each secure dataset traversal thread.
 * Then we repeat the following: each record represents a range of blocks
 * modified by one of the redaction snapshots, and each block in that range
 * may need to be redacted in the send stream.  Find the record with the
 * latest start of its range, and the record with the earliest end of its
 * range.  If the last start is before the first end, then we know that the
 * blocks in the range [last_start, first_end] are covered by all of the
 * ranges at the front of the queues, which means every thread redacts that
 * whole range.  For example, let's say the ranges on each queue look like
 * this:
 *
 * Block Id    1  2  3  4  5  6  7  8  9 10 11
 * Thread 1 |       [===================]
 * Thread 2 |          [=======]
 * Thread 3 |             [================]
 *
 * Thread 3 has the last start (5), and thread 2 has the first end (6).  All
 * three threads modified the range [5,6], so that data should not be sent
 * over the wire.  After we've determined whether or not to redact anything,
 * we take the record with the first end.  We discard that record, and pull a
 * new one off the front of the queue it came from.  In the above example, we
 * would discard Thread 2's record, and pull a new one.  Let's say the next
 * record we pulled from Thread 2 covered range [10,11].  The new layout would
 * look like this:
 *
 * Block Id    1  2  3  4  5  6  7  8  9 10 11
 * Thread 1 |       [===================]
 * Thread 2 |                           [=====]
 * Thread 3 |             [================]
 *
 * When we compare the last start (10, from Thread 2) and the first end (9,
 * from Thread 1), we see that the last start is greater than the first end.
 * Therefore, we do not redact anything from these records.  We'll iterate by
 * replacing the record from Thread 1.
 *
 * We iterate by replacing the record with the lowest end because we know
 * that the record with the lowest end has helped us as much as it can.  All
 * the ranges before it that we will ever redact have been redacted.  In
 * addition, by replacing the one with the lowest end, we guarantee we catch
 * all ranges that need to be redacted.  For example, if in the case above we
 * had replaced the record from Thread 1 instead, we might have ended up with
 * the following:
 *
 * Block Id    1  2  3  4  5  6  7  8  9 10 11 12
 * Thread 1 |                              [=====]
 * Thread 2 |          [=======]
 * Thread 3 |             [================]
 *
 * If the next record from Thread 2 had been [8,10], for example, we should
 * have redacted part of that range, but because we updated Thread 1's record,
 * we missed it.
 *
 * We implement this algorithm by using two trees.  The first sorts the
 * redaction records by their start_zb, and the second sorts them by their
 * end_zb.  We use these to find the record with the last start and the record
 * with the first end.  We create a record with that start and end, and send
 * it on.  The overall runtime of this implementation is O(n log m), where n
 * is the total number of redaction records from all the different redaction
 * snapshots, and m is the number of redaction snapshots.
 *
 * If we redact with respect to zero snapshots, we create a redaction record
 * whose start object and blkid are 0, and whose end object and blkid are
 * UINT64_MAX.  This will result in us redacting every block.
 */
static int
perform_thread_merge(bqueue_t *q, uint32_t num_threads,
    struct redact_thread_arg *thread_args, boolean_t *cancel)
{
	struct redact_node *redact_nodes = NULL;
	avl_tree_t start_tree, end_tree;
	struct redact_record *record;
	struct redact_record *current_record = NULL;
	int err = 0;
	struct merge_data md = { {0} };
	list_create(&md.md_redact_block_pending,
	    sizeof (struct redact_block_list_node),
	    offsetof(struct redact_block_list_node, node));

	/*
	 * If we're redacting with respect to zero snapshots, then no data is
	 * permitted to be sent.  We enqueue a record that redacts all blocks,
	 * and an eos marker.
	 */
	if (num_threads == 0) {
		record = kmem_zalloc(sizeof (struct redact_record),
		    KM_SLEEP);
		/* We can't redact object 0, so don't try. */
		record->start_object = 1;
		record->start_blkid = 0;
		record->end_object = record->end_blkid = UINT64_MAX;
		bqueue_enqueue(q, record, sizeof (*record));
		return (0);
	}
	if (num_threads > 0) {
		redact_nodes = kmem_zalloc(num_threads *
		    sizeof (*redact_nodes), KM_SLEEP);
	}

	avl_create(&start_tree, redact_node_compare_start,
	    sizeof (struct redact_node),
	    offsetof(struct redact_node, avl_node_start));
	avl_create(&end_tree, redact_node_compare_end,
	    sizeof (struct redact_node),
	    offsetof(struct redact_node, avl_node_end));

	for (int i = 0; i < num_threads; i++) {
		struct redact_node *node = &redact_nodes[i];
		struct redact_thread_arg *targ = &thread_args[i];
		node->record = bqueue_dequeue(&targ->q);
		node->rt_arg = targ;
		node->thread_num = i;
		avl_add(&start_tree, node);
		avl_add(&end_tree, node);
	}

	/*
	 * Once the first record in the end tree has returned EOS, every record
	 * must be an EOS record, so we should stop.
	 */
	while (err == 0 && !((struct redact_node *)avl_first(&end_tree))->
	    record->eos_marker) {
		if (*cancel) {
			err = EINTR;
			break;
		}
		struct redact_node *last_start = avl_last(&start_tree);
		struct redact_node *first_end = avl_first(&end_tree);

		/*
		 * If the last start record is before the first end record,
		 * then we have blocks that are redacted by all threads.
		 * Therefore, we should redact them.  Copy the record, and
		 * send it to the main thread.
		 */
		if (redact_record_before(last_start->record,
		    first_end->record)) {
			record = kmem_zalloc(sizeof (struct redact_record),
			    KM_SLEEP);
			*record = *first_end->record;
			record->start_object = last_start->record->start_object;
			record->start_blkid = last_start->record->start_blkid;
			record_merge_enqueue(q, &current_record,
			    record);
		}
		err = update_avl_trees(&start_tree, &end_tree, first_end);
	}

	/*
	 * We're done; if we were cancelled, we need to cancel our workers and
	 * clear out their queues.  Either way, we need to remove every
	 * thread's redact_node struct from the avl trees.
	 */
	for (int i = 0; i < num_threads; i++) {
		if (err != 0) {
			thread_args[i].cancel = B_TRUE;
			while (!redact_nodes[i].record->eos_marker) {
				(void) update_avl_trees(&start_tree, &end_tree,
				    &redact_nodes[i]);
			}
		}
		avl_remove(&start_tree, &redact_nodes[i]);
		avl_remove(&end_tree, &redact_nodes[i]);
		kmem_free(redact_nodes[i].record,
		    sizeof (struct redact_record));
	}

	avl_destroy(&start_tree);
	avl_destroy(&end_tree);
	kmem_free(redact_nodes, num_threads * sizeof (*redact_nodes));
	if (current_record != NULL)
		bqueue_enqueue(q, current_record, sizeof (*current_record));
	return (err);
}

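/*
 * Arguments for the thread that merges the per-snapshot record streams into a
 * single stream of records redacted by every snapshot.
 */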
struct redact_merge_thread_arg {
	bqueue_t q;
	spa_t *spa;
	int numsnaps;
	struct redact_thread_arg *thr_args;
	boolean_t cancel;
	int error_code;
};

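/*
 * Body of the merge thread: run perform_thread_merge() and then flush an
 * end-of-stream record into the output queue for the consumer.
 */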
static void
redact_merge_thread(void *arg)
{
	struct redact_merge_thread_arg *rmta = arg;
	rmta->error_code = perform_thread_merge(&rmta->q,
	    rmta->numsnaps, rmta->thr_args, &rmta->cancel);
	struct redact_record *rec = kmem_zalloc(sizeof (*rec), KM_SLEEP);
	rec->eos_marker = B_TRUE;
	bqueue_enqueue_flush(&rmta->q, rec, 1);
	thread_exit();
}

/*
 * Find the next object in or after the redaction range passed in, and hold
 * its dnode with the provided tag.  Also update *object to contain the new
 * object number.
 */
static int
hold_next_object(objset_t *os, struct redact_record *rec, void *tag,
    uint64_t *object, dnode_t **dn)
{
	int err = 0;
	if (*dn != NULL)
		dnode_rele(*dn, tag);
	*dn = NULL;
	if (*object < rec->start_object) {
		*object = rec->start_object - 1;
	}
	err = dmu_object_next(os, object, B_FALSE, 0);
	if (err != 0)
		return (err);

	err = dnode_hold(os, *object, tag, dn);
	while (err == 0 && (*object < rec->start_object ||
	    DMU_OT_IS_METADATA((*dn)->dn_type))) {
		dnode_rele(*dn, tag);
		*dn = NULL;
		err = dmu_object_next(os, object, B_FALSE, 0);
		if (err != 0)
			break;
		err = dnode_hold(os, *object, tag, dn);
	}
	return (err);
}

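/*
 * Consume the merged record stream, clamping each record to the blocks that
 * actually exist in the objects it covers, and convert the results into
 * entries in the on-disk redaction list.
 */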
static int
perform_redaction(objset_t *os, redaction_list_t *rl,
    struct redact_merge_thread_arg *rmta)
{
	int err = 0;
	bqueue_t *q = &rmta->q;
	struct redact_record *rec = NULL;
	struct merge_data md = { {0} };

	list_create(&md.md_redact_block_pending,
	    sizeof (struct redact_block_list_node),
	    offsetof(struct redact_block_list_node, node));
	md.md_redaction_list = rl;

	for (int i = 0; i < TXG_SIZE; i++) {
		list_create(&md.md_blocks[i],
		    sizeof (struct redact_block_list_node),
		    offsetof(struct redact_block_list_node, node));
	}
	dnode_t *dn = NULL;
	uint64_t prev_obj = 0;
	for (rec = bqueue_dequeue(q); !rec->eos_marker && err == 0;
	    rec = get_next_redact_record(q, rec)) {
		ASSERT3U(rec->start_object, !=, 0);
		uint64_t object;
		if (prev_obj != rec->start_object) {
			object = rec->start_object - 1;
			err = hold_next_object(os, rec, FTAG, &object, &dn);
		} else {
			object = prev_obj;
		}
		while (err == 0 && object <= rec->end_object) {
			if (issig(JUSTLOOKING) && issig(FORREAL)) {
				err = EINTR;
				break;
			}
			/*
			 * Part of the current object is contained somewhere
			 * in the range covered by rec.
			 */
			uint64_t startblkid;
			uint64_t endblkid;
			uint64_t maxblkid = dn->dn_phys->dn_maxblkid;

			if (rec->start_object < object)
				startblkid = 0;
			else if (rec->start_blkid > maxblkid)
				break;
			else
				startblkid = rec->start_blkid;

			if (rec->end_object > object || rec->end_blkid >
			    maxblkid) {
				endblkid = maxblkid;
			} else {
				endblkid = rec->end_blkid;
			}
			update_redaction_list(&md, os, object, startblkid,
			    endblkid, dn->dn_datablksz);

			if (object == rec->end_object)
				break;
			err = hold_next_object(os, rec, FTAG, &object, &dn);
		}
		if (err == ESRCH)
			err = 0;
		if (dn != NULL)
			prev_obj = object;
	}
	if (err == 0 && dn != NULL)
		dnode_rele(dn, FTAG);

	if (err == ESRCH)
		err = 0;
	rmta->cancel = B_TRUE;
	while (!rec->eos_marker)
		rec = get_next_redact_record(q, rec);
	kmem_free(rec, sizeof (*rec));

	/*
	 * There may be a block that's being coalesced, sync that out before
	 * we return.
	 */
	if (err == 0 && md.md_coalesce_block.rbp_size_count != 0) {
		struct redact_block_list_node *rbln =
		    kmem_alloc(sizeof (struct redact_block_list_node),
		    KM_SLEEP);
		rbln->block = md.md_coalesce_block;
		list_insert_tail(&md.md_redact_block_pending, rbln);
	}
	commit_rl_updates(os, &md, UINT64_MAX, UINT64_MAX);

	/*
	 * Wait for all the redaction info to sync out before we return, so
	 * that anyone who attempts to resume this redaction will have all the
	 * data they need.
	 */
	dsl_pool_t *dp = spa_get_dsl(os->os_spa);
	if (md.md_latest_synctask_txg != 0)
		txg_wait_synced(dp, md.md_latest_synctask_txg);
	for (int i = 0; i < TXG_SIZE; i++)
		list_destroy(&md.md_blocks[i]);
	return (err);
}

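/*
 * Check whether the given snapshot guid appears in the list of redaction
 * snapshot guids.
 */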
static boolean_t
redact_snaps_contains(uint64_t *snaps, uint64_t num_snaps, uint64_t guid)
{
	for (int i = 0; i < num_snaps; i++) {
		if (snaps[i] == guid)
			return (B_TRUE);
	}
	return (B_FALSE);
}

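/*
 * Create (or resume) the redaction bookmark named by redactbook on the
 * snapshot named by snapname, redacted with respect to the snapshots in
 * redactnvl.  Spawns one traversal thread per redaction snapshot and a merge
 * thread, then consumes the merged records in this thread via
 * perform_redaction().
 */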
int
dmu_redact_snap(const char *snapname, nvlist_t *redactnvl,
    const char *redactbook)
{
	int err = 0;
	dsl_pool_t *dp = NULL;
	dsl_dataset_t *ds = NULL;
	objset_t *os;
	int numsnaps = 0;
	dsl_dataset_t **redactsnaparr = NULL;
	struct redact_thread_arg *args = NULL;
	redaction_list_t *new_rl = NULL;

	if ((err = dsl_pool_hold(snapname, FTAG, &dp)) != 0)
		return (err);

	if ((err = dsl_dataset_hold_flags(dp, snapname, DS_HOLD_FLAG_DECRYPT,
	    FTAG, &ds)) != 0) {
		goto out;
	}
	dsl_dataset_long_hold(ds, FTAG);
	if (!ds->ds_is_snapshot || dmu_objset_from_ds(ds, &os) != 0) {
		err = EINVAL;
		goto out;
	}
	if (dsl_dataset_feature_is_active(ds, SPA_FEATURE_REDACTED_DATASETS)) {
		err = EALREADY;
		goto out;
	}
	nvpair_t *pair;

	if (fnvlist_num_pairs(redactnvl) > 0 && err == 0) {
		redactsnaparr = kmem_zalloc(fnvlist_num_pairs(redactnvl) *
		    sizeof (dsl_dataset_t *), KM_SLEEP);
	}
	for (pair = nvlist_next_nvpair(redactnvl, NULL); err == 0 &&
	    pair != NULL; pair = nvlist_next_nvpair(redactnvl, pair)) {
		const char *name = nvpair_name(pair);
		err = dsl_dataset_hold_flags(dp, name, DS_HOLD_FLAG_DECRYPT,
		    FTAG, redactsnaparr + numsnaps);
		if (err != 0)
			break;
		dsl_dataset_long_hold(redactsnaparr[numsnaps], FTAG);
		if (!dsl_dataset_is_before(redactsnaparr[numsnaps], ds, 0)) {
			err = EINVAL;
			numsnaps++;
			break;
		}
		if (dsl_dataset_feature_is_active(redactsnaparr[numsnaps],
		    SPA_FEATURE_REDACTED_DATASETS)) {
			err = EALREADY;
			numsnaps++;
			break;
		}
		numsnaps++;
	}
	if (err != 0)
		goto out;

	ASSERT3U(fnvlist_num_pairs(redactnvl), ==, numsnaps);

	boolean_t resuming = B_FALSE;
	char newredactbook[ZFS_MAX_DATASET_NAME_LEN];
	zfs_bookmark_phys_t bookmark;

	(void) strlcpy(newredactbook, snapname, ZFS_MAX_DATASET_NAME_LEN);
	char *c = strchr(newredactbook, '@');
	ASSERT3P(c, !=, NULL);
	int n = snprintf(c, ZFS_MAX_DATASET_NAME_LEN - (c - newredactbook),
	    "#%s", redactbook);
	if (n >= ZFS_MAX_DATASET_NAME_LEN - (c - newredactbook)) {
		err = ENAMETOOLONG;
		goto out;
	}
	err = dsl_bookmark_lookup(dp, newredactbook, NULL, &bookmark);
	if (err == 0) {
		resuming = B_TRUE;
		if (bookmark.zbm_redaction_obj == 0) {
			err = EEXIST;
			goto out;
		}
		err = dsl_redaction_list_hold_obj(dp,
		    bookmark.zbm_redaction_obj, FTAG, &new_rl);
		if (err != 0) {
			err = EIO;
			goto out;
		}
		dsl_redaction_list_long_hold(dp, new_rl, FTAG);
		if (new_rl->rl_phys->rlp_num_snaps != numsnaps) {
			err = ESRCH;
			goto out;
		}
		for (int i = 0; i < numsnaps; i++) {
			if (!redact_snaps_contains(new_rl->rl_phys->rlp_snaps,
			    new_rl->rl_phys->rlp_num_snaps,
			    dsl_dataset_phys(redactsnaparr[i])->ds_guid)) {
				err = ESRCH;
				goto out;
			}
		}
		if (numsnaps > 0)
			args = kmem_zalloc(numsnaps * sizeof (*args), KM_SLEEP);
		if (new_rl->rl_phys->rlp_last_blkid == UINT64_MAX &&
		    new_rl->rl_phys->rlp_last_object == UINT64_MAX) {
			err = EEXIST;
			goto out;
		}
		dsl_pool_rele(dp, FTAG);
		dp = NULL;
	} else {
		uint64_t *guids = NULL;
		if (numsnaps > 0) {
			guids = kmem_zalloc(numsnaps * sizeof (uint64_t),
			    KM_SLEEP);
			args = kmem_zalloc(numsnaps * sizeof (*args), KM_SLEEP);
		}
		for (int i = 0; i < numsnaps; i++)
			guids[i] = dsl_dataset_phys(redactsnaparr[i])->ds_guid;

		dsl_pool_rele(dp, FTAG);
		dp = NULL;
		err = dsl_bookmark_create_redacted(newredactbook, snapname,
		    numsnaps, guids, FTAG, &new_rl);
		kmem_free(guids, numsnaps * sizeof (uint64_t));
		if (err != 0) {
			goto out;
		}
	}

	for (int i = 0; i < numsnaps; i++) {
		args[i].ds = redactsnaparr[i];
		(void) bqueue_init(&args[i].q, zfs_redact_queue_ff,
		    zfs_redact_queue_length,
		    offsetof(struct redact_record, ln));
		if (resuming) {
			args[i].resume.zb_blkid =
			    new_rl->rl_phys->rlp_last_blkid;
			args[i].resume.zb_object =
			    new_rl->rl_phys->rlp_last_object;
		}
		args[i].txg = dsl_dataset_phys(ds)->ds_creation_txg;
		(void) thread_create(NULL, 0, redact_traverse_thread, &args[i],
		    0, curproc, TS_RUN, minclsyspri);
	}
	struct redact_merge_thread_arg rmta = { { {0} } };
	(void) bqueue_init(&rmta.q, zfs_redact_queue_ff,
	    zfs_redact_queue_length, offsetof(struct redact_record, ln));
	rmta.numsnaps = numsnaps;
	rmta.spa = os->os_spa;
	rmta.thr_args = args;
	(void) thread_create(NULL, 0, redact_merge_thread, &rmta, 0, curproc,
	    TS_RUN, minclsyspri);
	err = perform_redaction(os, new_rl, &rmta);
out:
	if (args != NULL) {
		kmem_free(args, numsnaps * sizeof (*args));
	}
	if (new_rl != NULL) {
		dsl_redaction_list_long_rele(new_rl, FTAG);
		dsl_redaction_list_rele(new_rl, FTAG);
	}
	for (int i = 0; i < numsnaps; i++) {
		dsl_dataset_long_rele(redactsnaparr[i], FTAG);
		dsl_dataset_rele_flags(redactsnaparr[i], DS_HOLD_FLAG_DECRYPT,
		    FTAG);
	}

	if (redactsnaparr != NULL) {
		kmem_free(redactsnaparr, fnvlist_num_pairs(redactnvl) *
		    sizeof (dsl_dataset_t *));
	}
	if (dp != NULL)
		dsl_pool_rele(dp, FTAG);
	if (ds != NULL) {
		dsl_dataset_long_rele(ds, FTAG);
		dsl_dataset_rele_flags(ds, DS_HOLD_FLAG_DECRYPT, FTAG);
	}
	return (SET_ERROR(err));
}