/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2011, 2018 by Delphix. All rights reserved.
 * Copyright (c) 2014, Joyent, Inc. All rights reserved.
 * Copyright 2014 HybridCluster. All rights reserved.
 * Copyright 2016 RackTop Systems.
 * Copyright (c) 2016 Actifio, Inc. All rights reserved.
 * Copyright (c) 2019, Klara Inc.
 * Copyright (c) 2019, Allan Jude
 */

#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/spa_impl.h>
#include <sys/zfs_ioctl.h>
#include <sys/zap.h>
#include <sys/zio_checksum.h>
#include <sys/zfs_znode.h>
#include <zfs_fletcher.h>
#include <sys/avl.h>
#include <sys/ddt.h>
#include <sys/zfs_onexit.h>
#include <sys/dmu_send.h>
#include <sys/dmu_recv.h>
#include <sys/dsl_destroy.h>
#include <sys/blkptr.h>
#include <sys/dsl_bookmark.h>
#include <sys/zfeature.h>
#include <sys/bqueue.h>
#include <sys/zvol.h>
#include <sys/policy.h>
#include <sys/objlist.h>
#ifdef _KERNEL
#include <sys/zfs_vfsops.h>
#endif

/* Set this tunable to TRUE to replace corrupt data with 0x2f5baddb10c */
static int zfs_send_corrupt_data = B_FALSE;
/*
 * This tunable controls the amount of data (measured in bytes) that will be
 * prefetched by zfs send. If the main thread is blocking on reads that haven't
 * completed, this variable might need to be increased. If instead the main
 * thread is issuing new reads because the prefetches have fallen out of the
 * cache, this may need to be decreased.
 */
static int zfs_send_queue_length = SPA_MAXBLOCKSIZE;
/*
 * This tunable controls the length of the queues that zfs send worker threads
 * use to communicate. If the send_main_thread is blocking on these queues,
 * this variable may need to be increased. If there is a significant slowdown
 * at the start of a send as these threads consume all the available IO
 * resources, this variable may need to be decreased.
 */
static int zfs_send_no_prefetch_queue_length = 1024 * 1024;
/*
 * These tunables control the fill fraction of the queues by zfs send. The fill
 * fraction controls the frequency with which threads have to be cv_signaled.
 * If a lot of cpu time is being spent on cv_signal, then these should be tuned
 * down. If the queues empty before the signalled thread can catch up, then
 * these should be tuned up.
 */
static int zfs_send_queue_ff = 20;
static int zfs_send_no_prefetch_queue_ff = 20;

/*
 * Use this to override the recordsize calculation for fast zfs send estimates.
 */
static int zfs_override_estimate_recordsize = 0;

/* Set this tunable to FALSE to disable setting of DRR_FLAG_FREERECORDS */
static const boolean_t zfs_send_set_freerecords_bit = B_TRUE;

/* Set this tunable to FALSE to disable sending unmodified spill blocks. */
static int zfs_send_unmodified_spill_blocks = B_TRUE;

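/*
 * Return B_TRUE and store a * b in *c if the multiplication does not
 * overflow; otherwise return B_FALSE and leave *c untouched.
 */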
static inline boolean_t
overflow_multiply(uint64_t a, uint64_t b, uint64_t *c)
{
	uint64_t temp = a * b;
	if (b != 0 && temp / b != a)
		return (B_FALSE);
	*c = temp;
	return (B_TRUE);
}

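/* Arguments to the dataset-traversal thread (see send_traverse_thread()). */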
struct send_thread_arg {
	bqueue_t q;
	objset_t *os;		/* Objset to traverse */
	uint64_t fromtxg;	/* Traverse from this txg */
	int flags;		/* flags to pass to traverse_dataset */
	int error_code;
	boolean_t cancel;
	zbookmark_phys_t resume;
	uint64_t *num_blocks_visited;
};

struct redact_list_thread_arg {
	boolean_t cancel;
	bqueue_t q;
	zbookmark_phys_t resume;
	redaction_list_t *rl;
	boolean_t mark_redact;
	int error_code;
	uint64_t *num_blocks_visited;
};

struct send_merge_thread_arg {
	bqueue_t q;
	objset_t *os;
	struct redact_list_thread_arg *from_arg;
	struct send_thread_arg *to_arg;
	struct redact_list_thread_arg *redact_arg;
	int error;
	boolean_t cancel;
};

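/*
 * A send_range describes a contiguous range of a single object (or of the
 * meta-dnode) and is the unit of work passed through the bqueues that
 * connect the send worker threads.
 */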
struct send_range {
	boolean_t eos_marker; /* Marks the end of the stream */
	uint64_t object;
	uint64_t start_blkid;
	uint64_t end_blkid;
	bqueue_node_t ln;
	enum type {DATA, HOLE, OBJECT, OBJECT_RANGE, REDACT,
	    PREVIOUSLY_REDACTED} type;
	union {
		struct srd {
			dmu_object_type_t obj_type;
			uint32_t datablksz; // logical size
			uint32_t datasz; // payload size
			blkptr_t bp;
			arc_buf_t *abuf;
			abd_t *abd;
			kmutex_t lock;
			kcondvar_t cv;
			boolean_t io_outstanding;
			boolean_t io_compressed;
			int io_err;
		} data;
		struct srh {
			uint32_t datablksz;
		} hole;
		struct sro {
			/*
			 * This is a pointer because embedding it in the
			 * struct causes these structures to be massively larger
			 * for all range types; this makes the code much less
			 * memory efficient.
			 */
			dnode_phys_t *dnp;
			blkptr_t bp;
		} object;
		struct srr {
			uint32_t datablksz;
		} redact;
		struct sror {
			blkptr_t bp;
		} object_range;
	} sru;
};

/*
 * The list of data whose inclusion in a send stream can be pending from
 * one call to backup_cb to another.  Multiple calls to dump_free(),
 * dump_freeobjects(), and dump_redact() can be aggregated into a single
 * DRR_FREE, DRR_FREEOBJECTS, or DRR_REDACT replay record.
 */
typedef enum {
	PENDING_NONE,
	PENDING_FREE,
	PENDING_FREEOBJECTS,
	PENDING_REDACT
} dmu_pendop_t;

typedef struct dmu_send_cookie {
	dmu_replay_record_t *dsc_drr;
	dmu_send_outparams_t *dsc_dso;
	offset_t *dsc_off;
	objset_t *dsc_os;
	zio_cksum_t dsc_zc;
	uint64_t dsc_toguid;
	uint64_t dsc_fromtxg;
	int dsc_err;
	dmu_pendop_t dsc_pending_op;
	uint64_t dsc_featureflags;
	uint64_t dsc_last_data_object;
	uint64_t dsc_last_data_offset;
	uint64_t dsc_resume_object;
	uint64_t dsc_resume_offset;
	boolean_t dsc_sent_begin;
	boolean_t dsc_sent_end;
} dmu_send_cookie_t;

static int do_dump(dmu_send_cookie_t *dscp, struct send_range *range);

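/*
 * Release a send_range: for OBJECT ranges the copied dnode is freed, and
 * for DATA ranges we wait for any outstanding read to finish before
 * dropping the buffers it holds.
 */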
static void
range_free(struct send_range *range)
{
	if (range->type == OBJECT) {
		size_t size = sizeof (dnode_phys_t) *
		    (range->sru.object.dnp->dn_extra_slots + 1);
		kmem_free(range->sru.object.dnp, size);
	} else if (range->type == DATA) {
		mutex_enter(&range->sru.data.lock);
		while (range->sru.data.io_outstanding)
			cv_wait(&range->sru.data.cv, &range->sru.data.lock);
		if (range->sru.data.abd != NULL)
			abd_free(range->sru.data.abd);
		if (range->sru.data.abuf != NULL) {
			arc_buf_destroy(range->sru.data.abuf,
			    &range->sru.data.abuf);
		}
		mutex_exit(&range->sru.data.lock);

		cv_destroy(&range->sru.data.cv);
		mutex_destroy(&range->sru.data.lock);
	}
	kmem_free(range, sizeof (*range));
}

/*
 * For all record types except BEGIN, fill in the checksum (overlaid in
 * drr_u.drr_checksum.drr_checksum).  The checksum verifies everything
 * up to the start of the checksum itself.
 */
static int
dump_record(dmu_send_cookie_t *dscp, void *payload, int payload_len)
{
	dmu_send_outparams_t *dso = dscp->dsc_dso;
	ASSERT3U(offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
	    ==, sizeof (dmu_replay_record_t) - sizeof (zio_cksum_t));
	(void) fletcher_4_incremental_native(dscp->dsc_drr,
	    offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
	    &dscp->dsc_zc);
	if (dscp->dsc_drr->drr_type == DRR_BEGIN) {
		dscp->dsc_sent_begin = B_TRUE;
	} else {
		ASSERT(ZIO_CHECKSUM_IS_ZERO(&dscp->dsc_drr->drr_u.
		    drr_checksum.drr_checksum));
		dscp->dsc_drr->drr_u.drr_checksum.drr_checksum = dscp->dsc_zc;
	}
	if (dscp->dsc_drr->drr_type == DRR_END) {
		dscp->dsc_sent_end = B_TRUE;
	}
	(void) fletcher_4_incremental_native(&dscp->dsc_drr->
	    drr_u.drr_checksum.drr_checksum,
	    sizeof (zio_cksum_t), &dscp->dsc_zc);
	*dscp->dsc_off += sizeof (dmu_replay_record_t);
	dscp->dsc_err = dso->dso_outfunc(dscp->dsc_os, dscp->dsc_drr,
	    sizeof (dmu_replay_record_t), dso->dso_arg);
	if (dscp->dsc_err != 0)
		return (SET_ERROR(EINTR));
	if (payload_len != 0) {
		*dscp->dsc_off += payload_len;
		/*
		 * payload is null when dso_dryrun == B_TRUE (i.e. when we're
		 * doing a send size calculation)
		 */
		if (payload != NULL) {
			(void) fletcher_4_incremental_native(
			    payload, payload_len, &dscp->dsc_zc);
		}

		/*
		 * The code does not rely on this (len being a multiple of 8).
		 * We keep this assertion because of the corresponding assertion
		 * in receive_read().  Keeping this assertion ensures that we do
		 * not inadvertently break backwards compatibility (causing the
		 * assertion in receive_read() to trigger on old software).
		 *
		 * Raw sends cannot be received on old software, and so can
		 * bypass this assertion.
		 */

		ASSERT((payload_len % 8 == 0) ||
		    (dscp->dsc_featureflags & DMU_BACKUP_FEATURE_RAW));

		dscp->dsc_err = dso->dso_outfunc(dscp->dsc_os, payload,
		    payload_len, dso->dso_arg);
		if (dscp->dsc_err != 0)
			return (SET_ERROR(EINTR));
	}
	return (0);
}

/*
 * Fill in the drr_free struct, or perform aggregation if the previous record is
 * also a free record, and the two are adjacent.
 *
 * Note that we send free records even for a full send, because we want to be
 * able to receive a full send as a clone, which requires a list of all the free
 * and freeobject records that were generated on the source.
 */
static int
dump_free(dmu_send_cookie_t *dscp, uint64_t object, uint64_t offset,
    uint64_t length)
{
	struct drr_free *drrf = &(dscp->dsc_drr->drr_u.drr_free);

	/*
	 * When we receive a free record, dbuf_free_range() assumes
	 * that the receiving system doesn't have any dbufs in the range
	 * being freed. This is always true because there is a one-record
	 * constraint: we only send one WRITE record for any given
	 * object,offset.  We know that the one-record constraint is
	 * true because we always send data in increasing order by
	 * object,offset.
	 *
	 * If the increasing-order constraint ever changes, we should find
	 * another way to assert that the one-record constraint is still
	 * satisfied.
	 */
	ASSERT(object > dscp->dsc_last_data_object ||
	    (object == dscp->dsc_last_data_object &&
	    offset > dscp->dsc_last_data_offset));

	/*
	 * If there is a pending op, but it's not PENDING_FREE, push it out,
	 * since free block aggregation can only be done for blocks of the
	 * same type (i.e., DRR_FREE records can only be aggregated with
	 * other DRR_FREE records.  DRR_FREEOBJECTS records can only be
	 * aggregated with other DRR_FREEOBJECTS records).
	 */
	if (dscp->dsc_pending_op != PENDING_NONE &&
	    dscp->dsc_pending_op != PENDING_FREE) {
		if (dump_record(dscp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
		dscp->dsc_pending_op = PENDING_NONE;
	}

	if (dscp->dsc_pending_op == PENDING_FREE) {
		/*
		 * Check to see whether this free block can be aggregated
		 * with pending one.
		 */
		if (drrf->drr_object == object && drrf->drr_offset +
		    drrf->drr_length == offset) {
			if (offset + length < offset || length == UINT64_MAX)
				drrf->drr_length = UINT64_MAX;
			else
				drrf->drr_length += length;
			return (0);
		} else {
			/* not a continuation.  Push out pending record */
			if (dump_record(dscp, NULL, 0) != 0)
				return (SET_ERROR(EINTR));
			dscp->dsc_pending_op = PENDING_NONE;
		}
	}
	/* create a FREE record and make it pending */
	bzero(dscp->dsc_drr, sizeof (dmu_replay_record_t));
	dscp->dsc_drr->drr_type = DRR_FREE;
	drrf->drr_object = object;
	drrf->drr_offset = offset;
	if (offset + length < offset)
		drrf->drr_length = DMU_OBJECT_END;
	else
		drrf->drr_length = length;
	drrf->drr_toguid = dscp->dsc_toguid;
	if (length == DMU_OBJECT_END) {
		if (dump_record(dscp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
	} else {
		dscp->dsc_pending_op = PENDING_FREE;
	}

	return (0);
}

/*
 * Fill in the drr_redact struct, or perform aggregation if the previous record
 * is also a redaction record, and the two are adjacent.
 */
static int
dump_redact(dmu_send_cookie_t *dscp, uint64_t object, uint64_t offset,
    uint64_t length)
{
	struct drr_redact *drrr = &dscp->dsc_drr->drr_u.drr_redact;

	/*
	 * If there is a pending op, but it's not PENDING_REDACT, push it out,
	 * since free block aggregation can only be done for blocks of the
	 * same type (i.e., DRR_REDACT records can only be aggregated with
	 * other DRR_REDACT records).
	 */
	if (dscp->dsc_pending_op != PENDING_NONE &&
	    dscp->dsc_pending_op != PENDING_REDACT) {
		if (dump_record(dscp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
		dscp->dsc_pending_op = PENDING_NONE;
	}

	if (dscp->dsc_pending_op == PENDING_REDACT) {
		/*
		 * Check to see whether this redacted block can be aggregated
		 * with pending one.
		 */
		if (drrr->drr_object == object && drrr->drr_offset +
		    drrr->drr_length == offset) {
			drrr->drr_length += length;
			return (0);
		} else {
			/* not a continuation.  Push out pending record */
			if (dump_record(dscp, NULL, 0) != 0)
				return (SET_ERROR(EINTR));
			dscp->dsc_pending_op = PENDING_NONE;
		}
	}
	/* create a REDACT record and make it pending */
	bzero(dscp->dsc_drr, sizeof (dmu_replay_record_t));
	dscp->dsc_drr->drr_type = DRR_REDACT;
	drrr->drr_object = object;
	drrr->drr_offset = offset;
	drrr->drr_length = length;
	drrr->drr_toguid = dscp->dsc_toguid;
	dscp->dsc_pending_op = PENDING_REDACT;

	return (0);
}

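/*
 * Emit a DRR_WRITE record for a single block, handling the raw and
 * compressed cases as well as the dedup-capable checksum fields.
 */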
static int
dmu_dump_write(dmu_send_cookie_t *dscp, dmu_object_type_t type, uint64_t object,
    uint64_t offset, int lsize, int psize, const blkptr_t *bp,
    boolean_t io_compressed, void *data)
{
	uint64_t payload_size;
	boolean_t raw = (dscp->dsc_featureflags & DMU_BACKUP_FEATURE_RAW);
	struct drr_write *drrw = &(dscp->dsc_drr->drr_u.drr_write);

	/*
	 * We send data in increasing object, offset order.
	 * See comment in dump_free() for details.
	 */
	ASSERT(object > dscp->dsc_last_data_object ||
	    (object == dscp->dsc_last_data_object &&
	    offset > dscp->dsc_last_data_offset));
	dscp->dsc_last_data_object = object;
	dscp->dsc_last_data_offset = offset + lsize - 1;

	/*
	 * If there is any kind of pending aggregation (currently either
	 * a grouping of free objects or free blocks), push it out to
	 * the stream, since aggregation can't be done across operations
	 * of different types.
	 */
	if (dscp->dsc_pending_op != PENDING_NONE) {
		if (dump_record(dscp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
		dscp->dsc_pending_op = PENDING_NONE;
	}
	/* write a WRITE record */
	bzero(dscp->dsc_drr, sizeof (dmu_replay_record_t));
	dscp->dsc_drr->drr_type = DRR_WRITE;
	drrw->drr_object = object;
	drrw->drr_type = type;
	drrw->drr_offset = offset;
	drrw->drr_toguid = dscp->dsc_toguid;
	drrw->drr_logical_size = lsize;

	/* only set the compression fields if the buf is compressed or raw */
	boolean_t compressed =
	    (bp != NULL ? BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF &&
	    io_compressed : lsize != psize);
	if (raw || compressed) {
		ASSERT(raw || dscp->dsc_featureflags &
		    DMU_BACKUP_FEATURE_COMPRESSED);
		ASSERT(!BP_IS_EMBEDDED(bp));
		ASSERT3S(psize, >, 0);

		if (raw) {
			ASSERT(BP_IS_PROTECTED(bp));

			/*
			 * This is a raw protected block so we need to pass
			 * along everything the receiving side will need to
			 * interpret this block, including the byteswap, salt,
			 * IV, and MAC.
			 */
			if (BP_SHOULD_BYTESWAP(bp))
				drrw->drr_flags |= DRR_RAW_BYTESWAP;
			zio_crypt_decode_params_bp(bp, drrw->drr_salt,
			    drrw->drr_iv);
			zio_crypt_decode_mac_bp(bp, drrw->drr_mac);
		} else {
			/* this is a compressed block */
			ASSERT(dscp->dsc_featureflags &
			    DMU_BACKUP_FEATURE_COMPRESSED);
			ASSERT(!BP_SHOULD_BYTESWAP(bp));
			ASSERT(!DMU_OT_IS_METADATA(BP_GET_TYPE(bp)));
			ASSERT3U(BP_GET_COMPRESS(bp), !=, ZIO_COMPRESS_OFF);
			ASSERT3S(lsize, >=, psize);
		}

		/* set fields common to compressed and raw sends */
		drrw->drr_compressiontype = BP_GET_COMPRESS(bp);
		drrw->drr_compressed_size = psize;
		payload_size = drrw->drr_compressed_size;
	} else {
		payload_size = drrw->drr_logical_size;
	}

	if (bp == NULL || BP_IS_EMBEDDED(bp) || (BP_IS_PROTECTED(bp) && !raw)) {
		/*
		 * There's no pre-computed checksum for partial-block writes,
		 * embedded BP's, or encrypted BP's that are being sent as
		 * plaintext, so (like fletcher4-checksummed blocks) userland
		 * will have to compute a dedup-capable checksum itself.
		 */
		drrw->drr_checksumtype = ZIO_CHECKSUM_OFF;
	} else {
		drrw->drr_checksumtype = BP_GET_CHECKSUM(bp);
		if (zio_checksum_table[drrw->drr_checksumtype].ci_flags &
		    ZCHECKSUM_FLAG_DEDUP)
			drrw->drr_flags |= DRR_CHECKSUM_DEDUP;
		DDK_SET_LSIZE(&drrw->drr_key, BP_GET_LSIZE(bp));
		DDK_SET_PSIZE(&drrw->drr_key, BP_GET_PSIZE(bp));
		DDK_SET_COMPRESS(&drrw->drr_key, BP_GET_COMPRESS(bp));
		DDK_SET_CRYPT(&drrw->drr_key, BP_IS_PROTECTED(bp));
		drrw->drr_key.ddk_cksum = bp->blk_cksum;
	}

	if (dump_record(dscp, data, payload_size) != 0)
		return (SET_ERROR(EINTR));
	return (0);
}

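/* Emit a DRR_WRITE_EMBEDDED record for a block with embedded data. */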
static int
dump_write_embedded(dmu_send_cookie_t *dscp, uint64_t object, uint64_t offset,
    int blksz, const blkptr_t *bp)
{
	char buf[BPE_PAYLOAD_SIZE];
	struct drr_write_embedded *drrw =
	    &(dscp->dsc_drr->drr_u.drr_write_embedded);

	if (dscp->dsc_pending_op != PENDING_NONE) {
		if (dump_record(dscp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
		dscp->dsc_pending_op = PENDING_NONE;
	}

	ASSERT(BP_IS_EMBEDDED(bp));

	bzero(dscp->dsc_drr, sizeof (dmu_replay_record_t));
	dscp->dsc_drr->drr_type = DRR_WRITE_EMBEDDED;
	drrw->drr_object = object;
	drrw->drr_offset = offset;
	drrw->drr_length = blksz;
	drrw->drr_toguid = dscp->dsc_toguid;
	drrw->drr_compression = BP_GET_COMPRESS(bp);
	drrw->drr_etype = BPE_GET_ETYPE(bp);
	drrw->drr_lsize = BPE_GET_LSIZE(bp);
	drrw->drr_psize = BPE_GET_PSIZE(bp);

	decode_embedded_bp_compressed(bp, buf);

	if (dump_record(dscp, buf, P2ROUNDUP(drrw->drr_psize, 8)) != 0)
		return (SET_ERROR(EINTR));
	return (0);
}

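/* Emit a DRR_SPILL record for the given spill block. */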
static int
dump_spill(dmu_send_cookie_t *dscp, const blkptr_t *bp, uint64_t object,
    void *data)
{
	struct drr_spill *drrs = &(dscp->dsc_drr->drr_u.drr_spill);
	uint64_t blksz = BP_GET_LSIZE(bp);
	uint64_t payload_size = blksz;

	if (dscp->dsc_pending_op != PENDING_NONE) {
		if (dump_record(dscp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
		dscp->dsc_pending_op = PENDING_NONE;
	}

	/* write a SPILL record */
	bzero(dscp->dsc_drr, sizeof (dmu_replay_record_t));
	dscp->dsc_drr->drr_type = DRR_SPILL;
	drrs->drr_object = object;
	drrs->drr_length = blksz;
	drrs->drr_toguid = dscp->dsc_toguid;

	/* See comment in dump_dnode() for full details */
	if (zfs_send_unmodified_spill_blocks &&
	    (bp->blk_birth <= dscp->dsc_fromtxg)) {
		drrs->drr_flags |= DRR_SPILL_UNMODIFIED;
	}

	/* handle raw send fields */
	if (dscp->dsc_featureflags & DMU_BACKUP_FEATURE_RAW) {
		ASSERT(BP_IS_PROTECTED(bp));

		if (BP_SHOULD_BYTESWAP(bp))
			drrs->drr_flags |= DRR_RAW_BYTESWAP;
		drrs->drr_compressiontype = BP_GET_COMPRESS(bp);
		drrs->drr_compressed_size = BP_GET_PSIZE(bp);
		zio_crypt_decode_params_bp(bp, drrs->drr_salt, drrs->drr_iv);
		zio_crypt_decode_mac_bp(bp, drrs->drr_mac);
		payload_size = drrs->drr_compressed_size;
	}

	if (dump_record(dscp, data, payload_size) != 0)
		return (SET_ERROR(EINTR));
	return (0);
}

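/*
 * Emit (or aggregate into a pending) DRR_FREEOBJECTS record covering
 * numobjs objects starting at firstobj.
 */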
static int
dump_freeobjects(dmu_send_cookie_t *dscp, uint64_t firstobj, uint64_t numobjs)
{
	struct drr_freeobjects *drrfo = &(dscp->dsc_drr->drr_u.drr_freeobjects);
	uint64_t maxobj = DNODES_PER_BLOCK *
	    (DMU_META_DNODE(dscp->dsc_os)->dn_maxblkid + 1);

	/*
	 * ZoL < 0.7 does not handle large FREEOBJECTS records correctly,
	 * leading to zfs recv never completing.  To avoid this issue, don't
	 * send FREEOBJECTS records for object IDs which cannot exist on the
	 * receiving side.
	 */
	if (maxobj > 0) {
		if (maxobj <= firstobj)
			return (0);

		if (maxobj < firstobj + numobjs)
			numobjs = maxobj - firstobj;
	}

	/*
	 * If there is a pending op, but it's not PENDING_FREEOBJECTS,
	 * push it out, since free block aggregation can only be done for
	 * blocks of the same type (i.e., DRR_FREE records can only be
	 * aggregated with other DRR_FREE records.  DRR_FREEOBJECTS records
	 * can only be aggregated with other DRR_FREEOBJECTS records).
	 */
	if (dscp->dsc_pending_op != PENDING_NONE &&
	    dscp->dsc_pending_op != PENDING_FREEOBJECTS) {
		if (dump_record(dscp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
		dscp->dsc_pending_op = PENDING_NONE;
	}

	if (dscp->dsc_pending_op == PENDING_FREEOBJECTS) {
		/*
		 * See whether this free object array can be aggregated
		 * with pending one
		 */
		if (drrfo->drr_firstobj + drrfo->drr_numobjs == firstobj) {
			drrfo->drr_numobjs += numobjs;
			return (0);
		} else {
			/* can't be aggregated.  Push out pending record */
			if (dump_record(dscp, NULL, 0) != 0)
				return (SET_ERROR(EINTR));
			dscp->dsc_pending_op = PENDING_NONE;
		}
	}

	/* write a FREEOBJECTS record */
	bzero(dscp->dsc_drr, sizeof (dmu_replay_record_t));
	dscp->dsc_drr->drr_type = DRR_FREEOBJECTS;
	drrfo->drr_firstobj = firstobj;
	drrfo->drr_numobjs = numobjs;
	drrfo->drr_toguid = dscp->dsc_toguid;

	dscp->dsc_pending_op = PENDING_FREEOBJECTS;

	return (0);
}

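/*
 * Emit a DRR_OBJECT record for the given dnode, followed by a free record
 * for the space past the end of the file and, if enabled, DRR_SPILL
 * records for unmodified spill blocks.
 */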
static int
dump_dnode(dmu_send_cookie_t *dscp, const blkptr_t *bp, uint64_t object,
    dnode_phys_t *dnp)
{
	struct drr_object *drro = &(dscp->dsc_drr->drr_u.drr_object);
	int bonuslen;

	if (object < dscp->dsc_resume_object) {
		/*
		 * Note: when resuming, we will visit all the dnodes in
		 * the block of dnodes that we are resuming from.  In
		 * this case it's unnecessary to send the dnodes prior to
		 * the one we are resuming from.  We should be at most one
		 * block's worth of dnodes behind the resume point.
		 */
		ASSERT3U(dscp->dsc_resume_object - object, <,
		    1 << (DNODE_BLOCK_SHIFT - DNODE_SHIFT));
		return (0);
	}

	if (dnp == NULL || dnp->dn_type == DMU_OT_NONE)
		return (dump_freeobjects(dscp, object, 1));

	if (dscp->dsc_pending_op != PENDING_NONE) {
		if (dump_record(dscp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
		dscp->dsc_pending_op = PENDING_NONE;
	}

	/* write an OBJECT record */
	bzero(dscp->dsc_drr, sizeof (dmu_replay_record_t));
	dscp->dsc_drr->drr_type = DRR_OBJECT;
	drro->drr_object = object;
	drro->drr_type = dnp->dn_type;
	drro->drr_bonustype = dnp->dn_bonustype;
	drro->drr_blksz = dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT;
	drro->drr_bonuslen = dnp->dn_bonuslen;
	drro->drr_dn_slots = dnp->dn_extra_slots + 1;
	drro->drr_checksumtype = dnp->dn_checksum;
	drro->drr_compress = dnp->dn_compress;
	drro->drr_toguid = dscp->dsc_toguid;

	if (!(dscp->dsc_featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) &&
	    drro->drr_blksz > SPA_OLD_MAXBLOCKSIZE)
		drro->drr_blksz = SPA_OLD_MAXBLOCKSIZE;

	bonuslen = P2ROUNDUP(dnp->dn_bonuslen, 8);

	if ((dscp->dsc_featureflags & DMU_BACKUP_FEATURE_RAW)) {
		ASSERT(BP_IS_ENCRYPTED(bp));

		if (BP_SHOULD_BYTESWAP(bp))
			drro->drr_flags |= DRR_RAW_BYTESWAP;

		/* needed for reconstructing dnp on recv side */
		drro->drr_maxblkid = dnp->dn_maxblkid;
		drro->drr_indblkshift = dnp->dn_indblkshift;
		drro->drr_nlevels = dnp->dn_nlevels;
		drro->drr_nblkptr = dnp->dn_nblkptr;

		/*
		 * Since we encrypt the entire bonus area, the (raw) part
		 * beyond the bonuslen is actually nonzero, so we need
		 * to send it.
		 */
		if (bonuslen != 0) {
			drro->drr_raw_bonuslen = DN_MAX_BONUS_LEN(dnp);
			bonuslen = drro->drr_raw_bonuslen;
		}
	}

	/*
	 * DRR_OBJECT_SPILL is set for every dnode which references a
	 * spill block.  This allows the receiving pool to definitively
	 * determine when a spill block should be kept or freed.
	 */
	if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR)
		drro->drr_flags |= DRR_OBJECT_SPILL;

	if (dump_record(dscp, DN_BONUS(dnp), bonuslen) != 0)
		return (SET_ERROR(EINTR));

	/* Free anything past the end of the file. */
	if (dump_free(dscp, object, (dnp->dn_maxblkid + 1) *
	    (dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT), DMU_OBJECT_END) != 0)
		return (SET_ERROR(EINTR));

	/*
	 * Send DRR_SPILL records for unmodified spill blocks.  This is useful
	 * because changing certain attributes of the object (e.g. blocksize)
	 * can cause old versions of ZFS to incorrectly remove a spill block.
	 * Including these records in the stream forces an up to date version
	 * to always be written ensuring they're never lost.  Current versions
	 * of the code which understand the DRR_FLAG_SPILL_BLOCK feature can
	 * ignore these unmodified spill blocks.
	 */
	if (zfs_send_unmodified_spill_blocks &&
	    (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) &&
	    (DN_SPILL_BLKPTR(dnp)->blk_birth <= dscp->dsc_fromtxg)) {
		struct send_range record;
		blkptr_t *bp = DN_SPILL_BLKPTR(dnp);

		bzero(&record, sizeof (struct send_range));
		record.type = DATA;
		record.object = object;
		record.eos_marker = B_FALSE;
		record.start_blkid = DMU_SPILL_BLKID;
		record.end_blkid = record.start_blkid + 1;
		record.sru.data.bp = *bp;
		record.sru.data.obj_type = dnp->dn_type;
		record.sru.data.datablksz = BP_GET_LSIZE(bp);

		if (do_dump(dscp, &record) != 0)
			return (SET_ERROR(EINTR));
	}

	if (dscp->dsc_err != 0)
		return (SET_ERROR(EINTR));

	return (0);
}

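/*
 * Emit a DRR_OBJECT_RANGE record, which carries the crypt parameters for a
 * raw (encrypted) block of dnodes.
 */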
static int
dump_object_range(dmu_send_cookie_t *dscp, const blkptr_t *bp,
    uint64_t firstobj, uint64_t numslots)
{
	struct drr_object_range *drror =
	    &(dscp->dsc_drr->drr_u.drr_object_range);

	/* we only use this record type for raw sends */
	ASSERT(BP_IS_PROTECTED(bp));
	ASSERT(dscp->dsc_featureflags & DMU_BACKUP_FEATURE_RAW);
	ASSERT3U(BP_GET_COMPRESS(bp), ==, ZIO_COMPRESS_OFF);
	ASSERT3U(BP_GET_TYPE(bp), ==, DMU_OT_DNODE);
	ASSERT0(BP_GET_LEVEL(bp));

	if (dscp->dsc_pending_op != PENDING_NONE) {
		if (dump_record(dscp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
		dscp->dsc_pending_op = PENDING_NONE;
	}

	bzero(dscp->dsc_drr, sizeof (dmu_replay_record_t));
	dscp->dsc_drr->drr_type = DRR_OBJECT_RANGE;
	drror->drr_firstobj = firstobj;
	drror->drr_numslots = numslots;
	drror->drr_toguid = dscp->dsc_toguid;
	if (BP_SHOULD_BYTESWAP(bp))
		drror->drr_flags |= DRR_RAW_BYTESWAP;
	zio_crypt_decode_params_bp(bp, drror->drr_salt, drror->drr_iv);
	zio_crypt_decode_mac_bp(bp, drror->drr_mac);

	if (dump_record(dscp, NULL, 0) != 0)
		return (SET_ERROR(EINTR));
	return (0);
}

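/*
 * Decide whether a block can be sent as a DRR_WRITE_EMBEDDED record,
 * based on how it is embedded and the feature flags of the stream.
 */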
static boolean_t
send_do_embed(const blkptr_t *bp, uint64_t featureflags)
{
	if (!BP_IS_EMBEDDED(bp))
		return (B_FALSE);

	/*
	 * Compression function must be legacy, or explicitly enabled.
	 */
	if ((BP_GET_COMPRESS(bp) >= ZIO_COMPRESS_LEGACY_FUNCTIONS &&
	    !(featureflags & DMU_BACKUP_FEATURE_LZ4)))
		return (B_FALSE);

	/*
	 * If we have not set the ZSTD feature flag, we can't send ZSTD
	 * compressed embedded blocks, as the receiver may not support them.
	 */
	if ((BP_GET_COMPRESS(bp) == ZIO_COMPRESS_ZSTD &&
	    !(featureflags & DMU_BACKUP_FEATURE_ZSTD)))
		return (B_FALSE);

	/*
	 * Embed type must be explicitly enabled.
	 */
	switch (BPE_GET_ETYPE(bp)) {
	case BP_EMBEDDED_TYPE_DATA:
		if (featureflags & DMU_BACKUP_FEATURE_EMBED_DATA)
			return (B_TRUE);
		break;
	default:
		return (B_FALSE);
	}
	return (B_FALSE);
}

/*
 * This function actually handles figuring out what kind of record needs to be
 * dumped, and calling the appropriate helper function.  In most cases,
 * the data has already been read by send_reader_thread().
 */
static int
do_dump(dmu_send_cookie_t *dscp, struct send_range *range)
{
	int err = 0;
	switch (range->type) {
	case OBJECT:
		err = dump_dnode(dscp, &range->sru.object.bp, range->object,
		    range->sru.object.dnp);
		return (err);
	case OBJECT_RANGE: {
		ASSERT3U(range->start_blkid + 1, ==, range->end_blkid);
		if (!(dscp->dsc_featureflags & DMU_BACKUP_FEATURE_RAW)) {
			return (0);
		}
		uint64_t epb = BP_GET_LSIZE(&range->sru.object_range.bp) >>
		    DNODE_SHIFT;
		uint64_t firstobj = range->start_blkid * epb;
		err = dump_object_range(dscp, &range->sru.object_range.bp,
		    firstobj, epb);
		break;
	}
	case REDACT: {
		struct srr *srrp = &range->sru.redact;
		err = dump_redact(dscp, range->object, range->start_blkid *
		    srrp->datablksz, (range->end_blkid - range->start_blkid) *
		    srrp->datablksz);
		return (err);
	}
	case DATA: {
		struct srd *srdp = &range->sru.data;
		blkptr_t *bp = &srdp->bp;
		spa_t *spa =
		    dmu_objset_spa(dscp->dsc_os);

		ASSERT3U(srdp->datablksz, ==, BP_GET_LSIZE(bp));
		ASSERT3U(range->start_blkid + 1, ==, range->end_blkid);
		if (BP_GET_TYPE(bp) == DMU_OT_SA) {
			arc_flags_t aflags = ARC_FLAG_WAIT;
			enum zio_flag zioflags = ZIO_FLAG_CANFAIL;

			if (dscp->dsc_featureflags & DMU_BACKUP_FEATURE_RAW) {
				ASSERT(BP_IS_PROTECTED(bp));
				zioflags |= ZIO_FLAG_RAW;
			}

			zbookmark_phys_t zb;
			ASSERT3U(range->start_blkid, ==, DMU_SPILL_BLKID);
			zb.zb_objset = dmu_objset_id(dscp->dsc_os);
			zb.zb_object = range->object;
			zb.zb_level = 0;
			zb.zb_blkid = range->start_blkid;

			arc_buf_t *abuf = NULL;
			if (!dscp->dsc_dso->dso_dryrun && arc_read(NULL, spa,
			    bp, arc_getbuf_func, &abuf, ZIO_PRIORITY_ASYNC_READ,
			    zioflags, &aflags, &zb) != 0)
				return (SET_ERROR(EIO));

			err = dump_spill(dscp, bp, zb.zb_object,
			    (abuf == NULL ? NULL : abuf->b_data));
			if (abuf != NULL)
				arc_buf_destroy(abuf, &abuf);
			return (err);
		}
		if (send_do_embed(bp, dscp->dsc_featureflags)) {
			err = dump_write_embedded(dscp, range->object,
			    range->start_blkid * srdp->datablksz,
			    srdp->datablksz, bp);
			return (err);
		}
		ASSERT(range->object > dscp->dsc_resume_object ||
		    (range->object == dscp->dsc_resume_object &&
		    range->start_blkid * srdp->datablksz >=
		    dscp->dsc_resume_offset));
		/* it's a level-0 block of a regular object */

		mutex_enter(&srdp->lock);
		while (srdp->io_outstanding)
			cv_wait(&srdp->cv, &srdp->lock);
		err = srdp->io_err;
		mutex_exit(&srdp->lock);

		if (err != 0) {
			if (zfs_send_corrupt_data &&
			    !dscp->dsc_dso->dso_dryrun) {
				/*
				 * Send a block filled with 0x"zfs badd bloc"
				 */
				srdp->abuf = arc_alloc_buf(spa, &srdp->abuf,
				    ARC_BUFC_DATA, srdp->datablksz);
				uint64_t *ptr;
				for (ptr = srdp->abuf->b_data;
				    (char *)ptr < (char *)srdp->abuf->b_data +
				    srdp->datablksz; ptr++)
					*ptr = 0x2f5baddb10cULL;
			} else {
				return (SET_ERROR(EIO));
			}
		}

		ASSERT(dscp->dsc_dso->dso_dryrun ||
		    srdp->abuf != NULL || srdp->abd != NULL);

		uint64_t offset = range->start_blkid * srdp->datablksz;

		char *data = NULL;
		if (srdp->abd != NULL) {
			data = abd_to_buf(srdp->abd);
			ASSERT3P(srdp->abuf, ==, NULL);
		} else if (srdp->abuf != NULL) {
			data = srdp->abuf->b_data;
		}

		/*
		 * If we have large blocks stored on disk but the send flags
		 * don't allow us to send large blocks, we split the data from
		 * the arc buf into chunks.
		 */
		if (srdp->datablksz > SPA_OLD_MAXBLOCKSIZE &&
		    !(dscp->dsc_featureflags &
		    DMU_BACKUP_FEATURE_LARGE_BLOCKS)) {
			while (srdp->datablksz > 0 && err == 0) {
				int n = MIN(srdp->datablksz,
				    SPA_OLD_MAXBLOCKSIZE);
				err = dmu_dump_write(dscp, srdp->obj_type,
				    range->object, offset, n, n, NULL, B_FALSE,
				    data);
				offset += n;
				/*
				 * When doing dry run, data==NULL is used as a
				 * sentinel value by
				 * dmu_dump_write()->dump_record().
				 */
				if (data != NULL)
					data += n;
				srdp->datablksz -= n;
			}
		} else {
			err = dmu_dump_write(dscp, srdp->obj_type,
			    range->object, offset,
			    srdp->datablksz, srdp->datasz, bp,
			    srdp->io_compressed, data);
		}
		return (err);
	}
	case HOLE: {
		struct srh *srhp = &range->sru.hole;
		if (range->object == DMU_META_DNODE_OBJECT) {
			uint32_t span = srhp->datablksz >> DNODE_SHIFT;
			uint64_t first_obj = range->start_blkid * span;
			uint64_t numobj = range->end_blkid * span - first_obj;
			return (dump_freeobjects(dscp, first_obj, numobj));
		}
		uint64_t offset = 0;

		/*
		 * If this multiply overflows, we don't need to send this block.
		 * Even if it has a birth time, it can never not be a hole, so
		 * we don't need to send records for it.
		 */
		if (!overflow_multiply(range->start_blkid, srhp->datablksz,
		    &offset)) {
			return (0);
		}
		uint64_t len = 0;

		if (!overflow_multiply(range->end_blkid, srhp->datablksz, &len))
			len = UINT64_MAX;
		len = len - offset;
		return (dump_free(dscp, range->object, offset, len));
	}
	default:
		panic("Invalid range type in do_dump: %d", range->type);
	}
	return (err);
}

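/*
 * Allocate and initialize a new send_range; DATA ranges additionally get
 * the lock and cv used to track their outstanding I/O.
 */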
static struct send_range *
range_alloc(enum type type, uint64_t object, uint64_t start_blkid,
    uint64_t end_blkid, boolean_t eos)
{
	struct send_range *range = kmem_alloc(sizeof (*range), KM_SLEEP);
	range->type = type;
	range->object = object;
	range->start_blkid = start_blkid;
	range->end_blkid = end_blkid;
	range->eos_marker = eos;
	if (type == DATA) {
		range->sru.data.abd = NULL;
		range->sru.data.abuf = NULL;
		mutex_init(&range->sru.data.lock, NULL, MUTEX_DEFAULT, NULL);
		cv_init(&range->sru.data.cv, NULL, CV_DEFAULT, NULL);
		range->sru.data.io_outstanding = 0;
		range->sru.data.io_err = 0;
		range->sru.data.io_compressed = B_FALSE;
	}
	return (range);
}

/*
 * This is the callback function to traverse_dataset that acts as a worker
 * thread for dmu_send_impl.
 */
static int
send_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_phys_t *zb, const struct dnode_phys *dnp, void *arg)
{
	(void) zilog;
	struct send_thread_arg *sta = arg;
	struct send_range *record;

	ASSERT(zb->zb_object == DMU_META_DNODE_OBJECT ||
	    zb->zb_object >= sta->resume.zb_object);

	/*
	 * All bps of an encrypted os should have the encryption bit set.
	 * If this is not true it indicates tampering and we report an error.
	 */
	if (sta->os->os_encrypted &&
	    !BP_IS_HOLE(bp) && !BP_USES_CRYPT(bp)) {
		spa_log_error(spa, zb);
		zfs_panic_recover("unencrypted block in encrypted "
		    "object set %llu", dmu_objset_id(sta->os));
		return (SET_ERROR(EIO));
	}

	if (sta->cancel)
		return (SET_ERROR(EINTR));
	if (zb->zb_object != DMU_META_DNODE_OBJECT &&
	    DMU_OBJECT_IS_SPECIAL(zb->zb_object))
		return (0);
	atomic_inc_64(sta->num_blocks_visited);

	if (zb->zb_level == ZB_DNODE_LEVEL) {
		if (zb->zb_object == DMU_META_DNODE_OBJECT)
			return (0);
		record = range_alloc(OBJECT, zb->zb_object, 0, 0, B_FALSE);
		record->sru.object.bp = *bp;
		size_t size = sizeof (*dnp) * (dnp->dn_extra_slots + 1);
		record->sru.object.dnp = kmem_alloc(size, KM_SLEEP);
		bcopy(dnp, record->sru.object.dnp, size);
		bqueue_enqueue(&sta->q, record, sizeof (*record));
		return (0);
	}
	if (zb->zb_level == 0 && zb->zb_object == DMU_META_DNODE_OBJECT &&
	    !BP_IS_HOLE(bp)) {
		record = range_alloc(OBJECT_RANGE, 0, zb->zb_blkid,
		    zb->zb_blkid + 1, B_FALSE);
		record->sru.object_range.bp = *bp;
		bqueue_enqueue(&sta->q, record, sizeof (*record));
		return (0);
	}
	if (zb->zb_level < 0 || (zb->zb_level > 0 && !BP_IS_HOLE(bp)))
		return (0);
	if (zb->zb_object == DMU_META_DNODE_OBJECT && !BP_IS_HOLE(bp))
		return (0);

	uint64_t span = bp_span_in_blocks(dnp->dn_indblkshift, zb->zb_level);
	uint64_t start;

	/*
	 * If this multiply overflows, we don't need to send this block.
	 * Even if it has a birth time, it can never not be a hole, so
	 * we don't need to send records for it.
	 */
	if (!overflow_multiply(span, zb->zb_blkid, &start) || (!(zb->zb_blkid ==
	    DMU_SPILL_BLKID || DMU_OT_IS_METADATA(dnp->dn_type)) &&
	    span * zb->zb_blkid > dnp->dn_maxblkid)) {
		ASSERT(BP_IS_HOLE(bp));
		return (0);
	}

	if (zb->zb_blkid == DMU_SPILL_BLKID)
		ASSERT3U(BP_GET_TYPE(bp), ==, DMU_OT_SA);

	enum type record_type = DATA;
	if (BP_IS_HOLE(bp))
		record_type = HOLE;
	else if (BP_IS_REDACTED(bp))
		record_type = REDACT;
	else
		record_type = DATA;

	record = range_alloc(record_type, zb->zb_object, start,
	    (start + span < start ? 0 : start + span), B_FALSE);

	uint64_t datablksz = (zb->zb_blkid == DMU_SPILL_BLKID ?
	    BP_GET_LSIZE(bp) : dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT);

	if (BP_IS_HOLE(bp)) {
		record->sru.hole.datablksz = datablksz;
	} else if (BP_IS_REDACTED(bp)) {
		record->sru.redact.datablksz = datablksz;
	} else {
		record->sru.data.datablksz = datablksz;
		record->sru.data.obj_type = dnp->dn_type;
		record->sru.data.bp = *bp;
	}

	bqueue_enqueue(&sta->q, record, sizeof (*record));
	return (0);
}

struct redact_list_cb_arg {
	uint64_t *num_blocks_visited;
	bqueue_t *q;
	boolean_t *cancel;
	boolean_t mark_redact;
};

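/*
 * Callback for dsl_redaction_list_traverse; converts each redacted block
 * entry into a send_range and enqueues it on the caller-supplied queue.
 */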
static int
redact_list_cb(redact_block_phys_t *rb, void *arg)
{
	struct redact_list_cb_arg *rlcap = arg;

	atomic_inc_64(rlcap->num_blocks_visited);
	if (*rlcap->cancel)
		return (-1);

	struct send_range *data = range_alloc(REDACT, rb->rbp_object,
	    rb->rbp_blkid, rb->rbp_blkid + redact_block_get_count(rb), B_FALSE);
	ASSERT3U(data->end_blkid, >, rb->rbp_blkid);
	if (rlcap->mark_redact) {
		data->type = REDACT;
		data->sru.redact.datablksz = redact_block_get_size(rb);
	} else {
		data->type = PREVIOUSLY_REDACTED;
	}
	bqueue_enqueue(rlcap->q, data, sizeof (*data));

	return (0);
}

/*
 * This function kicks off the traverse_dataset.  It also handles setting the
 * error code of the thread in case something goes wrong, and pushes the End of
 * Stream record when the traverse_dataset call has finished.
 */
static void
send_traverse_thread(void *arg)
{
	struct send_thread_arg *st_arg = arg;
	int err = 0;
	struct send_range *data;
	fstrans_cookie_t cookie = spl_fstrans_mark();

	err = traverse_dataset_resume(st_arg->os->os_dsl_dataset,
	    st_arg->fromtxg, &st_arg->resume,
	    st_arg->flags, send_cb, st_arg);

	if (err != EINTR)
		st_arg->error_code = err;
	data = range_alloc(DATA, 0, 0, 0, B_TRUE);
	bqueue_enqueue_flush(&st_arg->q, data, sizeof (*data));
	spl_fstrans_unmark(cookie);
	thread_exit();
}

/*
 * Utility function that causes End of Stream records to compare after all
 * others, so that other threads' comparison logic can stay simple.
 */
static int __attribute__((unused))
send_range_after(const struct send_range *from, const struct send_range *to)
{
	if (from->eos_marker == B_TRUE)
		return (1);
	if (to->eos_marker == B_TRUE)
		return (-1);

	uint64_t from_obj = from->object;
	uint64_t from_end_obj = from->object + 1;
	uint64_t to_obj = to->object;
	uint64_t to_end_obj = to->object + 1;
	if (from_obj == 0) {
		ASSERT(from->type == HOLE || from->type == OBJECT_RANGE);
		from_obj = from->start_blkid << DNODES_PER_BLOCK_SHIFT;
		from_end_obj = from->end_blkid << DNODES_PER_BLOCK_SHIFT;
	}
	if (to_obj == 0) {
		ASSERT(to->type == HOLE || to->type == OBJECT_RANGE);
		to_obj = to->start_blkid << DNODES_PER_BLOCK_SHIFT;
		to_end_obj = to->end_blkid << DNODES_PER_BLOCK_SHIFT;
	}

	if (from_end_obj <= to_obj)
		return (-1);
	if (from_obj >= to_end_obj)
		return (1);
	int64_t cmp = TREE_CMP(to->type == OBJECT_RANGE, from->type ==
	    OBJECT_RANGE);
	if (unlikely(cmp))
		return (cmp);
	cmp = TREE_CMP(to->type == OBJECT, from->type == OBJECT);
	if (unlikely(cmp))
		return (cmp);
	if (from->end_blkid <= to->start_blkid)
		return (-1);
	if (from->start_blkid >= to->end_blkid)
		return (1);
	return (0);
}

/*
 * Pop the new data off the queue, check that the records we receive are in
 * the right order, but do not free the old data.  This is used so that the
 * records can be sent on to the main thread without copying the data.
 */
static struct send_range *
get_next_range_nofree(bqueue_t *bq, struct send_range *prev)
{
	struct send_range *next = bqueue_dequeue(bq);
	ASSERT3S(send_range_after(prev, next), ==, -1);
	return (next);
}

/*
 * Pop the new data off the queue, check that the records we receive are in
 * the right order, and free the old data.
 */
static struct send_range *
get_next_range(bqueue_t *bq, struct send_range *prev)
{
	struct send_range *next = get_next_range_nofree(bq, prev);
	range_free(prev);
	return (next);
}

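/*
 * Thread that walks a redaction list, feeding its entries into a bqueue via
 * redact_list_cb, and enqueues an End of Stream record when done.
 */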
static void
redact_list_thread(void *arg)
{
	struct redact_list_thread_arg *rlt_arg = arg;
	struct send_range *record;
	fstrans_cookie_t cookie = spl_fstrans_mark();
	if (rlt_arg->rl != NULL) {
		struct redact_list_cb_arg rlcba = {0};
		rlcba.cancel = &rlt_arg->cancel;
		rlcba.q = &rlt_arg->q;
		rlcba.num_blocks_visited = rlt_arg->num_blocks_visited;
		rlcba.mark_redact = rlt_arg->mark_redact;
		int err = dsl_redaction_list_traverse(rlt_arg->rl,
		    &rlt_arg->resume, redact_list_cb, &rlcba);
		if (err != EINTR)
			rlt_arg->error_code = err;
	}
	record = range_alloc(DATA, 0, 0, 0, B_TRUE);
	bqueue_enqueue_flush(&rlt_arg->q, record, sizeof (*record));
	spl_fstrans_unmark(cookie);

	thread_exit();
}

1349 | /* | |
1350 | * Compare the start point of the two provided ranges. End of stream ranges | |
1351 | * compare last; objects compare before any data or hole inside that object,
1352 | * and before multi-object holes that start at the same object.
1353 | */ | |
1354 | static int | |
1355 | send_range_start_compare(struct send_range *r1, struct send_range *r2) | |
1356 | { | |
1357 | uint64_t r1_objequiv = r1->object; | |
1358 | uint64_t r1_l0equiv = r1->start_blkid; | |
1359 | uint64_t r2_objequiv = r2->object; | |
1360 | uint64_t r2_l0equiv = r2->start_blkid; | |
ca577779 | 1361 | int64_t cmp = TREE_CMP(r1->eos_marker, r2->eos_marker); |
30af21b0 PD |
1362 | if (unlikely(cmp)) |
1363 | return (cmp); | |
1364 | if (r1->object == 0) { | |
1365 | r1_objequiv = r1->start_blkid * DNODES_PER_BLOCK; | |
1366 | r1_l0equiv = 0; | |
1367 | } | |
1368 | if (r2->object == 0) { | |
1369 | r2_objequiv = r2->start_blkid * DNODES_PER_BLOCK; | |
1370 | r2_l0equiv = 0; | |
1371 | } | |
1372 | ||
ca577779 | 1373 | cmp = TREE_CMP(r1_objequiv, r2_objequiv); |
30af21b0 PD |
1374 | if (likely(cmp)) |
1375 | return (cmp); | |
ca577779 | 1376 | cmp = TREE_CMP(r2->type == OBJECT_RANGE, r1->type == OBJECT_RANGE); |
30af21b0 PD |
1377 | if (unlikely(cmp)) |
1378 | return (cmp); | |
ca577779 | 1379 | cmp = TREE_CMP(r2->type == OBJECT, r1->type == OBJECT); |
30af21b0 PD |
1380 | if (unlikely(cmp)) |
1381 | return (cmp); | |
1382 | ||
ca577779 | 1383 | return (TREE_CMP(r1_l0equiv, r2_l0equiv)); |
30af21b0 PD |
1384 | } |
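/*
 * Worked example of the object-0 normalization above, as a guarded sketch.
 * DNODES_PER_BLOCK is assumed to be 32 here (the usual value for 16K
 * metadnode blocks and 512-byte dnodes); the real constant comes from the
 * dnode headers.  A HOLE in the meta-dnode with start_blkid == 3 covers
 * dnodes [96, 128), so for sorting purposes it behaves like object 96.
 */
#if 0
#include <assert.h>

#define	TOY_DNODES_PER_BLOCK	32	/* assumed value for illustration */

int
main(void)
{
	unsigned long long start_blkid = 3;
	unsigned long long objequiv = start_blkid * TOY_DNODES_PER_BLOCK;

	assert(objequiv == 96);		/* sorts as object 96, l0equiv 0 */
	return (0);
}
#endif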
1385 | ||
1386 | enum q_idx { | |
1387 | REDACT_IDX = 0, | |
1388 | TO_IDX, | |
1389 | FROM_IDX, | |
1390 | NUM_THREADS | |
1391 | }; | |
1392 | ||
1393 | /* | |
1394 | * This function returns the next range the send_merge_thread should operate on. | |
1395 | * The inputs are two arrays; the first one stores the range at the front of the | |
1396 | * queues stored in the second one. The ranges are sorted in descending | |
1397 | * priority order; the metadata from earlier ranges overrules metadata from | |
1398 | * later ranges. out_mask is used to return which threads the ranges came from; | |
1399 | * bit i is set if ranges[i] started at the same place as the returned range. | |
1400 | * | |
1401 | * This code is not hardcoded to compare a specific number of threads; it could | |
1402 | * be used with any number, just by changing the q_idx enum. | |
1403 | * | |
1404 | * The "next range" is the one with the earliest start; if two starts are equal, | |
1405 | * the highest-priority range is the next to operate on. If a higher-priority | |
1406 | * range starts in the middle of the first range, then the first range will be | |
1407 | * truncated to end where the higher-priority range starts, and we will operate | |
1408 | * on that one next time. In this way, we make sure that each block covered by | |
1409 | * some range gets covered by a returned range, and each block covered is | |
1410 | * returned using the metadata of the highest-priority range it appears in. | |
1411 | * | |
1412 | * For example, if the three ranges at the front of the queues were [2,4), | |
1413 | * [3,5), and [1,3), then the ranges returned would be [1,2) with the metadata | |
1414 | * from the third range, [2,4) with the metadata from the first range, and then | |
1415 | * [4,5) with the metadata from the second. | |
1416 | */ | |
1417 | static struct send_range * | |
1418 | find_next_range(struct send_range **ranges, bqueue_t **qs, uint64_t *out_mask) | |
1419 | { | |
1420 | int idx = 0; /* index of the range with the earliest start */
1421 | int i; | |
1422 | uint64_t bmask = 0; | |
1423 | for (i = 1; i < NUM_THREADS; i++) { | |
1424 | if (send_range_start_compare(ranges[i], ranges[idx]) < 0) | |
1425 | idx = i; | |
1426 | } | |
1427 | if (ranges[idx]->eos_marker) { | |
1428 | struct send_range *ret = range_alloc(DATA, 0, 0, 0, B_TRUE); | |
1429 | *out_mask = 0; | |
1430 | return (ret); | |
1431 | } | |
1432 | /* | |
1433 | * Find all the ranges that start at that same point. | |
1434 | */ | |
1435 | for (i = 0; i < NUM_THREADS; i++) { | |
1436 | if (send_range_start_compare(ranges[i], ranges[idx]) == 0) | |
1437 | bmask |= 1 << i; | |
1438 | } | |
1439 | *out_mask = bmask; | |
1440 | /* | |
1441 | * OBJECT_RANGE records only come from the TO thread, and should always | |
1442 | * be treated as overlapping with nothing and sent on immediately. They | |
1443 | * are only used in raw sends, and are never redacted. | |
1444 | */ | |
1445 | if (ranges[idx]->type == OBJECT_RANGE) { | |
1446 | ASSERT3U(idx, ==, TO_IDX); | |
1447 | ASSERT3U(*out_mask, ==, 1 << TO_IDX); | |
1448 | struct send_range *ret = ranges[idx]; | |
1449 | ranges[idx] = get_next_range_nofree(qs[idx], ranges[idx]); | |
1450 | return (ret); | |
1451 | } | |
1452 | /* | |
1453 | * Find the first start or end point after the start of the first range. | |
1454 | */ | |
1455 | uint64_t first_change = ranges[idx]->end_blkid; | |
1456 | for (i = 0; i < NUM_THREADS; i++) { | |
1457 | if (i == idx || ranges[i]->eos_marker || | |
1458 | ranges[i]->object > ranges[idx]->object || | |
1459 | ranges[i]->object == DMU_META_DNODE_OBJECT) | |
1460 | continue; | |
1461 | ASSERT3U(ranges[i]->object, ==, ranges[idx]->object); | |
1462 | if (first_change > ranges[i]->start_blkid && | |
1463 | (bmask & (1 << i)) == 0) | |
1464 | first_change = ranges[i]->start_blkid; | |
1465 | else if (first_change > ranges[i]->end_blkid) | |
1466 | first_change = ranges[i]->end_blkid; | |
1467 | } | |
1468 | /* | |
1469 | * Update all ranges to no longer overlap with the range we're | |
1470 | * returning. All such ranges must start at the same place as the range | |
1471 | * being returned, and end at or after first_change. Thus we update | |
1472 | * their start to first_change. If that makes them size 0, then free | |
1473 | * them and pull a new range from that thread. | |
1474 | */ | |
1475 | for (i = 0; i < NUM_THREADS; i++) { | |
1476 | if (i == idx || (bmask & (1 << i)) == 0) | |
1477 | continue; | |
1478 | ASSERT3U(first_change, >, ranges[i]->start_blkid); | |
1479 | ranges[i]->start_blkid = first_change; | |
1480 | ASSERT3U(ranges[i]->start_blkid, <=, ranges[i]->end_blkid); | |
1481 | if (ranges[i]->start_blkid == ranges[i]->end_blkid) | |
1482 | ranges[i] = get_next_range(qs[i], ranges[i]); | |
1483 | } | |
1484 | /* | |
1485 | * Short-circuit the simple case; if the range doesn't overlap with | |
1486 | * anything else, or it only overlaps with things that start at the same | |
1487 | * place and are longer, send it on. | |
1488 | */ | |
1489 | if (first_change == ranges[idx]->end_blkid) { | |
1490 | struct send_range *ret = ranges[idx]; | |
1491 | ranges[idx] = get_next_range_nofree(qs[idx], ranges[idx]); | |
1492 | return (ret); | |
1493 | } | |
1494 | ||
1495 | /* | |
1496 | * Otherwise, return a truncated copy of ranges[idx] and move the start | |
1497 | * of ranges[idx] back to first_change. | |
1498 | */ | |
1499 | struct send_range *ret = kmem_alloc(sizeof (*ret), KM_SLEEP); | |
1500 | *ret = *ranges[idx]; | |
1501 | ret->end_blkid = first_change; | |
1502 | ranges[idx]->start_blkid = first_change; | |
1503 | return (ret); | |
1504 | } | |
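/*
 * Guarded sketch of the splitting policy documented above, replaying the
 * [2,4), [3,5), [1,3) example with plain intervals (queues, metadata and
 * eos handling are omitted).  Priority is the array index, matching the
 * q_idx ordering.  The output is [1,2) from range 2, [2,3) and [3,4)
 * from range 0, and [4,5) from range 1; the two middle pieces coalesce
 * into the [2,4) of the comment above.
 */
#if 0
#include <stdio.h>

struct toy_iv {
	unsigned start, end;
};

int
main(void)
{
	struct toy_iv iv[3] = { { 2, 4 }, { 3, 5 }, { 1, 3 } };
	unsigned pos = 1;	/* earliest start across all inputs */
	unsigned eos = 5;	/* latest end */

	while (pos < eos) {
		/* Highest-priority (lowest index) range covering pos. */
		int idx = -1;
		for (int i = 0; i < 3; i++) {
			if (iv[i].start <= pos && pos < iv[i].end) {
				idx = i;
				break;
			}
		}
		/* Truncate at the next start or end point after pos. */
		unsigned next = eos;
		for (int i = 0; i < 3; i++) {
			if (iv[i].start > pos && iv[i].start < next)
				next = iv[i].start;
			if (iv[i].end > pos && iv[i].end < next)
				next = iv[i].end;
		}
		printf("[%u,%u) from range %d\n", pos, next, idx);
		pos = next;
	}
	return (0);
}
#endif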
1505 | ||
1506 | #define FROM_AND_REDACT_BITS ((1 << REDACT_IDX) | (1 << FROM_IDX)) | |
1507 | ||
1508 | /* | |
1509 | * Merge the results from the from thread and the to thread, and then hand the | |
1510 | * records off to send_reader_thread to prefetch them. If this is not a
1511 | * send from a redaction bookmark, the from thread will push an end of stream | |
1512 | * record and stop, and we'll just send everything that was changed in the | |
1513 | * to_ds since the ancestor's creation txg. If it is, then since | |
1514 | * traverse_dataset has a canonical order, we can compare each change as | |
1515 | * they're pulled off the queues. That will give us a stream that is | |
1516 | * appropriately sorted, and covers all records. In addition, we pull the | |
1517 | * data from the redact_list_thread and use that to determine which blocks | |
1518 | * should be redacted. | |
1519 | */ | |
1520 | static void | |
1521 | send_merge_thread(void *arg) | |
1522 | { | |
1523 | struct send_merge_thread_arg *smt_arg = arg; | |
1524 | struct send_range *front_ranges[NUM_THREADS]; | |
1525 | bqueue_t *queues[NUM_THREADS]; | |
1526 | int err = 0; | |
1527 | fstrans_cookie_t cookie = spl_fstrans_mark(); | |
1528 | ||
1529 | if (smt_arg->redact_arg == NULL) { | |
1530 | front_ranges[REDACT_IDX] = | |
1531 | kmem_zalloc(sizeof (struct send_range), KM_SLEEP); | |
1532 | front_ranges[REDACT_IDX]->eos_marker = B_TRUE; | |
1533 | front_ranges[REDACT_IDX]->type = REDACT; | |
1534 | queues[REDACT_IDX] = NULL; | |
1535 | } else { | |
1536 | front_ranges[REDACT_IDX] = | |
1537 | bqueue_dequeue(&smt_arg->redact_arg->q); | |
1538 | queues[REDACT_IDX] = &smt_arg->redact_arg->q; | |
1539 | } | |
1540 | front_ranges[TO_IDX] = bqueue_dequeue(&smt_arg->to_arg->q); | |
1541 | queues[TO_IDX] = &smt_arg->to_arg->q; | |
1542 | front_ranges[FROM_IDX] = bqueue_dequeue(&smt_arg->from_arg->q); | |
1543 | queues[FROM_IDX] = &smt_arg->from_arg->q; | |
1544 | uint64_t mask = 0; | |
1545 | struct send_range *range; | |
1546 | for (range = find_next_range(front_ranges, queues, &mask); | |
1547 | !range->eos_marker && err == 0 && !smt_arg->cancel; | |
1548 | range = find_next_range(front_ranges, queues, &mask)) { | |
1549 | /* | |
1550 | * If the range in question was in both the from redact bookmark | |
1551 | * and the bookmark we're using to redact, then don't send it. | |
1552 | * It's already redacted on the receiving system, so a redaction | |
1553 | * record would be redundant. | |
1554 | */ | |
1555 | if ((mask & FROM_AND_REDACT_BITS) == FROM_AND_REDACT_BITS) { | |
1556 | ASSERT3U(range->type, ==, REDACT); | |
1557 | range_free(range); | |
1558 | continue; | |
1559 | } | |
1560 | bqueue_enqueue(&smt_arg->q, range, sizeof (*range)); | |
1561 | ||
1562 | if (smt_arg->to_arg->error_code != 0) { | |
1563 | err = smt_arg->to_arg->error_code; | |
1564 | } else if (smt_arg->from_arg->error_code != 0) { | |
1565 | err = smt_arg->from_arg->error_code; | |
1566 | } else if (smt_arg->redact_arg != NULL && | |
1567 | smt_arg->redact_arg->error_code != 0) { | |
1568 | err = smt_arg->redact_arg->error_code; | |
1569 | } | |
1570 | } | |
1571 | if (smt_arg->cancel && err == 0) | |
1572 | err = SET_ERROR(EINTR); | |
1573 | smt_arg->error = err; | |
1574 | if (smt_arg->error != 0) { | |
1575 | smt_arg->to_arg->cancel = B_TRUE; | |
1576 | smt_arg->from_arg->cancel = B_TRUE; | |
1577 | if (smt_arg->redact_arg != NULL) | |
1578 | smt_arg->redact_arg->cancel = B_TRUE; | |
1579 | } | |
1580 | for (int i = 0; i < NUM_THREADS; i++) { | |
1581 | while (!front_ranges[i]->eos_marker) { | |
1582 | front_ranges[i] = get_next_range(queues[i], | |
1583 | front_ranges[i]); | |
1584 | } | |
1585 | range_free(front_ranges[i]); | |
1586 | } | |
1587 | if (range == NULL) | |
1588 | range = kmem_zalloc(sizeof (*range), KM_SLEEP); | |
1589 | range->eos_marker = B_TRUE; | |
1590 | bqueue_enqueue_flush(&smt_arg->q, range, 1); | |
1591 | spl_fstrans_unmark(cookie); | |
1592 | thread_exit(); | |
1593 | } | |
1594 | ||
1dc32a67 | 1595 | struct send_reader_thread_arg { |
30af21b0 PD |
1596 | struct send_merge_thread_arg *smta; |
1597 | bqueue_t q; | |
1598 | boolean_t cancel; | |
1dc32a67 MA |
1599 | boolean_t issue_reads; |
1600 | uint64_t featureflags; | |
30af21b0 PD |
1601 | int error; |
1602 | }; | |
1603 | ||
1dc32a67 MA |
1604 | static void |
1605 | dmu_send_read_done(zio_t *zio) | |
1606 | { | |
1607 | struct send_range *range = zio->io_private; | |
1608 | ||
1609 | mutex_enter(&range->sru.data.lock); | |
1610 | if (zio->io_error != 0) { | |
1611 | abd_free(range->sru.data.abd); | |
1612 | range->sru.data.abd = NULL; | |
1613 | range->sru.data.io_err = zio->io_error; | |
1614 | } | |
1615 | ||
1616 | ASSERT(range->sru.data.io_outstanding); | |
1617 | range->sru.data.io_outstanding = B_FALSE; | |
1618 | cv_broadcast(&range->sru.data.cv); | |
1619 | mutex_exit(&range->sru.data.lock); | |
1620 | } | |
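/*
 * For context, a guarded sketch of the consumer side of this completion
 * protocol (an assumed shape, not code from this file): whoever needs the
 * data blocks on the cv until the callback above clears io_outstanding,
 * then checks io_err to see whether abd is valid.
 */
#if 0
	mutex_enter(&range->sru.data.lock);
	while (range->sru.data.io_outstanding)
		cv_wait(&range->sru.data.cv, &range->sru.data.lock);
	mutex_exit(&range->sru.data.lock);
	/* Now either abd holds the data or io_err holds the failure. */
#endif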
1621 | ||
1622 | static void | |
1623 | issue_data_read(struct send_reader_thread_arg *srta, struct send_range *range) | |
1624 | { | |
1625 | struct srd *srdp = &range->sru.data; | |
1626 | blkptr_t *bp = &srdp->bp; | |
1627 | objset_t *os = srta->smta->os; | |
1628 | ||
1629 | ASSERT3U(range->type, ==, DATA); | |
1630 | ASSERT3U(range->start_blkid + 1, ==, range->end_blkid); | |
1631 | /* | |
1632 | * If we have large blocks stored on disk but | |
1633 | * the send flags don't allow us to send large | |
1634 | * blocks, we split the data from the arc buf | |
1635 | * into chunks. | |
1636 | */ | |
1637 | boolean_t split_large_blocks = | |
1638 | srdp->datablksz > SPA_OLD_MAXBLOCKSIZE && | |
1639 | !(srta->featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS); | |
1640 | /* | |
1641 | * We should only request compressed data from the ARC if all | |
1642 | * the following are true: | |
1643 | * - stream compression was requested | |
1644 | * - we aren't splitting large blocks into smaller chunks | |
1645 | * - the data won't need to be byteswapped before sending | |
1646 | * - this isn't an embedded block | |
1647 | * - this isn't metadata (if receiving on a different endian | |
1648 | * system it can be byteswapped more easily) | |
1649 | */ | |
1650 | boolean_t request_compressed = | |
1651 | (srta->featureflags & DMU_BACKUP_FEATURE_COMPRESSED) && | |
1652 | !split_large_blocks && !BP_SHOULD_BYTESWAP(bp) && | |
1653 | !BP_IS_EMBEDDED(bp) && !DMU_OT_IS_METADATA(BP_GET_TYPE(bp)); | |
1654 | ||
1655 | enum zio_flag zioflags = ZIO_FLAG_CANFAIL; | |
1656 | ||
37602733 | 1657 | if (srta->featureflags & DMU_BACKUP_FEATURE_RAW) { |
1dc32a67 | 1658 | zioflags |= ZIO_FLAG_RAW; |
37602733 PD |
1659 | srdp->io_compressed = B_TRUE; |
1660 | } else if (request_compressed) { | |
1dc32a67 | 1661 | zioflags |= ZIO_FLAG_RAW_COMPRESS; |
37602733 PD |
1662 | srdp->io_compressed = B_TRUE; |
1663 | } | |
1dc32a67 MA |
1664 | |
1665 | srdp->datasz = (zioflags & ZIO_FLAG_RAW_COMPRESS) ? | |
1666 | BP_GET_PSIZE(bp) : BP_GET_LSIZE(bp); | |
1667 | ||
1668 | if (!srta->issue_reads) | |
1669 | return; | |
1670 | if (BP_IS_REDACTED(bp)) | |
1671 | return; | |
1672 | if (send_do_embed(bp, srta->featureflags)) | |
1673 | return; | |
1674 | ||
1675 | zbookmark_phys_t zb = { | |
1676 | .zb_objset = dmu_objset_id(os), | |
1677 | .zb_object = range->object, | |
1678 | .zb_level = 0, | |
1679 | .zb_blkid = range->start_blkid, | |
1680 | }; | |
1681 | ||
1682 | arc_flags_t aflags = ARC_FLAG_CACHED_ONLY; | |
1683 | ||
1684 | int arc_err = arc_read(NULL, os->os_spa, bp, | |
1685 | arc_getbuf_func, &srdp->abuf, ZIO_PRIORITY_ASYNC_READ, | |
1686 | zioflags, &aflags, &zb); | |
1687 | /* | |
1688 | * If the data is not already cached in the ARC, we issue a zio to read
1689 | * it directly from disk. This avoids the performance overhead of adding a new
1690 | * entry to the ARC, and we also avoid polluting the ARC cache with | |
1691 | * data that is not likely to be used in the future. | |
1692 | */ | |
1693 | if (arc_err != 0) { | |
1694 | srdp->abd = abd_alloc_linear(srdp->datasz, B_FALSE); | |
1695 | srdp->io_outstanding = B_TRUE; | |
1696 | zio_nowait(zio_read(NULL, os->os_spa, bp, srdp->abd, | |
1697 | srdp->datasz, dmu_send_read_done, range, | |
1698 | ZIO_PRIORITY_ASYNC_READ, zioflags, &zb)); | |
1699 | } | |
1700 | } | |
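/*
 * Guarded restatement of the read-flag policy above as a tiny standalone
 * truth table.  The bit values are local stand-ins, not the real
 * DMU_BACKUP_FEATURE_* constants, and the byteswap/embedded/metadata
 * checks are folded into a single "plain" flag for brevity.
 */
#if 0
#include <stdio.h>

#define	TOY_F_LARGE_BLOCKS	0x1
#define	TOY_F_COMPRESSED	0x2
#define	TOY_F_RAW		0x4

/* Returns 1 when the block should be read compressed/raw off disk. */
static int
toy_compressed_read(unsigned featureflags, int blk_over_old_max, int plain)
{
	int split = blk_over_old_max &&
	    !(featureflags & TOY_F_LARGE_BLOCKS);

	if (featureflags & TOY_F_RAW)
		return (1);	/* raw sends always keep the on-disk bits */
	return ((featureflags & TOY_F_COMPRESSED) && !split && plain);
}

int
main(void)
{
	/* Large block, but the stream can't carry it whole: split, 0. */
	printf("%d\n", toy_compressed_read(TOY_F_COMPRESSED, 1, 1));
	/* Large blocks allowed: compressed read is fine, 1. */
	printf("%d\n", toy_compressed_read(
	    TOY_F_COMPRESSED | TOY_F_LARGE_BLOCKS, 1, 1));
	return (0);
}
#endif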
1701 | ||
30af21b0 PD |
1702 | /* |
1703 | * Create a new record with the given values. | |
1704 | */ | |
1705 | static void | |
1dc32a67 | 1706 | enqueue_range(struct send_reader_thread_arg *srta, bqueue_t *q, dnode_t *dn, |
30af21b0 PD |
1707 | uint64_t blkid, uint64_t count, const blkptr_t *bp, uint32_t datablksz) |
1708 | { | |
1709 | enum type range_type = (bp == NULL || BP_IS_HOLE(bp) ? HOLE : | |
1710 | (BP_IS_REDACTED(bp) ? REDACT : DATA)); | |
1711 | ||
1712 | struct send_range *range = range_alloc(range_type, dn->dn_object, | |
1713 | blkid, blkid + count, B_FALSE); | |
1714 | ||
1715 | if (blkid == DMU_SPILL_BLKID) | |
1716 | ASSERT3U(BP_GET_TYPE(bp), ==, DMU_OT_SA); | |
1717 | ||
1718 | switch (range_type) { | |
1719 | case HOLE: | |
1720 | range->sru.hole.datablksz = datablksz; | |
1721 | break; | |
1722 | case DATA: | |
1723 | ASSERT3U(count, ==, 1); | |
1724 | range->sru.data.datablksz = datablksz; | |
1725 | range->sru.data.obj_type = dn->dn_type; | |
1726 | range->sru.data.bp = *bp; | |
1dc32a67 | 1727 | issue_data_read(srta, range); |
30af21b0 PD |
1728 | break; |
1729 | case REDACT: | |
1730 | range->sru.redact.datablksz = datablksz; | |
1731 | break; | |
1732 | default: | |
1733 | break; | |
1734 | } | |
1735 | bqueue_enqueue(q, range, datablksz); | |
1736 | } | |
1737 | ||
1738 | /* | |
1739 | * This thread is responsible for two things: First, it retrieves the correct | |
1740 | * blkptr in the to ds if we need to send the data because of something from | |
1741 | * the from thread. As a result of this, we're the first ones to discover that
1742 | * some ranges can be dropped (e.g. if the object no longer exists). Second,
1743 | * it issues prefetches for the data we need to send. | |
1744 | */ | |
1745 | static void | |
1dc32a67 | 1746 | send_reader_thread(void *arg) |
30af21b0 | 1747 | { |
1dc32a67 MA |
1748 | struct send_reader_thread_arg *srta = arg; |
1749 | struct send_merge_thread_arg *smta = srta->smta; | |
30af21b0 | 1750 | bqueue_t *inq = &smta->q; |
1dc32a67 | 1751 | bqueue_t *outq = &srta->q; |
30af21b0 PD |
1752 | objset_t *os = smta->os; |
1753 | fstrans_cookie_t cookie = spl_fstrans_mark(); | |
1754 | struct send_range *range = bqueue_dequeue(inq); | |
1755 | int err = 0; | |
1756 | ||
1757 | /* | |
1758 | * If the record we're analyzing is from a redaction bookmark from the | |
1759 | * fromds, then we need to know whether or not it exists in the tods so | |
1760 | * we know whether to create records for it or not. If it does, we need | |
1761 | * the datablksz so we can generate an appropriate record for it. | |
1762 | * Finally, if it isn't redacted, we need the blkptr so that we can send | |
1763 | * a WRITE record containing the actual data. | |
1764 | */ | |
1765 | uint64_t last_obj = UINT64_MAX; | |
1766 | boolean_t last_obj_exists = B_TRUE;
1dc32a67 | 1767 | while (!range->eos_marker && !srta->cancel && smta->error == 0 && |
30af21b0 PD |
1768 | err == 0) { |
1769 | switch (range->type) { | |
1dc32a67 MA |
1770 | case DATA: |
1771 | issue_data_read(srta, range); | |
30af21b0 PD |
1772 | bqueue_enqueue(outq, range, range->sru.data.datablksz); |
1773 | range = get_next_range_nofree(inq, range); | |
1774 | break; | |
30af21b0 PD |
1775 | case HOLE: |
1776 | case OBJECT: | |
1777 | case OBJECT_RANGE: | |
1778 | case REDACT: /* Redacted blocks must exist */
1779 | bqueue_enqueue(outq, range, sizeof (*range)); | |
1780 | range = get_next_range_nofree(inq, range); | |
1781 | break; | |
1782 | case PREVIOUSLY_REDACTED: { | |
1783 | /* | |
1784 | * This entry came from the "from bookmark" when | |
1785 | * sending from a bookmark that has a redaction | |
1786 | * list. We need to check if this object/blkid | |
1787 | * exists in the target ("to") dataset, and if | |
1788 | * not then we drop this entry. We also need | |
1789 | * to fill in the block pointer so that we know | |
1790 | * what to prefetch. | |
1791 | * | |
1792 | * To accomplish the above, we first cache whether or | |
1793 | * not the last object we examined exists. If it | |
1794 | * doesn't, we can drop this record. If it does, we hold | |
1795 | * the dnode and use it to call dbuf_dnode_findbp. We do | |
1796 | * this instead of dbuf_bookmark_findbp because we will | |
1797 | * often operate on large ranges, and holding the dnode | |
1798 | * once is more efficient. | |
1799 | */ | |
1800 | boolean_t object_exists = B_TRUE; | |
1801 | /* | |
1802 | * If the data is redacted, we only care if it exists, | |
1803 | * so that we don't send records for objects that have | |
1804 | * been deleted. | |
1805 | */ | |
1806 | dnode_t *dn; | |
1807 | if (range->object == last_obj && !last_obj_exists) { | |
1808 | /* | |
1809 | * If we're still examining the same object as | |
1810 | * previously, and it doesn't exist, we don't | |
1811 | * need to call dbuf_bookmark_findbp. | |
1812 | */ | |
1813 | object_exists = B_FALSE; | |
1814 | } else { | |
1815 | err = dnode_hold(os, range->object, FTAG, &dn); | |
1816 | if (err == ENOENT) { | |
1817 | object_exists = B_FALSE; | |
1818 | err = 0; | |
1819 | } | |
1820 | last_obj = range->object; | |
1821 | last_obj_exists = object_exists; | |
1822 | } | |
1823 | ||
1824 | if (err != 0) { | |
1825 | break; | |
1826 | } else if (!object_exists) { | |
1827 | /* | |
1828 | * The block was modified, but doesn't | |
1829 | * exist in the to dataset; if it was | |
1830 | * deleted in the to dataset, then we'll | |
1831 | * visit the hole bp for it at some point. | |
1832 | */ | |
1833 | range = get_next_range(inq, range); | |
1834 | continue; | |
1835 | } | |
1836 | uint64_t file_max = | |
1837 | (dn->dn_maxblkid < range->end_blkid ? | |
1838 | dn->dn_maxblkid : range->end_blkid); | |
1839 | /* | |
1840 | * The object exists, so we need to try to find the | |
1841 | * blkptr for each block in the range we're processing. | |
1842 | */ | |
1843 | rw_enter(&dn->dn_struct_rwlock, RW_READER); | |
1844 | for (uint64_t blkid = range->start_blkid; | |
1845 | blkid < file_max; blkid++) { | |
1846 | blkptr_t bp; | |
1847 | uint32_t datablksz = | |
1848 | dn->dn_phys->dn_datablkszsec << | |
1849 | SPA_MINBLOCKSHIFT; | |
1850 | uint64_t offset = blkid * datablksz; | |
1851 | /* | |
1852 | * This call finds the next non-hole block in | |
1853 | * the object. This is to prevent a | |
1854 | * performance problem where we're unredacting | |
1855 | * a large hole. Using dnode_next_offset to | |
1856 | * skip over the large hole avoids iterating | |
1857 | * over every block in it. | |
1858 | */ | |
1859 | err = dnode_next_offset(dn, DNODE_FIND_HAVELOCK, | |
1860 | &offset, 1, 1, 0); | |
1861 | if (err == ESRCH) { | |
1862 | offset = UINT64_MAX; | |
1863 | err = 0; | |
1864 | } else if (err != 0) { | |
1865 | break; | |
1866 | } | |
1867 | if (offset != blkid * datablksz) { | |
1868 | /* | |
1869 | * There is a hole from here (blkid)
1870 | * to offset; enqueue it as one range.
1871 | */ | |
1872 | offset = MIN(offset, file_max * | |
1873 | datablksz); | |
1874 | uint64_t nblks = (offset / datablksz) - | |
1875 | blkid; | |
1dc32a67 | 1876 | enqueue_range(srta, outq, dn, blkid, |
30af21b0 PD |
1877 | nblks, NULL, datablksz); |
1878 | blkid += nblks; | |
1879 | } | |
1880 | if (blkid >= file_max) | |
1881 | break; | |
1882 | err = dbuf_dnode_findbp(dn, 0, blkid, &bp, | |
1883 | NULL, NULL); | |
1884 | if (err != 0) | |
1885 | break; | |
1886 | ASSERT(!BP_IS_HOLE(&bp)); | |
1dc32a67 | 1887 | enqueue_range(srta, outq, dn, blkid, 1, &bp, |
30af21b0 PD |
1888 | datablksz); |
1889 | } | |
1890 | rw_exit(&dn->dn_struct_rwlock); | |
1891 | dnode_rele(dn, FTAG); | |
1892 | range = get_next_range(inq, range); | |
37abac6d | 1893 | } |
428870ff BB |
1894 | } |
1895 | } | |
1dc32a67 | 1896 | if (srta->cancel || err != 0) { |
30af21b0 | 1897 | smta->cancel = B_TRUE; |
1dc32a67 | 1898 | srta->error = err; |
30af21b0 | 1899 | } else if (smta->error != 0) { |
1dc32a67 | 1900 | srta->error = smta->error; |
30af21b0 PD |
1901 | } |
1902 | while (!range->eos_marker) | |
1903 | range = get_next_range(inq, range); | |
1904 | ||
1905 | bqueue_enqueue_flush(outq, range, 1); | |
1906 | spl_fstrans_unmark(cookie); | |
1907 | thread_exit(); | |
1908 | } | |
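/*
 * Guarded model of the hole-skipping arithmetic in the PREVIOUSLY_REDACTED
 * case above: when dnode_next_offset() lands past the block we asked about,
 * everything in between is a hole and gets enqueued as one range instead of
 * block by block.  The sizes below are assumptions for illustration.
 */
#if 0
#include <stdio.h>

int
main(void)
{
	unsigned long long datablksz = 4096;	/* assumed block size */
	unsigned long long blkid = 10;		/* block we asked about */
	/* Next non-hole data reported 40 blocks further on. */
	unsigned long long offset = 50 * datablksz;

	if (offset != blkid * datablksz) {
		unsigned long long nblks = offset / datablksz - blkid;

		/* Prints: hole covers blocks [10, 50) */
		printf("hole covers blocks [%llu, %llu)\n",
		    blkid, blkid + nblks);
		blkid += nblks;	/* resume at the first non-hole block */
	}
	return (0);
}
#endif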
1909 | ||
1910 | #define NUM_SNAPS_NOT_REDACTED UINT64_MAX | |
1911 | ||
1912 | struct dmu_send_params { | |
1913 | /* Pool args */ | |
1914 | void *tag; /* Tag that dp was held with; used to release dp. */
1915 | dsl_pool_t *dp; | |
1916 | /* To snapshot args */ | |
1917 | const char *tosnap; | |
1918 | dsl_dataset_t *to_ds; | |
1919 | /* From snapshot args */ | |
1920 | zfs_bookmark_phys_t ancestor_zb; | |
1921 | uint64_t *fromredactsnaps; | |
1922 | /* NUM_SNAPS_NOT_REDACTED if not sending from redaction bookmark */ | |
1923 | uint64_t numfromredactsnaps; | |
1924 | /* Stream params */ | |
1925 | boolean_t is_clone; | |
1926 | boolean_t embedok; | |
1927 | boolean_t large_block_ok; | |
1928 | boolean_t compressok; | |
ba0ba69e TC |
1929 | boolean_t rawok; |
1930 | boolean_t savedok; | |
30af21b0 PD |
1931 | uint64_t resumeobj; |
1932 | uint64_t resumeoff; | |
ba0ba69e | 1933 | uint64_t saved_guid; |
30af21b0 PD |
1934 | zfs_bookmark_phys_t *redactbook; |
1935 | /* Stream output params */ | |
1936 | dmu_send_outparams_t *dso; | |
1937 | ||
1938 | /* Stream progress params */ | |
1939 | offset_t *off; | |
1940 | int outfd; | |
ba0ba69e | 1941 | char saved_toname[MAXNAMELEN]; |
30af21b0 PD |
1942 | }; |
1943 | ||
1944 | static int | |
1945 | setup_featureflags(struct dmu_send_params *dspp, objset_t *os, | |
1946 | uint64_t *featureflags) | |
1947 | { | |
1948 | dsl_dataset_t *to_ds = dspp->to_ds; | |
1949 | dsl_pool_t *dp = dspp->dp; | |
1950 | #ifdef _KERNEL | |
1951 | if (dmu_objset_type(os) == DMU_OST_ZFS) { | |
1952 | uint64_t version; | |
1953 | if (zfs_get_zplprop(os, ZFS_PROP_VERSION, &version) != 0) | |
1954 | return (SET_ERROR(EINVAL)); | |
1955 | ||
1956 | if (version >= ZPL_VERSION_SA) | |
1957 | *featureflags |= DMU_BACKUP_FEATURE_SA_SPILL; | |
1958 | } | |
428870ff BB |
1959 | #endif |
1960 | ||
b5256303 | 1961 | /* raw sends imply large_block_ok */ |
30af21b0 PD |
1962 | if ((dspp->rawok || dspp->large_block_ok) && |
1963 | dsl_dataset_feature_is_active(to_ds, SPA_FEATURE_LARGE_BLOCKS)) { | |
1964 | *featureflags |= DMU_BACKUP_FEATURE_LARGE_BLOCKS; | |
1965 | } | |
b5256303 TC |
1966 | |
1967 | /* encrypted datasets will not have embedded blocks */ | |
30af21b0 | 1968 | if ((dspp->embedok || dspp->rawok) && !os->os_encrypted && |
9b67f605 | 1969 | spa_feature_is_active(dp->dp_spa, SPA_FEATURE_EMBEDDED_DATA)) { |
30af21b0 | 1970 | *featureflags |= DMU_BACKUP_FEATURE_EMBED_DATA; |
2aa34383 | 1971 | } |
b5256303 TC |
1972 | |
1973 | /* raw send implies compressok */ | |
30af21b0 PD |
1974 | if (dspp->compressok || dspp->rawok) |
1975 | *featureflags |= DMU_BACKUP_FEATURE_COMPRESSED; | |
10b3c7f5 | 1976 | |
30af21b0 PD |
1977 | if (dspp->rawok && os->os_encrypted) |
1978 | *featureflags |= DMU_BACKUP_FEATURE_RAW; | |
b5256303 | 1979 | |
30af21b0 | 1980 | if ((*featureflags & |
b5256303 TC |
1981 | (DMU_BACKUP_FEATURE_EMBED_DATA | DMU_BACKUP_FEATURE_COMPRESSED | |
1982 | DMU_BACKUP_FEATURE_RAW)) != 0 && | |
1983 | spa_feature_is_active(dp->dp_spa, SPA_FEATURE_LZ4_COMPRESS)) { | |
30af21b0 | 1984 | *featureflags |= DMU_BACKUP_FEATURE_LZ4; |
9b67f605 MA |
1985 | } |
1986 | ||
10b3c7f5 MN |
1987 | /* |
1988 | * We specifically do not include DMU_BACKUP_FEATURE_EMBED_DATA here to | |
1989 | * allow sending ZSTD compressed datasets to a receiver that does not | |
1990 | * support ZSTD.
1991 | */ | |
1992 | if ((*featureflags & | |
1993 | (DMU_BACKUP_FEATURE_COMPRESSED | DMU_BACKUP_FEATURE_RAW)) != 0 && | |
1994 | dsl_dataset_feature_is_active(to_ds, SPA_FEATURE_ZSTD_COMPRESS)) { | |
1995 | *featureflags |= DMU_BACKUP_FEATURE_ZSTD; | |
1996 | } | |
1997 | ||
30af21b0 PD |
1998 | if (dspp->resumeobj != 0 || dspp->resumeoff != 0) { |
1999 | *featureflags |= DMU_BACKUP_FEATURE_RESUMING; | |
47dfff3b MA |
2000 | } |
2001 | ||
30af21b0 PD |
2002 | if (dspp->redactbook != NULL) { |
2003 | *featureflags |= DMU_BACKUP_FEATURE_REDACTED; | |
2004 | } | |
9b67f605 | 2005 | |
30af21b0 PD |
2006 | if (dsl_dataset_feature_is_active(to_ds, SPA_FEATURE_LARGE_DNODE)) { |
2007 | *featureflags |= DMU_BACKUP_FEATURE_LARGE_DNODE; | |
2008 | } | |
2009 | return (0); | |
2010 | } | |
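/*
 * Guarded sketch of how the flag implications above compose, with local
 * stand-in bits rather than the real DMU_BACKUP_FEATURE_* values.  A raw
 * send of an encrypted dataset that uses large blocks ends up with
 * LARGE_BLOCKS, COMPRESSED and RAW all set, because rawok implies both
 * large_block_ok and compressok.
 */
#if 0
#include <stdio.h>

#define	TOY_LARGE_BLOCKS	0x1
#define	TOY_COMPRESSED		0x2
#define	TOY_RAW			0x4

static unsigned
toy_featureflags(int rawok, int large_block_ok, int compressok,
    int ds_large_blocks, int os_encrypted)
{
	unsigned ff = 0;

	if ((rawok || large_block_ok) && ds_large_blocks)
		ff |= TOY_LARGE_BLOCKS;
	if (compressok || rawok)
		ff |= TOY_COMPRESSED;
	if (rawok && os_encrypted)
		ff |= TOY_RAW;
	return (ff);
}

int
main(void)
{
	/* Raw send, encrypted, large blocks in use: prints 0x7. */
	printf("0x%x\n", toy_featureflags(1, 0, 0, 1, 1));
	return (0);
}
#endif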
34dc7c2f | 2011 | |
30af21b0 PD |
2012 | static dmu_replay_record_t * |
2013 | create_begin_record(struct dmu_send_params *dspp, objset_t *os, | |
2014 | uint64_t featureflags) | |
2015 | { | |
2016 | dmu_replay_record_t *drr = kmem_zalloc(sizeof (dmu_replay_record_t), | |
2017 | KM_SLEEP); | |
2018 | drr->drr_type = DRR_BEGIN; | |
2019 | ||
2020 | struct drr_begin *drrb = &drr->drr_u.drr_begin; | |
2021 | dsl_dataset_t *to_ds = dspp->to_ds; | |
2022 | ||
2023 | drrb->drr_magic = DMU_BACKUP_MAGIC; | |
2024 | drrb->drr_creation_time = dsl_dataset_phys(to_ds)->ds_creation_time; | |
2025 | drrb->drr_type = dmu_objset_type(os); | |
2026 | drrb->drr_toguid = dsl_dataset_phys(to_ds)->ds_guid; | |
2027 | drrb->drr_fromguid = dspp->ancestor_zb.zbm_guid; | |
2028 | ||
2029 | DMU_SET_STREAM_HDRTYPE(drrb->drr_versioninfo, DMU_SUBSTREAM); | |
2030 | DMU_SET_FEATUREFLAGS(drrb->drr_versioninfo, featureflags); | |
2031 | ||
2032 | if (dspp->is_clone) | |
2033 | drrb->drr_flags |= DRR_FLAG_CLONE; | |
2034 | if (dsl_dataset_phys(dspp->to_ds)->ds_flags & DS_FLAG_CI_DATASET) | |
2035 | drrb->drr_flags |= DRR_FLAG_CI_DATA; | |
2036 | if (zfs_send_set_freerecords_bit) | |
2037 | drrb->drr_flags |= DRR_FLAG_FREERECORDS; | |
caf9dd20 BB |
2038 | drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_SPILL_BLOCK; |
2039 | ||
ba0ba69e TC |
2040 | if (dspp->savedok) { |
2041 | drrb->drr_toguid = dspp->saved_guid; | |
c9e319fa JL |
2042 | strlcpy(drrb->drr_toname, dspp->saved_toname, |
2043 | sizeof (drrb->drr_toname)); | |
ba0ba69e TC |
2044 | } else { |
2045 | dsl_dataset_name(to_ds, drrb->drr_toname); | |
2046 | if (!to_ds->ds_is_snapshot) { | |
2047 | (void) strlcat(drrb->drr_toname, "@--head--", | |
2048 | sizeof (drrb->drr_toname)); | |
2049 | } | |
13fe0198 | 2050 | } |
30af21b0 PD |
2051 | return (drr); |
2052 | } | |
34dc7c2f | 2053 | |
30af21b0 | 2054 | static void |
0fdd6106 | 2055 | setup_to_thread(struct send_thread_arg *to_arg, objset_t *to_os, |
30af21b0 PD |
2056 | dmu_sendstatus_t *dssp, uint64_t fromtxg, boolean_t rawok) |
2057 | { | |
2058 | VERIFY0(bqueue_init(&to_arg->q, zfs_send_no_prefetch_queue_ff, | |
2059 | MAX(zfs_send_no_prefetch_queue_length, 2 * zfs_max_recordsize), | |
2060 | offsetof(struct send_range, ln))); | |
2061 | to_arg->error_code = 0; | |
2062 | to_arg->cancel = B_FALSE; | |
0fdd6106 | 2063 | to_arg->os = to_os; |
30af21b0 PD |
2064 | to_arg->fromtxg = fromtxg; |
2065 | to_arg->flags = TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA; | |
2066 | if (rawok) | |
2067 | to_arg->flags |= TRAVERSE_NO_DECRYPT; | |
a68e4b59 AJ |
2068 | if (zfs_send_corrupt_data) |
2069 | to_arg->flags |= TRAVERSE_HARD; | |
30af21b0 PD |
2070 | to_arg->num_blocks_visited = &dssp->dss_blocks; |
2071 | (void) thread_create(NULL, 0, send_traverse_thread, to_arg, 0, | |
2072 | curproc, TS_RUN, minclsyspri); | |
2073 | } | |
37abac6d | 2074 | |
30af21b0 PD |
2075 | static void |
2076 | setup_from_thread(struct redact_list_thread_arg *from_arg, | |
2077 | redaction_list_t *from_rl, dmu_sendstatus_t *dssp) | |
2078 | { | |
2079 | VERIFY0(bqueue_init(&from_arg->q, zfs_send_no_prefetch_queue_ff, | |
2080 | MAX(zfs_send_no_prefetch_queue_length, 2 * zfs_max_recordsize), | |
2081 | offsetof(struct send_range, ln))); | |
2082 | from_arg->error_code = 0; | |
2083 | from_arg->cancel = B_FALSE; | |
2084 | from_arg->rl = from_rl; | |
2085 | from_arg->mark_redact = B_FALSE; | |
2086 | from_arg->num_blocks_visited = &dssp->dss_blocks; | |
2087 | /* | |
2088 | * If from_rl is null, redact_list_thread skips the traversal and simply
2089 | * enqueues an eos marker.
2090 | */ | |
2091 | (void) thread_create(NULL, 0, redact_list_thread, from_arg, 0, | |
2092 | curproc, TS_RUN, minclsyspri); | |
2093 | } | |
37abac6d | 2094 | |
30af21b0 PD |
2095 | static void |
2096 | setup_redact_list_thread(struct redact_list_thread_arg *rlt_arg, | |
2097 | struct dmu_send_params *dspp, redaction_list_t *rl, dmu_sendstatus_t *dssp) | |
2098 | { | |
2099 | if (dspp->redactbook == NULL) | |
2100 | return; | |
2101 | ||
2102 | rlt_arg->cancel = B_FALSE; | |
2103 | VERIFY0(bqueue_init(&rlt_arg->q, zfs_send_no_prefetch_queue_ff, | |
2104 | MAX(zfs_send_no_prefetch_queue_length, 2 * zfs_max_recordsize), | |
2105 | offsetof(struct send_range, ln))); | |
2106 | rlt_arg->error_code = 0; | |
2107 | rlt_arg->mark_redact = B_TRUE; | |
2108 | rlt_arg->rl = rl; | |
2109 | rlt_arg->num_blocks_visited = &dssp->dss_blocks; | |
2110 | ||
2111 | (void) thread_create(NULL, 0, redact_list_thread, rlt_arg, 0, | |
2112 | curproc, TS_RUN, minclsyspri); | |
2113 | } | |
37abac6d | 2114 | |
30af21b0 PD |
2115 | static void |
2116 | setup_merge_thread(struct send_merge_thread_arg *smt_arg, | |
2117 | struct dmu_send_params *dspp, struct redact_list_thread_arg *from_arg, | |
2118 | struct send_thread_arg *to_arg, struct redact_list_thread_arg *rlt_arg, | |
2119 | objset_t *os) | |
2120 | { | |
2121 | VERIFY0(bqueue_init(&smt_arg->q, zfs_send_no_prefetch_queue_ff, | |
2122 | MAX(zfs_send_no_prefetch_queue_length, 2 * zfs_max_recordsize), | |
2123 | offsetof(struct send_range, ln))); | |
2124 | smt_arg->cancel = B_FALSE; | |
2125 | smt_arg->error = 0; | |
2126 | smt_arg->from_arg = from_arg; | |
2127 | smt_arg->to_arg = to_arg; | |
2128 | if (dspp->redactbook != NULL) | |
2129 | smt_arg->redact_arg = rlt_arg; | |
2130 | ||
2131 | smt_arg->os = os; | |
2132 | (void) thread_create(NULL, 0, send_merge_thread, smt_arg, 0, curproc, | |
2133 | TS_RUN, minclsyspri); | |
2134 | } | |
7ec09286 | 2135 | |
30af21b0 | 2136 | static void |
1dc32a67 MA |
2137 | setup_reader_thread(struct send_reader_thread_arg *srt_arg, |
2138 | struct dmu_send_params *dspp, struct send_merge_thread_arg *smt_arg, | |
2139 | uint64_t featureflags) | |
30af21b0 | 2140 | { |
1dc32a67 | 2141 | VERIFY0(bqueue_init(&srt_arg->q, zfs_send_queue_ff, |
30af21b0 PD |
2142 | MAX(zfs_send_queue_length, 2 * zfs_max_recordsize), |
2143 | offsetof(struct send_range, ln))); | |
1dc32a67 MA |
2144 | srt_arg->smta = smt_arg; |
2145 | srt_arg->issue_reads = !dspp->dso->dso_dryrun; | |
2146 | srt_arg->featureflags = featureflags; | |
2147 | (void) thread_create(NULL, 0, send_reader_thread, srt_arg, 0, | |
30af21b0 PD |
2148 | curproc, TS_RUN, minclsyspri); |
2149 | } | |
b5256303 | 2150 | |
30af21b0 PD |
2151 | static int |
2152 | setup_resume_points(struct dmu_send_params *dspp, | |
2153 | struct send_thread_arg *to_arg, struct redact_list_thread_arg *from_arg, | |
2154 | struct redact_list_thread_arg *rlt_arg, | |
2155 | struct send_merge_thread_arg *smt_arg, boolean_t resuming, objset_t *os, | |
2156 | redaction_list_t *redact_rl, nvlist_t *nvl) | |
2157 | { | |
14e4e3cb | 2158 | (void) smt_arg; |
30af21b0 PD |
2159 | dsl_dataset_t *to_ds = dspp->to_ds; |
2160 | int err = 0; | |
2161 | ||
2162 | uint64_t obj = 0; | |
2163 | uint64_t blkid = 0; | |
2164 | if (resuming) { | |
2165 | obj = dspp->resumeobj; | |
2166 | dmu_object_info_t to_doi; | |
2167 | err = dmu_object_info(os, obj, &to_doi); | |
2168 | if (err != 0) | |
2169 | return (err); | |
2170 | ||
2171 | blkid = dspp->resumeoff / to_doi.doi_data_block_size; | |
2172 | } | |
2173 | /* | |
2174 | * If we're resuming a redacted send, we can skip to the appropriate | |
2175 | * point in the redaction bookmark by binary searching through it. | |
2176 | */ | |
30af21b0 PD |
2177 | if (redact_rl != NULL) { |
2178 | SET_BOOKMARK(&rlt_arg->resume, to_ds->ds_object, obj, 0, blkid); | |
2179 | } | |
2180 | ||
2181 | SET_BOOKMARK(&to_arg->resume, to_ds->ds_object, obj, 0, blkid); | |
2182 | if (nvlist_exists(nvl, BEGINNV_REDACT_FROM_SNAPS)) { | |
2183 | uint64_t objset = dspp->ancestor_zb.zbm_redaction_obj; | |
2184 | /* | |
2185 | * Note: If the resume point is in an object whose | |
2186 | * blocksize is different in the from vs to snapshots, | |
2187 | * we will have divided by the "wrong" blocksize. | |
2188 | * However, in this case fromsnap's send_cb() will | |
2189 | * detect that the blocksize has changed and therefore | |
2190 | * ignore this object. | |
2191 | * | |
2192 | * If we're resuming a send from a redaction bookmark, | |
2193 | * we still cannot accidentally suggest blocks behind | |
2194 | * the to_ds. In addition, we know that any blocks in | |
2195 | * the object in the to_ds will have to be sent, since | |
2196 | * the size changed. Therefore, we can't cause any harm | |
2197 | * this way either. | |
2198 | */ | |
2199 | SET_BOOKMARK(&from_arg->resume, objset, obj, 0, blkid); | |
2200 | } | |
2201 | if (resuming) { | |
2202 | fnvlist_add_uint64(nvl, BEGINNV_RESUME_OBJECT, dspp->resumeobj); | |
2203 | fnvlist_add_uint64(nvl, BEGINNV_RESUME_OFFSET, dspp->resumeoff); | |
2204 | } | |
2205 | return (0); | |
2206 | } | |
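/*
 * The resume point arrives as (object, byte offset) from the partially
 * received stream and is converted to a block id by dividing by the
 * object's data block size, as done above with dmu_object_info().  A
 * guarded one-liner of that conversion, assuming a 128K block size:
 */
#if 0
#include <stdio.h>

int
main(void)
{
	unsigned long long resumeoff = 1048576;	/* 1 MiB into the object */
	unsigned long long blocksize = 131072;	/* assumed 128K blocks */

	/* Prints: resume blkid = 8 */
	printf("resume blkid = %llu\n", resumeoff / blocksize);
	return (0);
}
#endif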
b5256303 | 2207 | |
30af21b0 PD |
2208 | static dmu_sendstatus_t * |
2209 | setup_send_progress(struct dmu_send_params *dspp) | |
2210 | { | |
2211 | dmu_sendstatus_t *dssp = kmem_zalloc(sizeof (*dssp), KM_SLEEP); | |
2212 | dssp->dss_outfd = dspp->outfd; | |
2213 | dssp->dss_off = dspp->off; | |
2214 | dssp->dss_proc = curproc; | |
2215 | mutex_enter(&dspp->to_ds->ds_sendstream_lock); | |
2216 | list_insert_head(&dspp->to_ds->ds_sendstreams, dssp); | |
2217 | mutex_exit(&dspp->to_ds->ds_sendstream_lock); | |
2218 | return (dssp); | |
2219 | } | |
b5256303 | 2220 | |
30af21b0 PD |
2221 | /* |
2222 | * Actually do the bulk of the work in a zfs send. | |
2223 | * | |
2224 | * The idea is that we want to do a send from ancestor_zb to to_ds. We also | |
2225 | * want to not send any data that has been modified by all the redaction
2226 | * snapshots, and store the list of blocks that are redacted in this way in
2227 | * a bookmark named redactbook, created on the to_ds. We do this by creating | |
2228 | * several worker threads, whose function is described below. | |
2229 | * | |
2230 | * There are three cases. | |
2231 | * The first case is a redacted zfs send. In this case there are 5 threads. | |
2232 | * The first thread is the to_ds traversal thread: it calls dataset_traverse on | |
2233 | * the to_ds and finds all the blocks that have changed since ancestor_zb (if | |
2234 | * it's a full send, that's all blocks in the dataset). It then sends those | |
2235 | * blocks on to the send merge thread. The redact list thread takes the data | |
2236 | * from the redaction bookmark and sends those blocks on to the send merge | |
2237 | * thread. The send merge thread takes the data from the to_ds traversal | |
2238 | * thread, and combines it with the redaction records from the redact list | |
2239 | * thread. If a block appears in both the to_ds's data and the redaction data, | |
2240 | * the send merge thread will mark it as redacted and send it on to the prefetch | |
2241 | * thread. Otherwise, the send merge thread will send the block on to the | |
2242 | * prefetch thread unchanged. The prefetch thread will issue prefetch reads for | |
2243 | * any data that isn't redacted, and then send the data on to the main thread. | |
2244 | * The main thread behaves the same as in a normal send case, issuing demand | |
2245 | * reads for data blocks and sending out records over the network.
2246 | * | |
2247 | * The graphic below diagrams the flow of data in the case of a redacted zfs | |
2248 | * send. Each box represents a thread, and each line represents the flow of | |
2249 | * data. | |
2250 | * | |
2251 | * Records from the | | |
2252 | * redaction bookmark | | |
2253 | * +--------------------+ | +---------------------------+ | |
2254 | * | | v | Send Merge Thread | | |
2255 | * | Redact List Thread +----------> Apply redaction marks to | | |
2256 | * | | | records as specified by | | |
2257 | * +--------------------+ | redaction ranges | | |
2258 | * +----^---------------+------+ | |
2259 | * | | Merged data | |
2260 | * | | | |
2261 | * | +------------v--------+ | |
2262 | * | | Prefetch Thread | | |
2263 | * +--------------------+ | | Issues prefetch | | |
2264 | * | to_ds Traversal | | | reads of data blocks| | |
2265 | * | Thread (finds +---------------+ +------------+--------+ | |
2266 | * | candidate blocks) | Blocks modified | Prefetched data | |
2267 | * +--------------------+ by to_ds since | | |
2268 | * ancestor_zb +------------v----+ | |
2269 | * | Main Thread | File Descriptor | |
2270 | * | Sends data over +->(to zfs receive) | |
2271 | * | wire | | |
2272 | * +-----------------+ | |
2273 | * | |
2274 | * The second case is an incremental send from a redaction bookmark. The to_ds | |
2275 | * traversal thread and the main thread behave the same as in the redacted | |
2276 | * send case. The new thread is the from bookmark traversal thread. It | |
2277 | * iterates over the redaction list in the redaction bookmark, and enqueues | |
2278 | * records for each block that was redacted in the original send. The send | |
2279 | * merge thread now has to merge the data from the two threads. For details | |
2280 | * about that process, see the header comment of send_merge_thread(). Any data | |
2281 | * it decides to send on will be prefetched by the prefetch thread. Note that | |
2282 | * you can perform a redacted send from a redaction bookmark; in that case, | |
2283 | * the data flow behaves very similarly to the flow in the redacted send case, | |
2284 | * except with the addition of the bookmark traversal thread iterating over the | |
2285 | * redaction bookmark. The send_merge_thread also has to take on the | |
2286 | * responsibility of merging the redact list thread's records, the bookmark | |
2287 | * traversal thread's records, and the to_ds records. | |
2288 | * | |
2289 | * +---------------------+ | |
2290 | * | | | |
2291 | * | Redact List Thread +--------------+ | |
2292 | * | | | | |
2293 | * +---------------------+ | | |
2294 | * Blocks in redaction list | Ranges modified by every secure snap | |
2295 | * of from bookmark | (or EOS if not redacted)
2296 | * | | |
2297 | * +---------------------+ | +----v----------------------+ | |
2298 | * | bookmark Traversal | v | Send Merge Thread | | |
2299 | * | Thread (finds +---------> Merges bookmark, rlt, and | | |
2300 | * | candidate blocks) | | to_ds send records | | |
2301 | * +---------------------+ +----^---------------+------+ | |
2302 | * | | Merged data | |
2303 | * | +------------v--------+ | |
2304 | * | | Prefetch Thread | | |
2305 | * +--------------------+ | | Issues prefetch | | |
2306 | * | to_ds Traversal | | | reads of data blocks| | |
2307 | * | Thread (finds +---------------+ +------------+--------+ | |
2308 | * | candidate blocks) | Blocks modified | Prefetched data | |
2309 | * +--------------------+ by to_ds since +------------v----+ | |
2310 | * ancestor_zb | Main Thread | File Descriptor | |
2311 | * | Sends data over +->(to zfs receive) | |
2312 | * | wire | | |
2313 | * +-----------------+ | |
2314 | * | |
2315 | * The final case is a simple zfs full or incremental send. The to_ds traversal | |
2316 | * thread behaves the same as always. The redact list thread is never started. | |
e1cfd73f | 2317 | * The send merge thread takes all the blocks that the to_ds traversal thread |
30af21b0 PD |
2318 | * sends it, prefetches the data, and sends the blocks on to the main thread. |
2319 | * The main thread sends the data over the wire. | |
2320 | * | |
2321 | * To keep performance acceptable, we want to prefetch the data in the worker | |
2322 | * threads. While the to_ds thread could simply use the TRAVERSE_PREFETCH | |
2323 | * feature built into traverse_dataset, the combining and deletion of records | |
2324 | * due to redaction and sends from redaction bookmarks mean that we could | |
2325 | * issue many unnecessary prefetches. As a result, we only prefetch data | |
2326 | * after we've determined that the record is not going to be redacted. To | |
2327 | * prevent the prefetching from getting too far ahead of the main thread, the | |
2328 | * blocking queues that are used for communication are capped not by the | |
2329 | * number of entries in the queue, but by the sum of the size of the | |
2330 | * prefetches associated with them. The limit on the amount of data that the | |
2331 | * thread can prefetch beyond what the main thread has reached is controlled | |
2332 | * by the global variable zfs_send_queue_length. In addition, to prevent poor | |
2333 | * performance at the beginning of a send, we also limit the distance ahead
2334 | * that the traversal threads can get. That distance is controlled by the
2335 | * zfs_send_no_prefetch_queue_length tunable. | |
2336 | * | |
2337 | * Note: Releases dp using the specified tag. | |
2338 | */ | |
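/*
 * Guarded sketch of the byte-capped queueing described above, before the
 * dmu_send_impl() definition that follows: the gate is the sum of prefetch
 * sizes, not the entry count.  The struct and limit below are stand-ins
 * for the bqueue internals and the zfs_send_queue_length tunable.
 */
#if 0
#include <stdio.h>

struct toy_bq {
	unsigned long long bytes;	/* bytes currently enqueued */
	unsigned long long limit;	/* byte budget for the queue */
};

/* Producers block once the byte budget would be exceeded. */
static int
toy_can_enqueue(const struct toy_bq *q, unsigned long long entry_bytes)
{
	return (q->bytes + entry_bytes <= q->limit);
}

int
main(void)
{
	struct toy_bq q = { .bytes = 0, .limit = 16ULL << 20 };

	printf("%d\n", toy_can_enqueue(&q, 1ULL << 20));	/* 1: fits */
	q.bytes = q.limit;
	printf("%d\n", toy_can_enqueue(&q, 1));			/* 0: blocks */
	return (0);
}
#endif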
2339 | static int | |
2340 | dmu_send_impl(struct dmu_send_params *dspp) | |
2341 | { | |
2342 | objset_t *os; | |
2343 | dmu_replay_record_t *drr; | |
2344 | dmu_sendstatus_t *dssp; | |
2345 | dmu_send_cookie_t dsc = {0}; | |
2346 | int err; | |
2347 | uint64_t fromtxg = dspp->ancestor_zb.zbm_creation_txg; | |
2348 | uint64_t featureflags = 0; | |
2349 | struct redact_list_thread_arg *from_arg; | |
2350 | struct send_thread_arg *to_arg; | |
2351 | struct redact_list_thread_arg *rlt_arg; | |
2352 | struct send_merge_thread_arg *smt_arg; | |
1dc32a67 | 2353 | struct send_reader_thread_arg *srt_arg; |
30af21b0 PD |
2354 | struct send_range *range; |
2355 | redaction_list_t *from_rl = NULL; | |
2356 | redaction_list_t *redact_rl = NULL; | |
2357 | boolean_t resuming = (dspp->resumeobj != 0 || dspp->resumeoff != 0); | |
2358 | boolean_t book_resuming = resuming; | |
2359 | ||
2360 | dsl_dataset_t *to_ds = dspp->to_ds; | |
2361 | zfs_bookmark_phys_t *ancestor_zb = &dspp->ancestor_zb; | |
2362 | dsl_pool_t *dp = dspp->dp; | |
2363 | void *tag = dspp->tag; | |
2364 | ||
2365 | err = dmu_objset_from_ds(to_ds, &os); | |
2366 | if (err != 0) { | |
2367 | dsl_pool_rele(dp, tag); | |
2368 | return (err); | |
2369 | } | |
ba0ba69e | 2370 | |
30af21b0 PD |
2371 | /* |
2372 | * If this is a non-raw send of an encrypted ds, we can ensure that | |
2373 | * the objset_phys_t is authenticated. This is safe because this is | |
2374 | * either a snapshot or we have owned the dataset, ensuring that | |
2375 | * it can't be modified. | |
2376 | */ | |
2377 | if (!dspp->rawok && os->os_encrypted && | |
2378 | arc_is_unauthenticated(os->os_phys_buf)) { | |
2379 | zbookmark_phys_t zb; | |
2380 | ||
2381 | SET_BOOKMARK(&zb, to_ds->ds_object, ZB_ROOT_OBJECT, | |
2382 | ZB_ROOT_LEVEL, ZB_ROOT_BLKID); | |
2383 | err = arc_untransform(os->os_phys_buf, os->os_spa, | |
2384 | &zb, B_FALSE); | |
2385 | if (err != 0) { | |
2386 | dsl_pool_rele(dp, tag); | |
2387 | return (err); | |
b5256303 TC |
2388 | } |
2389 | ||
30af21b0 PD |
2390 | ASSERT0(arc_is_unauthenticated(os->os_phys_buf)); |
2391 | } | |
2392 | ||
2393 | if ((err = setup_featureflags(dspp, os, &featureflags)) != 0) { | |
2394 | dsl_pool_rele(dp, tag); | |
2395 | return (err); | |
2396 | } | |
f00ab3f2 | 2397 | |
30af21b0 PD |
2398 | /* |
2399 | * If we're doing a redacted send, hold the bookmark's redaction list. | |
2400 | */ | |
2401 | if (dspp->redactbook != NULL) { | |
2402 | err = dsl_redaction_list_hold_obj(dp, | |
2403 | dspp->redactbook->zbm_redaction_obj, FTAG, | |
2404 | &redact_rl); | |
2405 | if (err != 0) { | |
2406 | dsl_pool_rele(dp, tag); | |
2407 | return (SET_ERROR(EINVAL)); | |
2408 | } | |
2409 | dsl_redaction_list_long_hold(dp, redact_rl, FTAG); | |
2410 | } | |
2411 | ||
2412 | /* | |
2413 | * If we're sending from a redaction bookmark, hold the redaction list | |
2414 | * so that we can consider sending the redacted blocks. | |
2415 | */ | |
2416 | if (ancestor_zb->zbm_redaction_obj != 0) { | |
2417 | err = dsl_redaction_list_hold_obj(dp, | |
2418 | ancestor_zb->zbm_redaction_obj, FTAG, &from_rl); | |
2419 | if (err != 0) { | |
2420 | if (redact_rl != NULL) { | |
2421 | dsl_redaction_list_long_rele(redact_rl, FTAG); | |
2422 | dsl_redaction_list_rele(redact_rl, FTAG); | |
b5256303 | 2423 | } |
30af21b0 PD |
2424 | dsl_pool_rele(dp, tag); |
2425 | return (SET_ERROR(EINVAL)); | |
2426 | } | |
2427 | dsl_redaction_list_long_hold(dp, from_rl, FTAG); | |
2428 | } | |
2429 | ||
2430 | dsl_dataset_long_hold(to_ds, FTAG); | |
2431 | ||
0ea03c7c CS |
2432 | from_arg = kmem_zalloc(sizeof (*from_arg), KM_SLEEP); |
2433 | to_arg = kmem_zalloc(sizeof (*to_arg), KM_SLEEP); | |
2434 | rlt_arg = kmem_zalloc(sizeof (*rlt_arg), KM_SLEEP); | |
2435 | smt_arg = kmem_zalloc(sizeof (*smt_arg), KM_SLEEP); | |
1dc32a67 | 2436 | srt_arg = kmem_zalloc(sizeof (*srt_arg), KM_SLEEP); |
0ea03c7c | 2437 | |
30af21b0 PD |
2438 | drr = create_begin_record(dspp, os, featureflags); |
2439 | dssp = setup_send_progress(dspp); | |
2440 | ||
2441 | dsc.dsc_drr = drr; | |
2442 | dsc.dsc_dso = dspp->dso; | |
2443 | dsc.dsc_os = os; | |
2444 | dsc.dsc_off = dspp->off; | |
2445 | dsc.dsc_toguid = dsl_dataset_phys(to_ds)->ds_guid; | |
2446 | dsc.dsc_fromtxg = fromtxg; | |
2447 | dsc.dsc_pending_op = PENDING_NONE; | |
2448 | dsc.dsc_featureflags = featureflags; | |
2449 | dsc.dsc_resume_object = dspp->resumeobj; | |
2450 | dsc.dsc_resume_offset = dspp->resumeoff; | |
2451 | ||
2452 | dsl_pool_rele(dp, tag); | |
2453 | ||
2454 | void *payload = NULL; | |
2455 | size_t payload_len = 0; | |
2456 | nvlist_t *nvl = fnvlist_alloc(); | |
2457 | ||
2458 | /* | |
2459 | * If we're doing a redacted send, we include the snapshots we're | |
2460 | * redacted with respect to so that the target system knows what send | |
2461 | * streams can be correctly received on top of this dataset. If we're | |
2462 | * instead sending a redacted dataset, we include the snapshots that the | |
2463 | * dataset was created with respect to. | |
2464 | */ | |
2465 | if (dspp->redactbook != NULL) { | |
2466 | fnvlist_add_uint64_array(nvl, BEGINNV_REDACT_SNAPS, | |
2467 | redact_rl->rl_phys->rlp_snaps, | |
2468 | redact_rl->rl_phys->rlp_num_snaps); | |
2469 | } else if (dsl_dataset_feature_is_active(to_ds, | |
2470 | SPA_FEATURE_REDACTED_DATASETS)) { | |
2471 | uint64_t *tods_guids; | |
2472 | uint64_t length; | |
2473 | VERIFY(dsl_dataset_get_uint64_array_feature(to_ds, | |
2474 | SPA_FEATURE_REDACTED_DATASETS, &length, &tods_guids)); | |
2475 | fnvlist_add_uint64_array(nvl, BEGINNV_REDACT_SNAPS, tods_guids, | |
2476 | length); | |
2477 | } | |
2478 | ||
2479 | /* | |
2480 | * If we're sending from a redaction bookmark, then we should retrieve | |
2481 | * the guids of that bookmark so we can send them over the wire. | |
2482 | */ | |
2483 | if (from_rl != NULL) { | |
2484 | fnvlist_add_uint64_array(nvl, BEGINNV_REDACT_FROM_SNAPS, | |
2485 | from_rl->rl_phys->rlp_snaps, | |
2486 | from_rl->rl_phys->rlp_num_snaps); | |
2487 | } | |
b5256303 | 2488 | |
30af21b0 PD |
2489 | /* |
2490 | * If the snapshot we're sending from is redacted, include the redaction | |
2491 | * list in the stream. | |
2492 | */ | |
2493 | if (dspp->numfromredactsnaps != NUM_SNAPS_NOT_REDACTED) { | |
2494 | ASSERT3P(from_rl, ==, NULL); | |
2495 | fnvlist_add_uint64_array(nvl, BEGINNV_REDACT_FROM_SNAPS, | |
2496 | dspp->fromredactsnaps, (uint_t)dspp->numfromredactsnaps); | |
2497 | if (dspp->numfromredactsnaps > 0) { | |
2498 | kmem_free(dspp->fromredactsnaps, | |
2499 | dspp->numfromredactsnaps * sizeof (uint64_t)); | |
2500 | dspp->fromredactsnaps = NULL; | |
b5256303 | 2501 | } |
30af21b0 PD |
2502 | } |
2503 | ||
2504 | if (resuming || book_resuming) { | |
2505 | err = setup_resume_points(dspp, to_arg, from_arg, | |
2506 | rlt_arg, smt_arg, resuming, os, redact_rl, nvl); | |
2507 | if (err != 0) | |
2508 | goto out; | |
2509 | } | |
2510 | ||
2511 | if (featureflags & DMU_BACKUP_FEATURE_RAW) { | |
2512 | uint64_t ivset_guid = (ancestor_zb != NULL) ? | |
2513 | ancestor_zb->zbm_ivset_guid : 0; | |
2514 | nvlist_t *keynvl = NULL; | |
2515 | ASSERT(os->os_encrypted); | |
47dfff3b | 2516 | |
0fdd6106 | 2517 | err = dsl_crypto_populate_key_nvlist(os, ivset_guid, |
30af21b0 PD |
2518 | &keynvl); |
2519 | if (err != 0) { | |
2520 | fnvlist_free(nvl); | |
2521 | goto out; | |
2522 | } | |
2523 | ||
2524 | fnvlist_add_nvlist(nvl, "crypt_keydata", keynvl); | |
2525 | fnvlist_free(keynvl); | |
2526 | } | |
2527 | ||
2528 | if (!nvlist_empty(nvl)) { | |
47dfff3b MA |
2529 | payload = fnvlist_pack(nvl, &payload_len); |
2530 | drr->drr_payloadlen = payload_len; | |
47dfff3b MA |
2531 | } |
2532 | ||
30af21b0 PD |
2533 | fnvlist_free(nvl); |
2534 | err = dump_record(&dsc, payload, payload_len); | |
47dfff3b MA |
2535 | fnvlist_pack_free(payload, payload_len); |
2536 | if (err != 0) { | |
30af21b0 | 2537 | err = dsc.dsc_err; |
37abac6d | 2538 | goto out; |
34dc7c2f BB |
2539 | } |
2540 | ||
0fdd6106 | 2541 | setup_to_thread(to_arg, os, dssp, fromtxg, dspp->rawok); |
30af21b0 PD |
2542 | setup_from_thread(from_arg, from_rl, dssp); |
2543 | setup_redact_list_thread(rlt_arg, dspp, redact_rl, dssp); | |
2544 | setup_merge_thread(smt_arg, dspp, from_arg, to_arg, rlt_arg, os); | |
1dc32a67 | 2545 | setup_reader_thread(srt_arg, dspp, smt_arg, featureflags); |
fcff0f35 | 2546 | |
1dc32a67 | 2547 | range = bqueue_dequeue(&srt_arg->q); |
30af21b0 PD |
2548 | while (err == 0 && !range->eos_marker) { |
2549 | err = do_dump(&dsc, range); | |
1dc32a67 | 2550 | range = get_next_range(&srt_arg->q, range); |
fcff0f35 | 2551 | if (issig(JUSTLOOKING) && issig(FORREAL)) |
28caa74b | 2552 | err = SET_ERROR(EINTR); |
fcff0f35 PD |
2553 | } |
2554 | ||
30af21b0 PD |
2555 | /* |
2556 | * If we hit an error or are interrupted, cancel our worker threads and | |
2557 | * clear the queue of any pending records. The threads will pass the | |
2558 | * cancel up the tree of worker threads, and each one will clean up any | |
2559 | * pending records before exiting. | |
2560 | */ | |
fcff0f35 | 2561 | if (err != 0) { |
1dc32a67 | 2562 | srt_arg->cancel = B_TRUE; |
30af21b0 | 2563 | while (!range->eos_marker) { |
1dc32a67 | 2564 | range = get_next_range(&srt_arg->q, range); |
fcff0f35 PD |
2565 | } |
2566 | } | |
30af21b0 | 2567 | range_free(range); |
fcff0f35 | 2568 | |
1dc32a67 | 2569 | bqueue_destroy(&srt_arg->q); |
30af21b0 PD |
2570 | bqueue_destroy(&smt_arg->q); |
2571 | if (dspp->redactbook != NULL) | |
2572 | bqueue_destroy(&rlt_arg->q); | |
2573 | bqueue_destroy(&to_arg->q); | |
2574 | bqueue_destroy(&from_arg->q); | |
fcff0f35 | 2575 | |
1dc32a67 MA |
2576 | if (err == 0 && srt_arg->error != 0) |
2577 | err = srt_arg->error; | |
fcff0f35 PD |
2578 | |
2579 | if (err != 0) | |
2580 | goto out; | |
34dc7c2f | 2581 | |
30af21b0 PD |
2582 | if (dsc.dsc_pending_op != PENDING_NONE) |
2583 | if (dump_record(&dsc, NULL, 0) != 0) | |
2e528b49 | 2584 | err = SET_ERROR(EINTR); |
428870ff | 2585 | |
13fe0198 | 2586 | if (err != 0) { |
30af21b0 PD |
2587 | if (err == EINTR && dsc.dsc_err != 0) |
2588 | err = dsc.dsc_err; | |
37abac6d | 2589 | goto out; |
34dc7c2f BB |
2590 | } |
2591 | ||
ba0ba69e TC |
2592 | /* |
2593 | * Send the DRR_END record if this is not a saved stream. | |
2594 | * Otherwise, the omitted DRR_END record will signal to | |
2595 | * the receive side that the stream is incomplete. | |
2596 | */ | |
2597 | if (!dspp->savedok) { | |
2598 | bzero(drr, sizeof (dmu_replay_record_t)); | |
2599 | drr->drr_type = DRR_END; | |
2600 | drr->drr_u.drr_end.drr_checksum = dsc.dsc_zc; | |
2601 | drr->drr_u.drr_end.drr_toguid = dsc.dsc_toguid; | |
34dc7c2f | 2602 | |
ba0ba69e TC |
2603 | if (dump_record(&dsc, NULL, 0) != 0) |
2604 | err = dsc.dsc_err; | |
2605 | } | |
37abac6d | 2606 | out: |
fcff0f35 | 2607 | mutex_enter(&to_ds->ds_sendstream_lock); |
30af21b0 | 2608 | list_remove(&to_ds->ds_sendstreams, dssp); |
fcff0f35 | 2609 | mutex_exit(&to_ds->ds_sendstream_lock); |
37abac6d | 2610 | |
ba0ba69e TC |
2611 | VERIFY(err != 0 || (dsc.dsc_sent_begin && |
2612 | (dsc.dsc_sent_end || dspp->savedok))); | |
51907a31 | 2613 | |
34dc7c2f | 2614 | kmem_free(drr, sizeof (dmu_replay_record_t)); |
30af21b0 PD |
2615 | kmem_free(dssp, sizeof (dmu_sendstatus_t)); |
2616 | kmem_free(from_arg, sizeof (*from_arg)); | |
2617 | kmem_free(to_arg, sizeof (*to_arg)); | |
2618 | kmem_free(rlt_arg, sizeof (*rlt_arg)); | |
2619 | kmem_free(smt_arg, sizeof (*smt_arg)); | |
1dc32a67 | 2620 | kmem_free(srt_arg, sizeof (*srt_arg)); |
34dc7c2f | 2621 | |
fcff0f35 | 2622 | dsl_dataset_long_rele(to_ds, FTAG); |
30af21b0 PD |
2623 | if (from_rl != NULL) { |
2624 | dsl_redaction_list_long_rele(from_rl, FTAG); | |
2625 | dsl_redaction_list_rele(from_rl, FTAG); | |
2626 | } | |
2627 | if (redact_rl != NULL) { | |
2628 | dsl_redaction_list_long_rele(redact_rl, FTAG); | |
2629 | dsl_redaction_list_rele(redact_rl, FTAG); | |
2630 | } | |
13fe0198 | 2631 | |
37abac6d | 2632 | return (err); |
34dc7c2f BB |
2633 | } |
2634 | ||
int
dmu_send_obj(const char *pool, uint64_t tosnap, uint64_t fromsnap,
    boolean_t embedok, boolean_t large_block_ok, boolean_t compressok,
    boolean_t rawok, boolean_t savedok, int outfd, offset_t *off,
    dmu_send_outparams_t *dsop)
{
	int err;
	dsl_dataset_t *fromds;
	ds_hold_flags_t dsflags;
	struct dmu_send_params dspp = {0};
	dspp.embedok = embedok;
	dspp.large_block_ok = large_block_ok;
	dspp.compressok = compressok;
	dspp.outfd = outfd;
	dspp.off = off;
	dspp.dso = dsop;
	dspp.tag = FTAG;
	dspp.rawok = rawok;
	dspp.savedok = savedok;

	dsflags = (rawok) ? DS_HOLD_FLAG_NONE : DS_HOLD_FLAG_DECRYPT;
	err = dsl_pool_hold(pool, FTAG, &dspp.dp);
	if (err != 0)
		return (err);

	err = dsl_dataset_hold_obj_flags(dspp.dp, tosnap, dsflags, FTAG,
	    &dspp.to_ds);
	if (err != 0) {
		dsl_pool_rele(dspp.dp, FTAG);
		return (err);
	}

	if (fromsnap != 0) {
		err = dsl_dataset_hold_obj_flags(dspp.dp, fromsnap, dsflags,
		    FTAG, &fromds);
		if (err != 0) {
			dsl_dataset_rele_flags(dspp.to_ds, dsflags, FTAG);
			dsl_pool_rele(dspp.dp, FTAG);
			return (err);
		}
		dspp.ancestor_zb.zbm_guid = dsl_dataset_phys(fromds)->ds_guid;
		dspp.ancestor_zb.zbm_creation_txg =
		    dsl_dataset_phys(fromds)->ds_creation_txg;
		dspp.ancestor_zb.zbm_creation_time =
		    dsl_dataset_phys(fromds)->ds_creation_time;

		if (dsl_dataset_is_zapified(fromds)) {
			(void) zap_lookup(dspp.dp->dp_meta_objset,
			    fromds->ds_object, DS_FIELD_IVSET_GUID, 8, 1,
			    &dspp.ancestor_zb.zbm_ivset_guid);
		}

		/* See dmu_send for the reasons behind this. */
		uint64_t *fromredact;

		if (!dsl_dataset_get_uint64_array_feature(fromds,
		    SPA_FEATURE_REDACTED_DATASETS,
		    &dspp.numfromredactsnaps,
		    &fromredact)) {
			dspp.numfromredactsnaps = NUM_SNAPS_NOT_REDACTED;
		} else if (dspp.numfromredactsnaps > 0) {
			uint64_t size = dspp.numfromredactsnaps *
			    sizeof (uint64_t);
			dspp.fromredactsnaps = kmem_zalloc(size, KM_SLEEP);
			bcopy(fromredact, dspp.fromredactsnaps, size);
		}

		boolean_t is_before =
		    dsl_dataset_is_before(dspp.to_ds, fromds, 0);
		dspp.is_clone = (dspp.to_ds->ds_dir !=
		    fromds->ds_dir);
		dsl_dataset_rele(fromds, FTAG);
		if (!is_before) {
			dsl_pool_rele(dspp.dp, FTAG);
			err = SET_ERROR(EXDEV);
		} else {
			err = dmu_send_impl(&dspp);
		}
	} else {
		dspp.numfromredactsnaps = NUM_SNAPS_NOT_REDACTED;
		err = dmu_send_impl(&dspp);
	}
	dsl_dataset_rele(dspp.to_ds, FTAG);
	return (err);
}

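/*
 * Illustrative caller sketch (not part of this file): a kernel consumer
 * that already knows the snapshot object numbers could drive a send
 * roughly as below. The callback name and body are hypothetical, and the
 * dmu_send_outparams_t field layout (dso_outfunc/dso_arg) is assumed from
 * dmu_send.h; verify against your tree before relying on it.
 *
 *	static int
 *	my_dump_cb(objset_t *os, void *buf, int len, void *arg)
 *	{
 *		// hypothetical: push len bytes of buf to some sink
 *		return (0);
 *	}
 *
 *	dmu_send_outparams_t dso = { 0 };
 *	dso.dso_outfunc = my_dump_cb;
 *	dso.dso_arg = my_state;
 *	err = dmu_send_obj(poolname, tosnap_obj, fromsnap_obj,
 *	    B_FALSE, B_TRUE, B_FALSE, B_FALSE, B_FALSE, fd, &off, &dso);
 */
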
int
dmu_send(const char *tosnap, const char *fromsnap, boolean_t embedok,
    boolean_t large_block_ok, boolean_t compressok, boolean_t rawok,
    boolean_t savedok, uint64_t resumeobj, uint64_t resumeoff,
    const char *redactbook, int outfd, offset_t *off,
    dmu_send_outparams_t *dsop)
{
	int err = 0;
	ds_hold_flags_t dsflags;
	boolean_t owned = B_FALSE;
	dsl_dataset_t *fromds = NULL;
	zfs_bookmark_phys_t book = {0};
	struct dmu_send_params dspp = {0};

	dsflags = (rawok) ? DS_HOLD_FLAG_NONE : DS_HOLD_FLAG_DECRYPT;
	dspp.tosnap = tosnap;
	dspp.embedok = embedok;
	dspp.large_block_ok = large_block_ok;
	dspp.compressok = compressok;
	dspp.outfd = outfd;
	dspp.off = off;
	dspp.dso = dsop;
	dspp.tag = FTAG;
	dspp.resumeobj = resumeobj;
	dspp.resumeoff = resumeoff;
	dspp.rawok = rawok;
	dspp.savedok = savedok;

	if (fromsnap != NULL && strpbrk(fromsnap, "@#") == NULL)
		return (SET_ERROR(EINVAL));

	err = dsl_pool_hold(tosnap, FTAG, &dspp.dp);
	if (err != 0)
		return (err);

	if (strchr(tosnap, '@') == NULL && spa_writeable(dspp.dp->dp_spa)) {
		/*
		 * We are sending a filesystem or volume.  Ensure
		 * that it doesn't change by owning the dataset.
		 */

		if (savedok) {
			/*
			 * We are looking for the dataset that represents the
			 * partially received send stream. If this stream was
			 * received as a new snapshot of an existing dataset,
			 * this will be saved in a hidden clone named
			 * "<pool>/<dataset>/%recv". Otherwise, the stream
			 * will be saved in the live dataset itself. In
			 * either case we need to use dsl_dataset_own_force()
			 * because the stream is marked as inconsistent,
			 * which would normally make it unavailable to be
			 * owned.
			 */
			char *name = kmem_asprintf("%s/%s", tosnap,
			    recv_clone_name);
			err = dsl_dataset_own_force(dspp.dp, name, dsflags,
			    FTAG, &dspp.to_ds);
			if (err == ENOENT) {
				err = dsl_dataset_own_force(dspp.dp, tosnap,
				    dsflags, FTAG, &dspp.to_ds);
			}

			if (err == 0) {
				err = zap_lookup(dspp.dp->dp_meta_objset,
				    dspp.to_ds->ds_object,
				    DS_FIELD_RESUME_TOGUID, 8, 1,
				    &dspp.saved_guid);
			}

			if (err == 0) {
				err = zap_lookup(dspp.dp->dp_meta_objset,
				    dspp.to_ds->ds_object,
				    DS_FIELD_RESUME_TONAME, 1,
				    sizeof (dspp.saved_toname),
				    dspp.saved_toname);
			}
			if (err != 0)
				dsl_dataset_disown(dspp.to_ds, dsflags, FTAG);

			kmem_strfree(name);
		} else {
			err = dsl_dataset_own(dspp.dp, tosnap, dsflags,
			    FTAG, &dspp.to_ds);
		}
		owned = B_TRUE;
	} else {
		err = dsl_dataset_hold_flags(dspp.dp, tosnap, dsflags, FTAG,
		    &dspp.to_ds);
	}

	if (err != 0) {
		dsl_pool_rele(dspp.dp, FTAG);
		return (err);
	}

	if (redactbook != NULL) {
		char path[ZFS_MAX_DATASET_NAME_LEN];
		(void) strlcpy(path, tosnap, sizeof (path));
		char *at = strchr(path, '@');
		if (at == NULL) {
			err = SET_ERROR(EINVAL);
		} else {
			(void) snprintf(at, sizeof (path) - (at - path), "#%s",
			    redactbook);
			err = dsl_bookmark_lookup(dspp.dp, path,
			    NULL, &book);
			dspp.redactbook = &book;
		}
	}
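	/*
	 * For example (illustrative): tosnap "pool/fs@snap" with
	 * redactbook "book" is rewritten to the bookmark name
	 * "pool/fs#book" before the dsl_bookmark_lookup() above.
	 */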

	if (err != 0) {
		dsl_pool_rele(dspp.dp, FTAG);
		if (owned)
			dsl_dataset_disown(dspp.to_ds, dsflags, FTAG);
		else
			dsl_dataset_rele_flags(dspp.to_ds, dsflags, FTAG);
		return (err);
	}

	if (fromsnap != NULL) {
		zfs_bookmark_phys_t *zb = &dspp.ancestor_zb;
		int fsnamelen;
		if (strpbrk(tosnap, "@#") != NULL)
			fsnamelen = strpbrk(tosnap, "@#") - tosnap;
		else
			fsnamelen = strlen(tosnap);

		/*
		 * If the fromsnap is in a different filesystem, then
		 * mark the send stream as a clone.
		 */
		if (strncmp(tosnap, fromsnap, fsnamelen) != 0 ||
		    (fromsnap[fsnamelen] != '@' &&
		    fromsnap[fsnamelen] != '#')) {
			dspp.is_clone = B_TRUE;
		}

		if (strchr(fromsnap, '@') != NULL) {
			err = dsl_dataset_hold(dspp.dp, fromsnap, FTAG,
			    &fromds);

			if (err != 0) {
				ASSERT3P(fromds, ==, NULL);
			} else {
				/*
				 * We need to make a deep copy of the redact
				 * snapshots of the from snapshot, because the
				 * array will be freed when we evict from_ds.
				 */
				uint64_t *fromredact;
				if (!dsl_dataset_get_uint64_array_feature(
				    fromds, SPA_FEATURE_REDACTED_DATASETS,
				    &dspp.numfromredactsnaps,
				    &fromredact)) {
					dspp.numfromredactsnaps =
					    NUM_SNAPS_NOT_REDACTED;
				} else if (dspp.numfromredactsnaps > 0) {
					uint64_t size =
					    dspp.numfromredactsnaps *
					    sizeof (uint64_t);
					dspp.fromredactsnaps = kmem_zalloc(size,
					    KM_SLEEP);
					bcopy(fromredact, dspp.fromredactsnaps,
					    size);
				}
				if (!dsl_dataset_is_before(dspp.to_ds, fromds,
				    0)) {
					err = SET_ERROR(EXDEV);
				} else {
					zb->zbm_creation_txg =
					    dsl_dataset_phys(fromds)->
					    ds_creation_txg;
					zb->zbm_creation_time =
					    dsl_dataset_phys(fromds)->
					    ds_creation_time;
					zb->zbm_guid =
					    dsl_dataset_phys(fromds)->ds_guid;
					zb->zbm_redaction_obj = 0;

					if (dsl_dataset_is_zapified(fromds)) {
						(void) zap_lookup(
						    dspp.dp->dp_meta_objset,
						    fromds->ds_object,
						    DS_FIELD_IVSET_GUID, 8, 1,
						    &zb->zbm_ivset_guid);
					}
				}
				dsl_dataset_rele(fromds, FTAG);
			}
		} else {
			dspp.numfromredactsnaps = NUM_SNAPS_NOT_REDACTED;
			err = dsl_bookmark_lookup(dspp.dp, fromsnap, dspp.to_ds,
			    zb);
			if (err == EXDEV && zb->zbm_redaction_obj != 0 &&
			    zb->zbm_guid ==
			    dsl_dataset_phys(dspp.to_ds)->ds_guid)
				err = 0;
		}

		if (err == 0) {
			/* dmu_send_impl will call dsl_pool_rele for us. */
			err = dmu_send_impl(&dspp);
		} else {
			dsl_pool_rele(dspp.dp, FTAG);
		}
	} else {
		dspp.numfromredactsnaps = NUM_SNAPS_NOT_REDACTED;
		err = dmu_send_impl(&dspp);
	}
	if (owned)
		dsl_dataset_disown(dspp.to_ds, dsflags, FTAG);
	else
		dsl_dataset_rele_flags(dspp.to_ds, dsflags, FTAG);
	return (err);
}

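/*
 * Usage sketch (illustrative; only the dmu_send() signature above is
 * real). Because fromsnap merely needs a '@' or '#' separator, an
 * incremental from a bookmark passes "pool/fs#bm" while a snapshot
 * source passes "pool/fs@snap1"; the bookmark form is resolved through
 * dsl_bookmark_lookup() rather than a dataset hold. "dso" here is the
 * hypothetical outparams struct from the earlier sketch.
 *
 *	err = dmu_send("pool/fs@snap2", "pool/fs#bm", B_FALSE, B_TRUE,
 *	    B_FALSE, B_FALSE, B_FALSE, 0, 0, NULL, fd, &off, &dso);
 */
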
static int
dmu_adjust_send_estimate_for_indirects(dsl_dataset_t *ds, uint64_t uncompressed,
    uint64_t compressed, boolean_t stream_compressed, uint64_t *sizep)
{
	int err = 0;
	uint64_t size;
	/*
	 * Assume that space (both on-disk and in-stream) is dominated by
	 * data.  We will adjust for indirect blocks and the copies property,
	 * but ignore per-object space used (eg, dnodes and DRR_OBJECT records).
	 */

	uint64_t recordsize;
	uint64_t record_count;
	objset_t *os;
	VERIFY0(dmu_objset_from_ds(ds, &os));

	/* Assume all (uncompressed) blocks are recordsize. */
	if (zfs_override_estimate_recordsize != 0) {
		recordsize = zfs_override_estimate_recordsize;
	} else if (os->os_phys->os_type == DMU_OST_ZVOL) {
		err = dsl_prop_get_int_ds(ds,
		    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &recordsize);
	} else {
		err = dsl_prop_get_int_ds(ds,
		    zfs_prop_to_name(ZFS_PROP_RECORDSIZE), &recordsize);
	}
	if (err != 0)
		return (err);
	record_count = uncompressed / recordsize;

	/*
	 * If we're estimating a send size for a compressed stream, use the
	 * compressed data size to estimate the stream size. Otherwise, use the
	 * uncompressed data size.
	 */
	size = stream_compressed ? compressed : uncompressed;

	/*
	 * Subtract out approximate space used by indirect blocks.
	 * Assume most space is used by data blocks (non-indirect, non-dnode).
	 * Assume no ditto blocks or internal fragmentation.
	 *
	 * Therefore, space used by indirect blocks is sizeof(blkptr_t) per
	 * block.
	 */
	size -= record_count * sizeof (blkptr_t);

	/* Add in the space for the record associated with each block. */
	size += record_count * sizeof (dmu_replay_record_t);

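	/*
	 * Worked example (illustrative numbers, not from this code): for
	 * 1 GiB of uncompressed data at a 128 KiB recordsize,
	 * record_count = 8192. With sizeof (blkptr_t) == 128 bytes we
	 * subtract 1 MiB of indirect-block overhead, then add back
	 * 8192 * sizeof (dmu_replay_record_t) for the per-block stream
	 * headers, leaving the estimate within a fraction of a percent
	 * of the raw data size.
	 */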
	*sizep = size;

	return (0);
}

int
dmu_send_estimate_fast(dsl_dataset_t *origds, dsl_dataset_t *fromds,
    zfs_bookmark_phys_t *frombook, boolean_t stream_compressed,
    boolean_t saved, uint64_t *sizep)
{
	int err;
	dsl_dataset_t *ds = origds;
	uint64_t uncomp, comp;

	ASSERT(dsl_pool_config_held(origds->ds_dir->dd_pool));
	ASSERT(fromds == NULL || frombook == NULL);

	/*
	 * If this is a saved send we may actually be sending
	 * from the %recv clone used for resuming.
	 */
	if (saved) {
		objset_t *mos = origds->ds_dir->dd_pool->dp_meta_objset;
		uint64_t guid;
		char dsname[ZFS_MAX_DATASET_NAME_LEN + 6];

		dsl_dataset_name(origds, dsname);
		(void) strcat(dsname, "/");
		(void) strcat(dsname, recv_clone_name);

		err = dsl_dataset_hold(origds->ds_dir->dd_pool,
		    dsname, FTAG, &ds);
		if (err != ENOENT && err != 0) {
			return (err);
		} else if (err == ENOENT) {
			ds = origds;
		}

		/* check that this dataset has partially received data */
		err = zap_lookup(mos, ds->ds_object,
		    DS_FIELD_RESUME_TOGUID, 8, 1, &guid);
		if (err != 0) {
			err = SET_ERROR(err == ENOENT ? EINVAL : err);
			goto out;
		}

		err = zap_lookup(mos, ds->ds_object,
		    DS_FIELD_RESUME_TONAME, 1, sizeof (dsname), dsname);
		if (err != 0) {
			err = SET_ERROR(err == ENOENT ? EINVAL : err);
			goto out;
		}
	}

	/* tosnap must be a snapshot or the target of a saved send */
	if (!ds->ds_is_snapshot && ds == origds)
		return (SET_ERROR(EINVAL));

	if (fromds != NULL) {
		uint64_t used;
		if (!fromds->ds_is_snapshot) {
			err = SET_ERROR(EINVAL);
			goto out;
		}

		if (!dsl_dataset_is_before(ds, fromds, 0)) {
			err = SET_ERROR(EXDEV);
			goto out;
		}

		err = dsl_dataset_space_written(fromds, ds, &used, &comp,
		    &uncomp);
		if (err != 0)
			goto out;
	} else if (frombook != NULL) {
		uint64_t used;
		err = dsl_dataset_space_written_bookmark(frombook, ds, &used,
		    &comp, &uncomp);
		if (err != 0)
			goto out;
	} else {
		uncomp = dsl_dataset_phys(ds)->ds_uncompressed_bytes;
		comp = dsl_dataset_phys(ds)->ds_compressed_bytes;
	}

	err = dmu_adjust_send_estimate_for_indirects(ds, uncomp, comp,
	    stream_compressed, sizep);
	/*
	 * Add the size of the BEGIN and END records to the estimate.
	 */
	*sizep += 2 * sizeof (dmu_replay_record_t);

out:
	if (ds != origds)
		dsl_dataset_rele(ds, FTAG);
	return (err);
}

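/*
 * Illustrative note (an assumption about callers, not stated here): this
 * estimate is the kind of fast, accounting-based figure that backs
 * dry-run size previews such as "zfs send -nv". Because it never
 * traverses the dataset, it is approximate by design, which is why the
 * recordsize assumption can be pinned with
 * zfs_override_estimate_recordsize when validating the estimator.
 */
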

ZFS_MODULE_PARAM(zfs_send, zfs_send_, corrupt_data, INT, ZMOD_RW,
	"Allow sending corrupt data");

ZFS_MODULE_PARAM(zfs_send, zfs_send_, queue_length, INT, ZMOD_RW,
	"Maximum send queue length");

ZFS_MODULE_PARAM(zfs_send, zfs_send_, unmodified_spill_blocks, INT, ZMOD_RW,
	"Send unmodified spill blocks");

ZFS_MODULE_PARAM(zfs_send, zfs_send_, no_prefetch_queue_length, INT, ZMOD_RW,
	"Maximum send queue length for non-prefetch queues");

ZFS_MODULE_PARAM(zfs_send, zfs_send_, queue_ff, INT, ZMOD_RW,
	"Send queue fill fraction");

ZFS_MODULE_PARAM(zfs_send, zfs_send_, no_prefetch_queue_ff, INT, ZMOD_RW,
	"Send queue fill fraction for non-prefetch queues");

ZFS_MODULE_PARAM(zfs_send, zfs_, override_estimate_recordsize, INT, ZMOD_RW,
	"Override block size estimate with fixed size");
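
/*
 * Tuning sketch (an assumption about the Linux module-param plumbing,
 * not guaranteed by this file): ZFS_MODULE_PARAM joins prefix and name,
 * so these tunables typically surface as files such as
 * /sys/module/zfs/parameters/zfs_send_queue_length. The prefetch queue
 * could then be deepened at runtime with something like:
 *
 *	echo 134217728 > /sys/module/zfs/parameters/zfs_send_queue_length
 *
 * The exact sysfs path and defaults depend on the platform and build.
 */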