/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2011, 2015 by Delphix. All rights reserved.
 * Copyright (c) 2014, Joyent, Inc. All rights reserved.
 * Copyright 2014 HybridCluster. All rights reserved.
 * Copyright 2016 RackTop Systems.
 * Copyright (c) 2016 Actifio, Inc. All rights reserved.
 */
#include <sys/dmu_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dnode.h>
#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/spa_impl.h>
#include <sys/zfs_ioctl.h>
#include <sys/zio_checksum.h>
#include <sys/zfs_znode.h>
#include <zfs_fletcher.h>
#include <sys/zfs_onexit.h>
#include <sys/dmu_send.h>
#include <sys/dsl_destroy.h>
#include <sys/blkptr.h>
#include <sys/dsl_bookmark.h>
#include <sys/zfeature.h>
#include <sys/bqueue.h>
#include <sys/policy.h>
/* Set this tunable to TRUE to replace corrupt data with 0x2f5baddb10c */
int zfs_send_corrupt_data = B_FALSE;
int zfs_send_queue_length = 16 * 1024 * 1024;
int zfs_recv_queue_length = 16 * 1024 * 1024;
/* Set this tunable to FALSE to disable setting of DRR_FLAG_FREERECORDS */
int zfs_send_set_freerecords_bit = B_TRUE;

static char *dmu_recv_tag = "dmu_recv_tag";
const char *recv_clone_name = "%recv";
#define	BP_SPAN(datablkszsec, indblkshift, level) \
	(((uint64_t)datablkszsec) << (SPA_MINBLOCKSHIFT + \
	(level) * (indblkshift - SPA_BLKPTRSHIFT)))
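/*
 * Worked example (editor's illustration, not from the original source):
 * with 128K data blocks, datablkszsec == 256, so a level-0 block spans
 * 256 << 9 == 128K bytes of object data.  With indblkshift == 17 each
 * indirect block holds 2^(17 - 7) == 1024 block pointers (SPA_BLKPTRSHIFT
 * is 7 because a blkptr_t is 128 bytes), so every additional level
 * multiplies the span by 1024: a level-1 block covers 128M of the object.
 */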
static void byteswap_record(dmu_replay_record_t *drr);
struct send_thread_arg {
	bqueue_t	q;
	dsl_dataset_t	*ds;		/* Dataset to traverse */
	uint64_t	fromtxg;	/* Traverse from this txg */
	int		flags;		/* flags to pass to traverse_dataset */
	int		error_code;
	boolean_t	cancel;
	zbookmark_phys_t resume;
};
struct send_block_record {
	boolean_t		eos_marker; /* Marks the end of the stream */
	blkptr_t		bp;
	zbookmark_phys_t	zb;
	uint8_t			indblkshift;
	uint16_t		datablkszsec;
	bqueue_node_t		ln;
};
typedef struct dump_bytes_io {
	dmu_sendarg_t	*dbi_dsp;
	void		*dbi_buf;
	int		dbi_len;
} dump_bytes_io_t;
static void
dump_bytes_cb(void *arg)
{
	dump_bytes_io_t *dbi = (dump_bytes_io_t *)arg;
	dmu_sendarg_t *dsp = dbi->dbi_dsp;
	dsl_dataset_t *ds = dmu_objset_ds(dsp->dsa_os);
	ssize_t resid; /* have to get resid to get detailed errno */

	/*
	 * The code does not rely on len being a multiple of 8.  We keep
	 * this assertion because of the corresponding assertion in
	 * receive_read().  Keeping this assertion ensures that we do not
	 * inadvertently break backwards compatibility (causing the assertion
	 * in receive_read() to trigger on old software).  Newer feature flags
	 * (such as raw send) may break this assertion since they were
	 * introduced after the requirement was made obsolete.
	 */
	ASSERT(dbi->dbi_len % 8 == 0 ||
	    (dsp->dsa_featureflags & DMU_BACKUP_FEATURE_RAW) != 0);

	dsp->dsa_err = vn_rdwr(UIO_WRITE, dsp->dsa_vp,
	    (caddr_t)dbi->dbi_buf, dbi->dbi_len,
	    0, UIO_SYSSPACE, FAPPEND, RLIM64_INFINITY, CRED(), &resid);

	mutex_enter(&ds->ds_sendstream_lock);
	*dsp->dsa_off += dbi->dbi_len;
	mutex_exit(&ds->ds_sendstream_lock);
}
static int
dump_bytes(dmu_sendarg_t *dsp, void *buf, int len)
{
	dump_bytes_io_t dbi;

	dbi.dbi_dsp = dsp;
	dbi.dbi_buf = buf;
	dbi.dbi_len = len;

#if defined(HAVE_LARGE_STACKS)
	dump_bytes_cb(&dbi);
#else
	/*
	 * The vn_rdwr() call is performed in a taskq to ensure that there is
	 * always enough stack space to write safely to the target filesystem.
	 * The ZIO_TYPE_FREE threads are used because there can be a lot of
	 * them and they are used in vdev_file.c for a similar purpose.
	 */
	spa_taskq_dispatch_sync(dmu_objset_spa(dsp->dsa_os), ZIO_TYPE_FREE,
	    ZIO_TASKQ_ISSUE, dump_bytes_cb, &dbi, TQ_SLEEP);
#endif /* HAVE_LARGE_STACKS */

	return (dsp->dsa_err);
}
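/*
 * Editorial note: every record and payload ultimately funnels through
 * dump_bytes(), and dsa_err is sticky.  Once vn_rdwr() fails, subsequent
 * dump_record() calls return EINTR and dmu_send_impl() recovers the
 * original errno from dsp->dsa_err.
 */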
/*
 * For all record types except BEGIN, fill in the checksum (overlaid in
 * drr_u.drr_checksum.drr_checksum).  The checksum verifies everything
 * up to the start of the checksum itself.
 */
static int
dump_record(dmu_sendarg_t *dsp, void *payload, int payload_len)
{
	ASSERT3U(offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
	    ==, sizeof (dmu_replay_record_t) - sizeof (zio_cksum_t));
	(void) fletcher_4_incremental_native(dsp->dsa_drr,
	    offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
	    &dsp->dsa_zc);
	if (dsp->dsa_drr->drr_type == DRR_BEGIN) {
		dsp->dsa_sent_begin = B_TRUE;
	} else {
		ASSERT(ZIO_CHECKSUM_IS_ZERO(&dsp->dsa_drr->drr_u.
		    drr_checksum.drr_checksum));
		dsp->dsa_drr->drr_u.drr_checksum.drr_checksum = dsp->dsa_zc;
	}
	if (dsp->dsa_drr->drr_type == DRR_END) {
		dsp->dsa_sent_end = B_TRUE;
	}
	(void) fletcher_4_incremental_native(&dsp->dsa_drr->
	    drr_u.drr_checksum.drr_checksum,
	    sizeof (zio_cksum_t), &dsp->dsa_zc);
	if (dump_bytes(dsp, dsp->dsa_drr, sizeof (dmu_replay_record_t)) != 0)
		return (SET_ERROR(EINTR));
	if (payload_len != 0) {
		(void) fletcher_4_incremental_native(payload, payload_len,
		    &dsp->dsa_zc);
		if (dump_bytes(dsp, payload, payload_len) != 0)
			return (SET_ERROR(EINTR));
	}
	return (0);
}
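/*
 * Editorial note on verification: the checksum in each record is
 * cumulative, covering every byte of the stream from the BEGIN record up
 * to (but not including) that record's own drr_checksum field.  A reader
 * can therefore verify a stream by maintaining one running zio_cksum_t,
 * folding in each record header and payload with
 * fletcher_4_incremental_native() exactly as above, and comparing the
 * running value against drr_u.drr_checksum.drr_checksum before folding
 * that field in.
 */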
/*
 * Fill in the drr_free struct, or perform aggregation if the previous record is
 * also a free record, and the two are adjacent.
 *
 * Note that we send free records even for a full send, because we want to be
 * able to receive a full send as a clone, which requires a list of all the free
 * and freeobject records that were generated on the source.
 */
static int
dump_free(dmu_sendarg_t *dsp, uint64_t object, uint64_t offset,
    uint64_t length)
{
	struct drr_free *drrf = &(dsp->dsa_drr->drr_u.drr_free);

	/*
	 * When we receive a free record, dbuf_free_range() assumes
	 * that the receiving system doesn't have any dbufs in the range
	 * being freed.  This is always true because there is a one-record
	 * constraint: we only send one WRITE record for any given
	 * object,offset.  We know that the one-record constraint is
	 * true because we always send data in increasing order by
	 * object,offset.
	 *
	 * If the increasing-order constraint ever changes, we should find
	 * another way to assert that the one-record constraint is still
	 * satisfied.
	 */
	ASSERT(object > dsp->dsa_last_data_object ||
	    (object == dsp->dsa_last_data_object &&
	    offset > dsp->dsa_last_data_offset));

	/*
	 * If there is a pending op, but it's not PENDING_FREE, push it out,
	 * since free block aggregation can only be done for blocks of the
	 * same type (i.e., DRR_FREE records can only be aggregated with
	 * other DRR_FREE records.  DRR_FREEOBJECTS records can only be
	 * aggregated with other DRR_FREEOBJECTS records.
	 */
	if (dsp->dsa_pending_op != PENDING_NONE &&
	    dsp->dsa_pending_op != PENDING_FREE) {
		if (dump_record(dsp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
		dsp->dsa_pending_op = PENDING_NONE;
	}

	if (dsp->dsa_pending_op == PENDING_FREE) {
		/*
		 * There should never be a PENDING_FREE if length is
		 * DMU_OBJECT_END (because dump_dnode is the only place where
		 * this function is called with a DMU_OBJECT_END, and only after
		 * flushing any pending record).
		 */
		ASSERT(length != DMU_OBJECT_END);
		/*
		 * Check to see whether this free block can be aggregated
		 * with pending one.
		 */
		if (drrf->drr_object == object && drrf->drr_offset +
		    drrf->drr_length == offset) {
			if (offset + length < offset)
				drrf->drr_length = DMU_OBJECT_END;
			else
				drrf->drr_length += length;
			return (0);
		} else {
			/* not a continuation.  Push out pending record */
			if (dump_record(dsp, NULL, 0) != 0)
				return (SET_ERROR(EINTR));
			dsp->dsa_pending_op = PENDING_NONE;
		}
	}
	/* create a FREE record and make it pending */
	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_FREE;
	drrf->drr_object = object;
	drrf->drr_offset = offset;
	if (offset + length < offset)
		drrf->drr_length = DMU_OBJECT_END;
	else
		drrf->drr_length = length;
	drrf->drr_toguid = dsp->dsa_toguid;
	if (length == DMU_OBJECT_END) {
		if (dump_record(dsp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
	} else {
		dsp->dsa_pending_op = PENDING_FREE;
	}

	return (0);
}
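/*
 * Aggregation example (editor's illustration): dump_free(dsp, 5, 0, 4096)
 * followed by dump_free(dsp, 5, 4096, 4096) produces a single pending
 * DRR_FREE record covering object 5, offset 0, length 8192, because the
 * second range starts exactly at drr_offset + drr_length of the pending
 * record.  A free of a different object, or at a non-adjacent offset,
 * pushes the pending record out first.
 */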
static int
dump_write(dmu_sendarg_t *dsp, dmu_object_type_t type, uint64_t object,
    uint64_t offset, int lsize, int psize, const blkptr_t *bp, void *data)
{
	uint64_t payload_size;
	boolean_t raw = (dsp->dsa_featureflags & DMU_BACKUP_FEATURE_RAW);
	struct drr_write *drrw = &(dsp->dsa_drr->drr_u.drr_write);

	/*
	 * We send data in increasing object, offset order.
	 * See comment in dump_free() for details.
	 */
	ASSERT(object > dsp->dsa_last_data_object ||
	    (object == dsp->dsa_last_data_object &&
	    offset > dsp->dsa_last_data_offset));
	dsp->dsa_last_data_object = object;
	dsp->dsa_last_data_offset = offset + lsize - 1;

	/*
	 * If there is any kind of pending aggregation (currently either
	 * a grouping of free objects or free blocks), push it out to
	 * the stream, since aggregation can't be done across operations
	 * of different types.
	 */
	if (dsp->dsa_pending_op != PENDING_NONE) {
		if (dump_record(dsp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
		dsp->dsa_pending_op = PENDING_NONE;
	}
	/* write a WRITE record */
	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_WRITE;
	drrw->drr_object = object;
	drrw->drr_type = type;
	drrw->drr_offset = offset;
	drrw->drr_toguid = dsp->dsa_toguid;
	drrw->drr_logical_size = lsize;

	/* only set the compression fields if the buf is compressed or raw */
	if (raw || lsize != psize) {
		ASSERT(!BP_IS_EMBEDDED(bp));
		ASSERT3S(psize, >, 0);

		if (raw) {
			ASSERT(BP_IS_PROTECTED(bp));

			/*
			 * This is a raw protected block so we need to pass
			 * along everything the receiving side will need to
			 * interpret this block, including the byteswap, salt,
			 * IV, and MAC.
			 */
			if (BP_SHOULD_BYTESWAP(bp))
				drrw->drr_flags |= DRR_RAW_BYTESWAP;
			zio_crypt_decode_params_bp(bp, drrw->drr_salt,
			    drrw->drr_iv);
			zio_crypt_decode_mac_bp(bp, drrw->drr_mac);
		} else {
			/* this is a compressed block */
			ASSERT(dsp->dsa_featureflags &
			    DMU_BACKUP_FEATURE_COMPRESSED);
			ASSERT(!BP_SHOULD_BYTESWAP(bp));
			ASSERT(!DMU_OT_IS_METADATA(BP_GET_TYPE(bp)));
			ASSERT3U(BP_GET_COMPRESS(bp), !=, ZIO_COMPRESS_OFF);
			ASSERT3S(lsize, >=, psize);
		}

		/* set fields common to compressed and raw sends */
		drrw->drr_compressiontype = BP_GET_COMPRESS(bp);
		drrw->drr_compressed_size = psize;
		payload_size = drrw->drr_compressed_size;
	} else {
		payload_size = drrw->drr_logical_size;
	}

	if (bp == NULL || BP_IS_EMBEDDED(bp) || (BP_IS_PROTECTED(bp) && !raw)) {
		/*
		 * There's no pre-computed checksum for partial-block writes,
		 * embedded BP's, or encrypted BP's that are being sent as
		 * plaintext, so (like fletcher4-checksummed blocks) userland
		 * will have to compute a dedup-capable checksum itself.
		 */
		drrw->drr_checksumtype = ZIO_CHECKSUM_OFF;
	} else {
		drrw->drr_checksumtype = BP_GET_CHECKSUM(bp);
		if (zio_checksum_table[drrw->drr_checksumtype].ci_flags &
		    ZCHECKSUM_FLAG_DEDUP)
			drrw->drr_flags |= DRR_CHECKSUM_DEDUP;
		DDK_SET_LSIZE(&drrw->drr_key, BP_GET_LSIZE(bp));
		DDK_SET_PSIZE(&drrw->drr_key, BP_GET_PSIZE(bp));
		DDK_SET_COMPRESS(&drrw->drr_key, BP_GET_COMPRESS(bp));
		DDK_SET_CRYPT(&drrw->drr_key, BP_IS_PROTECTED(bp));
		drrw->drr_key.ddk_cksum = bp->blk_cksum;
	}

	if (dump_record(dsp, data, payload_size) != 0)
		return (SET_ERROR(EINTR));
	return (0);
}
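/*
 * Editorial note: lsize is the logical size of the block (what the
 * receiver will allocate) and psize its physical size on disk.  The
 * record payload is psize bytes only for raw or compressed sends; for an
 * ordinary send lsize == psize and the compression fields stay zero.
 */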
static int
dump_write_embedded(dmu_sendarg_t *dsp, uint64_t object, uint64_t offset,
    int blksz, const blkptr_t *bp)
{
	char buf[BPE_PAYLOAD_SIZE];
	struct drr_write_embedded *drrw =
	    &(dsp->dsa_drr->drr_u.drr_write_embedded);

	if (dsp->dsa_pending_op != PENDING_NONE) {
		if (dump_record(dsp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
		dsp->dsa_pending_op = PENDING_NONE;
	}

	ASSERT(BP_IS_EMBEDDED(bp));

	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_WRITE_EMBEDDED;
	drrw->drr_object = object;
	drrw->drr_offset = offset;
	drrw->drr_length = blksz;
	drrw->drr_toguid = dsp->dsa_toguid;
	drrw->drr_compression = BP_GET_COMPRESS(bp);
	drrw->drr_etype = BPE_GET_ETYPE(bp);
	drrw->drr_lsize = BPE_GET_LSIZE(bp);
	drrw->drr_psize = BPE_GET_PSIZE(bp);

	decode_embedded_bp_compressed(bp, buf);

	if (dump_record(dsp, buf, P2ROUNDUP(drrw->drr_psize, 8)) != 0)
		return (SET_ERROR(EINTR));
	return (0);
}
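/*
 * Editorial note: an embedded block's payload lives in the block pointer
 * itself (at most BPE_PAYLOAD_SIZE bytes), so a DRR_WRITE_EMBEDDED record
 * is tiny; its payload is padded to an 8-byte boundary to match the
 * alignment receive_read() expects.
 */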
static int
dump_spill(dmu_sendarg_t *dsp, const blkptr_t *bp, uint64_t object, void *data)
{
	struct drr_spill *drrs = &(dsp->dsa_drr->drr_u.drr_spill);
	uint64_t blksz = BP_GET_LSIZE(bp);

	if (dsp->dsa_pending_op != PENDING_NONE) {
		if (dump_record(dsp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
		dsp->dsa_pending_op = PENDING_NONE;
	}

	/* write a SPILL record */
	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_SPILL;
	drrs->drr_object = object;
	drrs->drr_length = blksz;
	drrs->drr_toguid = dsp->dsa_toguid;

	/* handle raw send fields */
	if (dsp->dsa_featureflags & DMU_BACKUP_FEATURE_RAW) {
		ASSERT(BP_IS_PROTECTED(bp));

		if (BP_SHOULD_BYTESWAP(bp))
			drrs->drr_flags |= DRR_RAW_BYTESWAP;
		drrs->drr_compressiontype = BP_GET_COMPRESS(bp);
		drrs->drr_compressed_size = BP_GET_PSIZE(bp);
		zio_crypt_decode_params_bp(bp, drrs->drr_salt, drrs->drr_iv);
		zio_crypt_decode_mac_bp(bp, drrs->drr_mac);
	}

	if (dump_record(dsp, data, blksz) != 0)
		return (SET_ERROR(EINTR));
	return (0);
}
static int
dump_freeobjects(dmu_sendarg_t *dsp, uint64_t firstobj, uint64_t numobjs)
{
	struct drr_freeobjects *drrfo = &(dsp->dsa_drr->drr_u.drr_freeobjects);
	uint64_t maxobj = DNODES_PER_BLOCK *
	    (DMU_META_DNODE(dsp->dsa_os)->dn_maxblkid + 1);

	/*
	 * ZoL < 0.7 does not handle large FREEOBJECTS records correctly,
	 * leading to zfs recv never completing.  To avoid this issue, don't
	 * send FREEOBJECTS records for object IDs which cannot exist on the
	 * receiving side.
	 */
	if (maxobj > 0) {
		if (maxobj < firstobj)
			return (0);

		if (maxobj < firstobj + numobjs)
			numobjs = maxobj - firstobj;
	}

	/*
	 * If there is a pending op, but it's not PENDING_FREEOBJECTS,
	 * push it out, since free block aggregation can only be done for
	 * blocks of the same type (i.e., DRR_FREE records can only be
	 * aggregated with other DRR_FREE records.  DRR_FREEOBJECTS records
	 * can only be aggregated with other DRR_FREEOBJECTS records.
	 */
	if (dsp->dsa_pending_op != PENDING_NONE &&
	    dsp->dsa_pending_op != PENDING_FREEOBJECTS) {
		if (dump_record(dsp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
		dsp->dsa_pending_op = PENDING_NONE;
	}
	if (dsp->dsa_pending_op == PENDING_FREEOBJECTS) {
		/*
		 * See whether this free object array can be aggregated
		 * with pending one.
		 */
		if (drrfo->drr_firstobj + drrfo->drr_numobjs == firstobj) {
			drrfo->drr_numobjs += numobjs;
			return (0);
		} else {
			/* can't be aggregated.  Push out pending record */
			if (dump_record(dsp, NULL, 0) != 0)
				return (SET_ERROR(EINTR));
			dsp->dsa_pending_op = PENDING_NONE;
		}
	}

	/* write a FREEOBJECTS record */
	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_FREEOBJECTS;
	drrfo->drr_firstobj = firstobj;
	drrfo->drr_numobjs = numobjs;
	drrfo->drr_toguid = dsp->dsa_toguid;

	dsp->dsa_pending_op = PENDING_FREEOBJECTS;

	return (0);
}
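/*
 * Aggregation example (editor's illustration): dump_freeobjects(dsp, 100,
 * 64) followed by dump_freeobjects(dsp, 164, 64) leaves one pending
 * DRR_FREEOBJECTS record with drr_firstobj == 100 and drr_numobjs == 128,
 * since the second run begins exactly at drr_firstobj + drr_numobjs.
 */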
static int
dump_dnode(dmu_sendarg_t *dsp, const blkptr_t *bp, uint64_t object,
    dnode_phys_t *dnp)
{
	struct drr_object *drro = &(dsp->dsa_drr->drr_u.drr_object);
	int bonuslen;

	if (object < dsp->dsa_resume_object) {
		/*
		 * Note: when resuming, we will visit all the dnodes in
		 * the block of dnodes that we are resuming from.  In
		 * this case it's unnecessary to send the dnodes prior to
		 * the one we are resuming from.  We should be at most one
		 * block's worth of dnodes behind the resume point.
		 */
		ASSERT3U(dsp->dsa_resume_object - object, <,
		    1 << (DNODE_BLOCK_SHIFT - DNODE_SHIFT));
		return (0);
	}

	if (dnp == NULL || dnp->dn_type == DMU_OT_NONE)
		return (dump_freeobjects(dsp, object, 1));

	if (dsp->dsa_pending_op != PENDING_NONE) {
		if (dump_record(dsp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
		dsp->dsa_pending_op = PENDING_NONE;
	}

	/* write an OBJECT record */
	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_OBJECT;
	drro->drr_object = object;
	drro->drr_type = dnp->dn_type;
	drro->drr_bonustype = dnp->dn_bonustype;
	drro->drr_blksz = dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT;
	drro->drr_bonuslen = dnp->dn_bonuslen;
	drro->drr_dn_slots = dnp->dn_extra_slots + 1;
	drro->drr_checksumtype = dnp->dn_checksum;
	drro->drr_compress = dnp->dn_compress;
	drro->drr_toguid = dsp->dsa_toguid;

	if (!(dsp->dsa_featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) &&
	    drro->drr_blksz > SPA_OLD_MAXBLOCKSIZE)
		drro->drr_blksz = SPA_OLD_MAXBLOCKSIZE;

	bonuslen = P2ROUNDUP(dnp->dn_bonuslen, 8);

	if ((dsp->dsa_featureflags & DMU_BACKUP_FEATURE_RAW)) {
		ASSERT(BP_IS_ENCRYPTED(bp));

		if (BP_SHOULD_BYTESWAP(bp))
			drro->drr_flags |= DRR_RAW_BYTESWAP;

		/* needed for reconstructing dnp on recv side */
		drro->drr_maxblkid = dnp->dn_maxblkid;
		drro->drr_indblkshift = dnp->dn_indblkshift;
		drro->drr_nlevels = dnp->dn_nlevels;
		drro->drr_nblkptr = dnp->dn_nblkptr;

		/*
		 * Since we encrypt the entire bonus area, the (raw) part
		 * beyond the bonuslen is actually nonzero, so we need
		 * to send it.
		 */
		if (bonuslen != 0) {
			drro->drr_raw_bonuslen = DN_MAX_BONUS_LEN(dnp);
			bonuslen = drro->drr_raw_bonuslen;
		}
	}

	if (dump_record(dsp, DN_BONUS(dnp), bonuslen) != 0)
		return (SET_ERROR(EINTR));

	/* Free anything past the end of the file. */
	if (dump_free(dsp, object, (dnp->dn_maxblkid + 1) *
	    (dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT), DMU_OBJECT_END) != 0)
		return (SET_ERROR(EINTR));
	if (dsp->dsa_err != 0)
		return (SET_ERROR(EINTR));
	return (0);
}
static int
dump_object_range(dmu_sendarg_t *dsp, const blkptr_t *bp, uint64_t firstobj,
    uint64_t numslots)
{
	struct drr_object_range *drror =
	    &(dsp->dsa_drr->drr_u.drr_object_range);

	/* we only use this record type for raw sends */
	ASSERT(BP_IS_PROTECTED(bp));
	ASSERT(dsp->dsa_featureflags & DMU_BACKUP_FEATURE_RAW);
	ASSERT3U(BP_GET_COMPRESS(bp), ==, ZIO_COMPRESS_OFF);
	ASSERT3U(BP_GET_TYPE(bp), ==, DMU_OT_DNODE);
	ASSERT0(BP_GET_LEVEL(bp));

	if (dsp->dsa_pending_op != PENDING_NONE) {
		if (dump_record(dsp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
		dsp->dsa_pending_op = PENDING_NONE;
	}

	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_OBJECT_RANGE;
	drror->drr_firstobj = firstobj;
	drror->drr_numslots = numslots;
	drror->drr_toguid = dsp->dsa_toguid;
	if (BP_SHOULD_BYTESWAP(bp))
		drror->drr_flags |= DRR_RAW_BYTESWAP;
	zio_crypt_decode_params_bp(bp, drror->drr_salt, drror->drr_iv);
	zio_crypt_decode_mac_bp(bp, drror->drr_mac);

	if (dump_record(dsp, NULL, 0) != 0)
		return (SET_ERROR(EINTR));
	return (0);
}
static boolean_t
backup_do_embed(dmu_sendarg_t *dsp, const blkptr_t *bp)
{
	if (!BP_IS_EMBEDDED(bp))
		return (B_FALSE);

	/*
	 * Compression function must be legacy, or explicitly enabled.
	 */
	if ((BP_GET_COMPRESS(bp) >= ZIO_COMPRESS_LEGACY_FUNCTIONS &&
	    !(dsp->dsa_featureflags & DMU_BACKUP_FEATURE_LZ4)))
		return (B_FALSE);

	/*
	 * Embed type must be explicitly enabled.
	 */
	switch (BPE_GET_ETYPE(bp)) {
	case BP_EMBEDDED_TYPE_DATA:
		if (dsp->dsa_featureflags & DMU_BACKUP_FEATURE_EMBED_DATA)
			return (B_TRUE);
		break;
	default:
		return (B_FALSE);
	}
	return (B_FALSE);
}
/*
 * This is the callback function to traverse_dataset that acts as the worker
 * thread for dmu_send_impl.
 */
static int
send_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_phys_t *zb, const struct dnode_phys *dnp, void *arg)
{
	struct send_thread_arg *sta = arg;
	struct send_block_record *record;
	uint64_t record_size;

	ASSERT(zb->zb_object == DMU_META_DNODE_OBJECT ||
	    zb->zb_object >= sta->resume.zb_object);
	ASSERT3P(sta->ds, !=, NULL);

	if (sta->cancel)
		return (SET_ERROR(EINTR));

	if (bp == NULL) {
		ASSERT3U(zb->zb_level, ==, ZB_DNODE_LEVEL);
		return (0);
	} else if (zb->zb_level < 0) {
		return (0);
	}

	record = kmem_zalloc(sizeof (struct send_block_record), KM_SLEEP);
	record->eos_marker = B_FALSE;
	record->bp = *bp;
	record->zb = *zb;
	record->indblkshift = dnp->dn_indblkshift;
	record->datablkszsec = dnp->dn_datablkszsec;
	record_size = dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT;
	bqueue_enqueue(&sta->q, record, record_size);

	return (0);
}
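/*
 * Editorial note: bqueue_enqueue() charges each record by the logical
 * size of the data block it describes, so the traversal blocks once
 * roughly zfs_send_queue_length (16M by default) worth of blocks are
 * outstanding.  This provides backpressure against a consumer that is
 * slower than the prefetching traversal.
 */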
/*
 * This function kicks off the traverse_dataset.  It also handles setting the
 * error code of the thread in case something goes wrong, and pushes the End of
 * Stream record when the traverse_dataset call has finished.  If there is no
 * dataset to traverse, the thread immediately pushes an End of Stream marker.
 */
static void
send_traverse_thread(void *arg)
{
	struct send_thread_arg *st_arg = arg;
	int err;
	struct send_block_record *data;
	fstrans_cookie_t cookie = spl_fstrans_mark();

	if (st_arg->ds != NULL) {
		err = traverse_dataset_resume(st_arg->ds,
		    st_arg->fromtxg, &st_arg->resume,
		    st_arg->flags, send_cb, st_arg);

		if (err != EINTR)
			st_arg->error_code = err;
	}
	data = kmem_zalloc(sizeof (*data), KM_SLEEP);
	data->eos_marker = B_TRUE;
	bqueue_enqueue(&st_arg->q, data, 1);
	spl_fstrans_unmark(cookie);
}
/*
 * This function actually handles figuring out what kind of record needs to be
 * dumped, reading the data (which has hopefully been prefetched), and calling
 * the appropriate helper function.
 */
static int
do_dump(dmu_sendarg_t *dsa, struct send_block_record *data)
{
	dsl_dataset_t *ds = dmu_objset_ds(dsa->dsa_os);
	const blkptr_t *bp = &data->bp;
	const zbookmark_phys_t *zb = &data->zb;
	uint8_t indblkshift = data->indblkshift;
	uint16_t dblkszsec = data->datablkszsec;
	spa_t *spa = ds->ds_dir->dd_pool->dp_spa;
	dmu_object_type_t type = bp ? BP_GET_TYPE(bp) : DMU_OT_NONE;
	int err = 0;

	ASSERT3U(zb->zb_level, >=, 0);

	ASSERT(zb->zb_object == DMU_META_DNODE_OBJECT ||
	    zb->zb_object >= dsa->dsa_resume_object);

	/*
	 * All bps of an encrypted os should have the encryption bit set.
	 * If this is not true it indicates tampering and we report an error.
	 */
	if (dsa->dsa_os->os_encrypted &&
	    !BP_IS_HOLE(bp) && !BP_USES_CRYPT(bp)) {
		spa_log_error(spa, zb);
		zfs_panic_recover("unencrypted block in encrypted "
		    "object set %llu", ds->ds_object);
		return (SET_ERROR(EIO));
	}

	if (zb->zb_object != DMU_META_DNODE_OBJECT &&
	    DMU_OBJECT_IS_SPECIAL(zb->zb_object)) {
		return (0);
	} else if (BP_IS_HOLE(bp) &&
	    zb->zb_object == DMU_META_DNODE_OBJECT) {
		uint64_t span = BP_SPAN(dblkszsec, indblkshift, zb->zb_level);
		uint64_t dnobj = (zb->zb_blkid * span) >> DNODE_SHIFT;
		err = dump_freeobjects(dsa, dnobj, span >> DNODE_SHIFT);
	} else if (BP_IS_HOLE(bp)) {
		uint64_t span = BP_SPAN(dblkszsec, indblkshift, zb->zb_level);
		uint64_t offset = zb->zb_blkid * span;
		/* Don't dump free records for offsets > DMU_OBJECT_END */
		if (zb->zb_blkid == 0 || span <= DMU_OBJECT_END / zb->zb_blkid)
			err = dump_free(dsa, zb->zb_object, offset, span);
	} else if (zb->zb_level > 0 || type == DMU_OT_OBJSET) {
		return (0);
	} else if (type == DMU_OT_DNODE) {
		int epb = BP_GET_LSIZE(bp) >> DNODE_SHIFT;
		arc_flags_t aflags = ARC_FLAG_WAIT;
		arc_buf_t *abuf;
		enum zio_flag zioflags = ZIO_FLAG_CANFAIL;

		if (dsa->dsa_featureflags & DMU_BACKUP_FEATURE_RAW) {
			ASSERT(BP_IS_ENCRYPTED(bp));
			ASSERT3U(BP_GET_COMPRESS(bp), ==, ZIO_COMPRESS_OFF);
			zioflags |= ZIO_FLAG_RAW;
		}

		ASSERT0(zb->zb_level);

		if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
		    ZIO_PRIORITY_ASYNC_READ, zioflags, &aflags, zb) != 0)
			return (SET_ERROR(EIO));

		dnode_phys_t *blk = abuf->b_data;
		uint64_t dnobj = zb->zb_blkid * epb;

		/*
		 * Raw sends require sending encryption parameters for the
		 * block of dnodes.  Regular sends do not need to send this
		 * info.
		 */
		if (dsa->dsa_featureflags & DMU_BACKUP_FEATURE_RAW) {
			ASSERT(arc_is_encrypted(abuf));
			err = dump_object_range(dsa, bp, dnobj, epb);
		}

		if (err == 0) {
			for (int i = 0; i < epb;
			    i += blk[i].dn_extra_slots + 1) {
				err = dump_dnode(dsa, bp, dnobj + i, blk + i);
				if (err != 0)
					break;
			}
		}
		arc_buf_destroy(abuf, &abuf);
	} else if (type == DMU_OT_SA) {
		arc_flags_t aflags = ARC_FLAG_WAIT;
		arc_buf_t *abuf;
		enum zio_flag zioflags = ZIO_FLAG_CANFAIL;

		if (dsa->dsa_featureflags & DMU_BACKUP_FEATURE_RAW) {
			ASSERT(BP_IS_PROTECTED(bp));
			zioflags |= ZIO_FLAG_RAW;
		}

		if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
		    ZIO_PRIORITY_ASYNC_READ, zioflags, &aflags, zb) != 0)
			return (SET_ERROR(EIO));

		err = dump_spill(dsa, bp, zb->zb_object, abuf->b_data);
		arc_buf_destroy(abuf, &abuf);
	} else if (backup_do_embed(dsa, bp)) {
		/* it's an embedded level-0 block of a regular object */
		int blksz = dblkszsec << SPA_MINBLOCKSHIFT;
		ASSERT0(zb->zb_level);
		err = dump_write_embedded(dsa, zb->zb_object,
		    zb->zb_blkid * blksz, blksz, bp);
	} else {
		/* it's a level-0 block of a regular object */
		arc_flags_t aflags = ARC_FLAG_WAIT;
		arc_buf_t *abuf;
		int blksz = dblkszsec << SPA_MINBLOCKSHIFT;
		uint64_t offset;

		/*
		 * If we have large blocks stored on disk but the send flags
		 * don't allow us to send large blocks, we split the data from
		 * the arc buf into chunks.
		 */
		boolean_t split_large_blocks = blksz > SPA_OLD_MAXBLOCKSIZE &&
		    !(dsa->dsa_featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS);

		/*
		 * Raw sends require that we always get raw data as it exists
		 * on disk, so we assert that we are not splitting blocks here.
		 */
		boolean_t request_raw =
		    (dsa->dsa_featureflags & DMU_BACKUP_FEATURE_RAW) != 0;

		/*
		 * We should only request compressed data from the ARC if all
		 * the following are true:
		 * - stream compression was requested
		 * - we aren't splitting large blocks into smaller chunks
		 * - the data won't need to be byteswapped before sending
		 * - this isn't an embedded block
		 * - this isn't metadata (if receiving on a different endian
		 *   system it can be byteswapped more easily)
		 */
		boolean_t request_compressed =
		    (dsa->dsa_featureflags & DMU_BACKUP_FEATURE_COMPRESSED) &&
		    !split_large_blocks && !BP_SHOULD_BYTESWAP(bp) &&
		    !BP_IS_EMBEDDED(bp) && !DMU_OT_IS_METADATA(BP_GET_TYPE(bp));

		IMPLY(request_raw, !split_large_blocks);
		IMPLY(request_raw, BP_IS_PROTECTED(bp));
		ASSERT0(zb->zb_level);
		ASSERT(zb->zb_object > dsa->dsa_resume_object ||
		    (zb->zb_object == dsa->dsa_resume_object &&
		    zb->zb_blkid * blksz >= dsa->dsa_resume_offset));

		ASSERT3U(blksz, ==, BP_GET_LSIZE(bp));

		enum zio_flag zioflags = ZIO_FLAG_CANFAIL;
		if (request_raw)
			zioflags |= ZIO_FLAG_RAW;
		else if (request_compressed)
			zioflags |= ZIO_FLAG_RAW_COMPRESS;

		if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
		    ZIO_PRIORITY_ASYNC_READ, zioflags, &aflags, zb) != 0) {
			if (zfs_send_corrupt_data) {
				uint64_t *ptr;
				/* Send a block filled with 0x"zfs badd bloc" */
				abuf = arc_alloc_buf(spa, &abuf, ARC_BUFC_DATA,
				    blksz);
				for (ptr = abuf->b_data;
				    (char *)ptr < (char *)abuf->b_data + blksz;
				    ptr++)
					*ptr = 0x2f5baddb10cULL;
			} else {
				return (SET_ERROR(EIO));
			}
		}

		offset = zb->zb_blkid * blksz;

		if (split_large_blocks) {
			ASSERT0(arc_is_encrypted(abuf));
			ASSERT3U(arc_get_compression(abuf), ==,
			    ZIO_COMPRESS_OFF);
			char *buf = abuf->b_data;
			while (blksz > 0 && err == 0) {
				int n = MIN(blksz, SPA_OLD_MAXBLOCKSIZE);
				err = dump_write(dsa, type, zb->zb_object,
				    offset, n, n, NULL, buf);
				offset += n;
				buf += n;
				blksz -= n;
			}
		} else {
			err = dump_write(dsa, type, zb->zb_object, offset,
			    blksz, arc_buf_size(abuf), bp, abuf->b_data);
		}
		arc_buf_destroy(abuf, &abuf);
	}

	ASSERT(err == 0 || err == EINTR);
	return (err);
}
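/*
 * Example (editor's illustration): with a 1M on-disk block and a stream
 * that did not negotiate DMU_BACKUP_FEATURE_LARGE_BLOCKS,
 * split_large_blocks is set and the loop above emits eight 128K
 * (SPA_OLD_MAXBLOCKSIZE) DRR_WRITE records.  The bp argument is NULL for
 * those split writes, so no dedup-capable checksum is carried for them.
 */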
/*
 * Pop the new data off the queue, and free the old data.
 */
static struct send_block_record *
get_next_record(bqueue_t *bq, struct send_block_record *data)
{
	struct send_block_record *tmp = bqueue_dequeue(bq);
	kmem_free(data, sizeof (*data));
	return (tmp);
}
/*
 * Actually do the bulk of the work in a zfs send.
 *
 * Note: Releases dp using the specified tag.
 */
static int
dmu_send_impl(void *tag, dsl_pool_t *dp, dsl_dataset_t *to_ds,
    zfs_bookmark_phys_t *ancestor_zb, boolean_t is_clone,
    boolean_t embedok, boolean_t large_block_ok, boolean_t compressok,
    boolean_t rawok, int outfd, uint64_t resumeobj, uint64_t resumeoff,
    vnode_t *vp, offset_t *off)
{
	objset_t *os;
	dmu_replay_record_t *drr;
	dmu_sendarg_t *dsp;
	int err;
	uint64_t fromtxg = 0;
	uint64_t featureflags = 0;
	struct send_thread_arg to_arg;
	void *payload = NULL;
	size_t payload_len = 0;
	struct send_block_record *to_data;

	err = dmu_objset_from_ds(to_ds, &os);
	if (err != 0) {
		dsl_pool_rele(dp, tag);
		return (err);
	}

	/*
	 * If this is a non-raw send of an encrypted ds, we can ensure that
	 * the objset_phys_t is authenticated.  This is safe because this is
	 * either a snapshot or we have owned the dataset, ensuring that
	 * it can't be modified.
	 */
	if (!rawok && os->os_encrypted &&
	    arc_is_unauthenticated(os->os_phys_buf)) {
		zbookmark_phys_t zb;

		SET_BOOKMARK(&zb, to_ds->ds_object, ZB_ROOT_OBJECT,
		    ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
		err = arc_untransform(os->os_phys_buf, os->os_spa,
		    &zb, B_FALSE);
		if (err != 0) {
			dsl_pool_rele(dp, tag);
			return (err);
		}

		ASSERT0(arc_is_unauthenticated(os->os_phys_buf));
	}

	drr = kmem_zalloc(sizeof (dmu_replay_record_t), KM_SLEEP);
	drr->drr_type = DRR_BEGIN;
	drr->drr_u.drr_begin.drr_magic = DMU_BACKUP_MAGIC;
	DMU_SET_STREAM_HDRTYPE(drr->drr_u.drr_begin.drr_versioninfo,
	    DMU_SUBSTREAM);

	bzero(&to_arg, sizeof (to_arg));

	if (dmu_objset_type(os) == DMU_OST_ZFS) {
		uint64_t version;
		if (zfs_get_zplprop(os, ZFS_PROP_VERSION, &version) != 0) {
			kmem_free(drr, sizeof (dmu_replay_record_t));
			dsl_pool_rele(dp, tag);
			return (SET_ERROR(EINVAL));
		}
		if (version >= ZPL_VERSION_SA) {
			featureflags |= DMU_BACKUP_FEATURE_SA_SPILL;
		}
	}

	/* raw sends imply large_block_ok */
	if ((large_block_ok || rawok) &&
	    to_ds->ds_feature_inuse[SPA_FEATURE_LARGE_BLOCKS])
		featureflags |= DMU_BACKUP_FEATURE_LARGE_BLOCKS;
	if (to_ds->ds_feature_inuse[SPA_FEATURE_LARGE_DNODE])
		featureflags |= DMU_BACKUP_FEATURE_LARGE_DNODE;

	/* encrypted datasets will not have embedded blocks */
	if ((embedok || rawok) && !os->os_encrypted &&
	    spa_feature_is_active(dp->dp_spa, SPA_FEATURE_EMBEDDED_DATA)) {
		featureflags |= DMU_BACKUP_FEATURE_EMBED_DATA;
	}

	/* raw send implies compressok */
	if (compressok || rawok)
		featureflags |= DMU_BACKUP_FEATURE_COMPRESSED;
	if (rawok && os->os_encrypted)
		featureflags |= DMU_BACKUP_FEATURE_RAW;

	if ((featureflags &
	    (DMU_BACKUP_FEATURE_EMBED_DATA | DMU_BACKUP_FEATURE_COMPRESSED |
	    DMU_BACKUP_FEATURE_RAW)) != 0 &&
	    spa_feature_is_active(dp->dp_spa, SPA_FEATURE_LZ4_COMPRESS)) {
		featureflags |= DMU_BACKUP_FEATURE_LZ4;
	}

	if (resumeobj != 0 || resumeoff != 0) {
		featureflags |= DMU_BACKUP_FEATURE_RESUMING;
	}

	DMU_SET_FEATUREFLAGS(drr->drr_u.drr_begin.drr_versioninfo,
	    featureflags);

	drr->drr_u.drr_begin.drr_creation_time =
	    dsl_dataset_phys(to_ds)->ds_creation_time;
	drr->drr_u.drr_begin.drr_type = dmu_objset_type(os);
	if (is_clone)
		drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_CLONE;
	drr->drr_u.drr_begin.drr_toguid = dsl_dataset_phys(to_ds)->ds_guid;
	if (dsl_dataset_phys(to_ds)->ds_flags & DS_FLAG_CI_DATASET)
		drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_CI_DATA;
	if (zfs_send_set_freerecords_bit)
		drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_FREERECORDS;

	if (ancestor_zb != NULL) {
		drr->drr_u.drr_begin.drr_fromguid =
		    ancestor_zb->zbm_guid;
		fromtxg = ancestor_zb->zbm_creation_txg;
	}
	dsl_dataset_name(to_ds, drr->drr_u.drr_begin.drr_toname);
	if (!to_ds->ds_is_snapshot) {
		(void) strlcat(drr->drr_u.drr_begin.drr_toname, "@--head--",
		    sizeof (drr->drr_u.drr_begin.drr_toname));
	}

	dsp = kmem_zalloc(sizeof (dmu_sendarg_t), KM_SLEEP);

	dsp->dsa_drr = drr;
	dsp->dsa_vp = vp;
	dsp->dsa_outfd = outfd;
	dsp->dsa_proc = curproc;
	dsp->dsa_os = os;
	dsp->dsa_off = off;
	dsp->dsa_toguid = dsl_dataset_phys(to_ds)->ds_guid;
	dsp->dsa_pending_op = PENDING_NONE;
	dsp->dsa_featureflags = featureflags;
	dsp->dsa_resume_object = resumeobj;
	dsp->dsa_resume_offset = resumeoff;

	mutex_enter(&to_ds->ds_sendstream_lock);
	list_insert_head(&to_ds->ds_sendstreams, dsp);
	mutex_exit(&to_ds->ds_sendstream_lock);

	dsl_dataset_long_hold(to_ds, FTAG);
	dsl_pool_rele(dp, tag);

	/* handle features that require a DRR_BEGIN payload */
	if (featureflags &
	    (DMU_BACKUP_FEATURE_RESUMING | DMU_BACKUP_FEATURE_RAW)) {
		nvlist_t *keynvl = NULL;
		nvlist_t *nvl = fnvlist_alloc();

		if (featureflags & DMU_BACKUP_FEATURE_RESUMING) {
			dmu_object_info_t to_doi;
			err = dmu_object_info(os, resumeobj, &to_doi);
			if (err != 0) {
				fnvlist_free(nvl);
				goto out;
			}

			SET_BOOKMARK(&to_arg.resume, to_ds->ds_object,
			    resumeobj, 0,
			    resumeoff / to_doi.doi_data_block_size);

			fnvlist_add_uint64(nvl, "resume_object", resumeobj);
			fnvlist_add_uint64(nvl, "resume_offset", resumeoff);
		}

		if (featureflags & DMU_BACKUP_FEATURE_RAW) {
			ASSERT(os->os_encrypted);

			err = dsl_crypto_populate_key_nvlist(to_ds, &keynvl);
			if (err != 0) {
				fnvlist_free(nvl);
				goto out;
			}

			fnvlist_add_nvlist(nvl, "crypt_keydata", keynvl);
		}

		payload = fnvlist_pack(nvl, &payload_len);
		drr->drr_payloadlen = payload_len;
		fnvlist_free(keynvl);
		fnvlist_free(nvl);
	}

	err = dump_record(dsp, payload, payload_len);
	fnvlist_pack_free(payload, payload_len);
	if (err != 0) {
		err = dsp->dsa_err;
		goto out;
	}

	err = bqueue_init(&to_arg.q, zfs_send_queue_length,
	    offsetof(struct send_block_record, ln));
	to_arg.error_code = 0;
	to_arg.cancel = B_FALSE;
	to_arg.ds = to_ds;
	to_arg.fromtxg = fromtxg;
	to_arg.flags = TRAVERSE_PRE | TRAVERSE_PREFETCH;
	if (rawok)
		to_arg.flags |= TRAVERSE_NO_DECRYPT;
	(void) thread_create(NULL, 0, send_traverse_thread, &to_arg, 0, curproc,
	    TS_RUN, minclsyspri);

	to_data = bqueue_dequeue(&to_arg.q);

	while (!to_data->eos_marker && err == 0) {
		err = do_dump(dsp, to_data);
		to_data = get_next_record(&to_arg.q, to_data);
		if (issig(JUSTLOOKING) && issig(FORREAL))
			err = SET_ERROR(EINTR);
	}

	if (err != 0) {
		to_arg.cancel = B_TRUE;
		while (!to_data->eos_marker) {
			to_data = get_next_record(&to_arg.q, to_data);
		}
	}
	kmem_free(to_data, sizeof (*to_data));

	bqueue_destroy(&to_arg.q);

	if (err == 0 && to_arg.error_code != 0)
		err = to_arg.error_code;

	if (err != 0)
		goto out;

	if (dsp->dsa_pending_op != PENDING_NONE)
		if (dump_record(dsp, NULL, 0) != 0)
			err = SET_ERROR(EINTR);

	if (err != 0) {
		if (err == EINTR && dsp->dsa_err != 0)
			err = dsp->dsa_err;
		goto out;
	}

	bzero(drr, sizeof (dmu_replay_record_t));
	drr->drr_type = DRR_END;
	drr->drr_u.drr_end.drr_checksum = dsp->dsa_zc;
	drr->drr_u.drr_end.drr_toguid = dsp->dsa_toguid;

	if (dump_record(dsp, NULL, 0) != 0)
		err = dsp->dsa_err;
out:
	mutex_enter(&to_ds->ds_sendstream_lock);
	list_remove(&to_ds->ds_sendstreams, dsp);
	mutex_exit(&to_ds->ds_sendstream_lock);

	VERIFY(err != 0 || (dsp->dsa_sent_begin && dsp->dsa_sent_end));

	kmem_free(drr, sizeof (dmu_replay_record_t));
	kmem_free(dsp, sizeof (dmu_sendarg_t));

	dsl_dataset_long_rele(to_ds, FTAG);

	return (err);
}
, uint64_t tosnap
, uint64_t fromsnap
,
1216 boolean_t embedok
, boolean_t large_block_ok
, boolean_t compressok
,
1217 boolean_t rawok
, int outfd
, vnode_t
*vp
, offset_t
*off
)
1221 dsl_dataset_t
*fromds
= NULL
;
1222 ds_hold_flags_t dsflags
= (rawok
) ? 0 : DS_HOLD_FLAG_DECRYPT
;
1225 err
= dsl_pool_hold(pool
, FTAG
, &dp
);
1229 err
= dsl_dataset_hold_obj_flags(dp
, tosnap
, dsflags
, FTAG
, &ds
);
1231 dsl_pool_rele(dp
, FTAG
);
1235 if (fromsnap
!= 0) {
1236 zfs_bookmark_phys_t zb
;
1239 err
= dsl_dataset_hold_obj(dp
, fromsnap
, FTAG
, &fromds
);
1241 dsl_dataset_rele_flags(ds
, dsflags
, FTAG
);
1242 dsl_pool_rele(dp
, FTAG
);
1245 if (!dsl_dataset_is_before(ds
, fromds
, 0))
1246 err
= SET_ERROR(EXDEV
);
1247 zb
.zbm_creation_time
=
1248 dsl_dataset_phys(fromds
)->ds_creation_time
;
1249 zb
.zbm_creation_txg
= dsl_dataset_phys(fromds
)->ds_creation_txg
;
1250 zb
.zbm_guid
= dsl_dataset_phys(fromds
)->ds_guid
;
1251 is_clone
= (fromds
->ds_dir
!= ds
->ds_dir
);
1252 dsl_dataset_rele(fromds
, FTAG
);
1253 err
= dmu_send_impl(FTAG
, dp
, ds
, &zb
, is_clone
,
1254 embedok
, large_block_ok
, compressok
, rawok
, outfd
,
1257 err
= dmu_send_impl(FTAG
, dp
, ds
, NULL
, B_FALSE
,
1258 embedok
, large_block_ok
, compressok
, rawok
, outfd
,
1261 dsl_dataset_rele_flags(ds
, dsflags
, FTAG
);
int
dmu_send(const char *tosnap, const char *fromsnap, boolean_t embedok,
    boolean_t large_block_ok, boolean_t compressok, boolean_t rawok,
    int outfd, uint64_t resumeobj, uint64_t resumeoff, vnode_t *vp,
    offset_t *off)
{
	dsl_pool_t *dp;
	dsl_dataset_t *ds;
	int err;
	ds_hold_flags_t dsflags = (rawok) ? 0 : DS_HOLD_FLAG_DECRYPT;
	boolean_t owned = B_FALSE;

	if (fromsnap != NULL && strpbrk(fromsnap, "@#") == NULL)
		return (SET_ERROR(EINVAL));

	err = dsl_pool_hold(tosnap, FTAG, &dp);
	if (err != 0)
		return (err);

	if (strchr(tosnap, '@') == NULL && spa_writeable(dp->dp_spa)) {
		/*
		 * We are sending a filesystem or volume.  Ensure
		 * that it doesn't change by owning the dataset.
		 */
		err = dsl_dataset_own(dp, tosnap, dsflags, FTAG, &ds);
		owned = B_TRUE;
	} else {
		err = dsl_dataset_hold_flags(dp, tosnap, dsflags, FTAG, &ds);
	}
	if (err != 0) {
		dsl_pool_rele(dp, FTAG);
		return (err);
	}

	if (fromsnap != NULL) {
		zfs_bookmark_phys_t zb;
		boolean_t is_clone = B_FALSE;
		int fsnamelen = strchr(tosnap, '@') - tosnap;

		/*
		 * If the fromsnap is in a different filesystem, then
		 * mark the send stream as a clone.
		 */
		if (strncmp(tosnap, fromsnap, fsnamelen) != 0 ||
		    (fromsnap[fsnamelen] != '@' &&
		    fromsnap[fsnamelen] != '#')) {
			is_clone = B_TRUE;
		}

		if (strchr(fromsnap, '@')) {
			dsl_dataset_t *fromds;
			err = dsl_dataset_hold(dp, fromsnap, FTAG, &fromds);
			if (err == 0) {
				if (!dsl_dataset_is_before(ds, fromds, 0))
					err = SET_ERROR(EXDEV);
				zb.zbm_creation_time =
				    dsl_dataset_phys(fromds)->ds_creation_time;
				zb.zbm_creation_txg =
				    dsl_dataset_phys(fromds)->ds_creation_txg;
				zb.zbm_guid = dsl_dataset_phys(fromds)->ds_guid;
				is_clone = (ds->ds_dir != fromds->ds_dir);
				dsl_dataset_rele(fromds, FTAG);
			}
		} else {
			err = dsl_bookmark_lookup(dp, fromsnap, ds, &zb);
		}
		if (err != 0) {
			if (owned)
				dsl_dataset_disown(ds, dsflags, FTAG);
			else
				dsl_dataset_rele_flags(ds, dsflags, FTAG);
			dsl_pool_rele(dp, FTAG);
			return (err);
		}
		err = dmu_send_impl(FTAG, dp, ds, &zb, is_clone,
		    embedok, large_block_ok, compressok, rawok,
		    outfd, resumeobj, resumeoff, vp, off);
	} else {
		err = dmu_send_impl(FTAG, dp, ds, NULL, B_FALSE,
		    embedok, large_block_ok, compressok, rawok,
		    outfd, resumeobj, resumeoff, vp, off);
	}
	if (owned)
		dsl_dataset_disown(ds, dsflags, FTAG);
	else
		dsl_dataset_rele_flags(ds, dsflags, FTAG);
	return (err);
}
static int
dmu_adjust_send_estimate_for_indirects(dsl_dataset_t *ds, uint64_t uncompressed,
    uint64_t compressed, boolean_t stream_compressed, uint64_t *sizep)
{
	int err = 0;
	uint64_t size;
	/*
	 * Assume that space (both on-disk and in-stream) is dominated by
	 * data.  We will adjust for indirect blocks and the copies property,
	 * but ignore per-object space used (eg, dnodes and DRR_OBJECT records).
	 */
	uint64_t recordsize;
	uint64_t record_count;
	objset_t *os;
	VERIFY0(dmu_objset_from_ds(ds, &os));

	/* Assume all (uncompressed) blocks are recordsize. */
	if (os->os_phys->os_type == DMU_OST_ZVOL) {
		err = dsl_prop_get_int_ds(ds,
		    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &recordsize);
	} else {
		err = dsl_prop_get_int_ds(ds,
		    zfs_prop_to_name(ZFS_PROP_RECORDSIZE), &recordsize);
	}
	if (err != 0)
		return (err);
	record_count = uncompressed / recordsize;

	/*
	 * If we're estimating a send size for a compressed stream, use the
	 * compressed data size to estimate the stream size.  Otherwise, use the
	 * uncompressed data size.
	 */
	size = stream_compressed ? compressed : uncompressed;

	/*
	 * Subtract out approximate space used by indirect blocks.
	 * Assume most space is used by data blocks (non-indirect, non-dnode).
	 * Assume no ditto blocks or internal fragmentation.
	 *
	 * Therefore, space used by indirect blocks is sizeof(blkptr_t) per
	 * block.
	 */
	size -= record_count * sizeof (blkptr_t);

	/* Add in the space for the record associated with each block. */
	size += record_count * sizeof (dmu_replay_record_t);

	*sizep = size;

	return (0);
}
*ds
, dsl_dataset_t
*fromds
,
1412 boolean_t stream_compressed
, uint64_t *sizep
)
1415 uint64_t uncomp
, comp
;
1417 ASSERT(dsl_pool_config_held(ds
->ds_dir
->dd_pool
));
1419 /* tosnap must be a snapshot */
1420 if (!ds
->ds_is_snapshot
)
1421 return (SET_ERROR(EINVAL
));
1423 /* fromsnap, if provided, must be a snapshot */
1424 if (fromds
!= NULL
&& !fromds
->ds_is_snapshot
)
1425 return (SET_ERROR(EINVAL
));
1428 * fromsnap must be an earlier snapshot from the same fs as tosnap,
1429 * or the origin's fs.
1431 if (fromds
!= NULL
&& !dsl_dataset_is_before(ds
, fromds
, 0))
1432 return (SET_ERROR(EXDEV
));
1434 /* Get compressed and uncompressed size estimates of changed data. */
1435 if (fromds
== NULL
) {
1436 uncomp
= dsl_dataset_phys(ds
)->ds_uncompressed_bytes
;
1437 comp
= dsl_dataset_phys(ds
)->ds_compressed_bytes
;
1440 err
= dsl_dataset_space_written(fromds
, ds
,
1441 &used
, &comp
, &uncomp
);
1446 err
= dmu_adjust_send_estimate_for_indirects(ds
, uncomp
, comp
,
1447 stream_compressed
, sizep
);
1449 * Add the size of the BEGIN and END records to the estimate.
1451 *sizep
+= 2 * sizeof (dmu_replay_record_t
);
1455 struct calculate_send_arg
{
1456 uint64_t uncompressed
;
1457 uint64_t compressed
;
1461 * Simple callback used to traverse the blocks of a snapshot and sum their
1462 * uncompressed and compressed sizes.
1466 dmu_calculate_send_traversal(spa_t
*spa
, zilog_t
*zilog
, const blkptr_t
*bp
,
1467 const zbookmark_phys_t
*zb
, const dnode_phys_t
*dnp
, void *arg
)
1469 struct calculate_send_arg
*space
= arg
;
1470 if (bp
!= NULL
&& !BP_IS_HOLE(bp
)) {
1471 space
->uncompressed
+= BP_GET_UCSIZE(bp
);
1472 space
->compressed
+= BP_GET_PSIZE(bp
);
1478 * Given a desination snapshot and a TXG, calculate the approximate size of a
1479 * send stream sent from that TXG. from_txg may be zero, indicating that the
1480 * whole snapshot will be sent.
1483 dmu_send_estimate_from_txg(dsl_dataset_t
*ds
, uint64_t from_txg
,
1484 boolean_t stream_compressed
, uint64_t *sizep
)
1487 struct calculate_send_arg size
= { 0 };
1489 ASSERT(dsl_pool_config_held(ds
->ds_dir
->dd_pool
));
1491 /* tosnap must be a snapshot */
1492 if (!dsl_dataset_is_snapshot(ds
))
1493 return (SET_ERROR(EINVAL
));
1495 /* verify that from_txg is before the provided snapshot was taken */
1496 if (from_txg
>= dsl_dataset_phys(ds
)->ds_creation_txg
) {
1497 return (SET_ERROR(EXDEV
));
1500 * traverse the blocks of the snapshot with birth times after
1501 * from_txg, summing their uncompressed size
1503 err
= traverse_dataset(ds
, from_txg
,
1504 TRAVERSE_POST
| TRAVERSE_NO_DECRYPT
,
1505 dmu_calculate_send_traversal
, &size
);
1510 err
= dmu_adjust_send_estimate_for_indirects(ds
, size
.uncompressed
,
1511 size
.compressed
, stream_compressed
, sizep
);
1515 typedef struct dmu_recv_begin_arg
{
1516 const char *drba_origin
;
1517 dmu_recv_cookie_t
*drba_cookie
;
1519 uint64_t drba_snapobj
;
1520 } dmu_recv_begin_arg_t
;
static int
recv_begin_check_existing_impl(dmu_recv_begin_arg_t *drba, dsl_dataset_t *ds,
    uint64_t fromguid)
{
	uint64_t val;
	int error;
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
	uint64_t featureflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo);
	boolean_t encrypted = ds->ds_dir->dd_crypto_obj != 0;
	boolean_t raw = (featureflags & DMU_BACKUP_FEATURE_RAW) != 0;

	/* temporary clone name must not exist */
	error = zap_lookup(dp->dp_meta_objset,
	    dsl_dir_phys(ds->ds_dir)->dd_child_dir_zapobj, recv_clone_name,
	    8, 1, &val);
	if (error != ENOENT)
		return (error == 0 ? EBUSY : error);

	/* new snapshot name must not exist */
	error = zap_lookup(dp->dp_meta_objset,
	    dsl_dataset_phys(ds)->ds_snapnames_zapobj,
	    drba->drba_cookie->drc_tosnap, 8, 1, &val);
	if (error != ENOENT)
		return (error == 0 ? EEXIST : error);

	/*
	 * Check snapshot limit before receiving.  We'll recheck again at the
	 * end, but might as well abort before receiving if we're already over
	 * the limit.
	 *
	 * Note that we do not check the file system limit with
	 * dsl_dir_fscount_check because the temporary %clones don't count
	 * against that limit.
	 */
	error = dsl_fs_ss_limit_check(ds->ds_dir, 1, ZFS_PROP_SNAPSHOT_LIMIT,
	    NULL, drba->drba_cred);
	if (error != 0)
		return (error);

	if (fromguid != 0) {
		dsl_dataset_t *snap;
		uint64_t obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;

		/* Can't perform a raw receive on top of a non-raw receive */
		if (!encrypted && raw)
			return (SET_ERROR(EINVAL));

		/* Find snapshot in this dir that matches fromguid. */
		while (obj != 0) {
			error = dsl_dataset_hold_obj(dp, obj, FTAG,
			    &snap);
			if (error != 0)
				return (SET_ERROR(ENODEV));
			if (snap->ds_dir != ds->ds_dir) {
				dsl_dataset_rele(snap, FTAG);
				return (SET_ERROR(ENODEV));
			}
			if (dsl_dataset_phys(snap)->ds_guid == fromguid)
				break;
			obj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
			dsl_dataset_rele(snap, FTAG);
		}
		if (obj == 0)
			return (SET_ERROR(ENODEV));

		if (drba->drba_cookie->drc_force) {
			drba->drba_snapobj = obj;
		} else {
			/*
			 * If we are not forcing, there must be no
			 * changes since fromsnap.
			 */
			if (dsl_dataset_modified_since_snap(ds, snap)) {
				dsl_dataset_rele(snap, FTAG);
				return (SET_ERROR(ETXTBSY));
			}
			drba->drba_snapobj = ds->ds_prev->ds_object;
		}

		dsl_dataset_rele(snap, FTAG);
	} else {
		/* if full, then must be forced */
		if (!drba->drba_cookie->drc_force)
			return (SET_ERROR(EEXIST));

		/*
		 * We don't support using zfs recv -F to blow away
		 * encrypted filesystems.  This would require the
		 * dsl dir to point to the old encryption key and
		 * the new one at the same time during the receive.
		 */
		if ((!encrypted && raw) || encrypted)
			return (SET_ERROR(EINVAL));

		drba->drba_snapobj = 0;
	}

	return (0);
}
static int
dmu_recv_begin_check(void *arg, dmu_tx_t *tx)
{
	dmu_recv_begin_arg_t *drba = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
	uint64_t fromguid = drrb->drr_fromguid;
	int flags = drrb->drr_flags;
	ds_hold_flags_t dsflags = 0;
	int error;
	uint64_t featureflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo);
	dsl_dataset_t *ds;
	const char *tofs = drba->drba_cookie->drc_tofs;

	/* already checked */
	ASSERT3U(drrb->drr_magic, ==, DMU_BACKUP_MAGIC);
	ASSERT(!(featureflags & DMU_BACKUP_FEATURE_RESUMING));

	if (DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) ==
	    DMU_COMPOUNDSTREAM ||
	    drrb->drr_type >= DMU_OST_NUMTYPES ||
	    ((flags & DRR_FLAG_CLONE) && drba->drba_origin == NULL))
		return (SET_ERROR(EINVAL));

	/* Verify pool version supports SA if SA_SPILL feature set */
	if ((featureflags & DMU_BACKUP_FEATURE_SA_SPILL) &&
	    spa_version(dp->dp_spa) < SPA_VERSION_SA)
		return (SET_ERROR(ENOTSUP));

	if (drba->drba_cookie->drc_resumable &&
	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_EXTENSIBLE_DATASET))
		return (SET_ERROR(ENOTSUP));

	/*
	 * The receiving code doesn't know how to translate a WRITE_EMBEDDED
	 * record to a plain WRITE record, so the pool must have the
	 * EMBEDDED_DATA feature enabled if the stream has WRITE_EMBEDDED
	 * records.  Same with WRITE_EMBEDDED records that use LZ4 compression.
	 */
	if ((featureflags & DMU_BACKUP_FEATURE_EMBED_DATA) &&
	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_EMBEDDED_DATA))
		return (SET_ERROR(ENOTSUP));
	if ((featureflags & DMU_BACKUP_FEATURE_LZ4) &&
	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LZ4_COMPRESS))
		return (SET_ERROR(ENOTSUP));

	/*
	 * The receiving code doesn't know how to translate large blocks
	 * to smaller ones, so the pool must have the LARGE_BLOCKS
	 * feature enabled if the stream has LARGE_BLOCKS.  Same with
	 * large dnodes.
	 */
	if ((featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) &&
	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LARGE_BLOCKS))
		return (SET_ERROR(ENOTSUP));
	if ((featureflags & DMU_BACKUP_FEATURE_LARGE_DNODE) &&
	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LARGE_DNODE))
		return (SET_ERROR(ENOTSUP));

	if ((featureflags & DMU_BACKUP_FEATURE_RAW)) {
		/* raw receives require the encryption feature */
		if (!spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_ENCRYPTION))
			return (SET_ERROR(ENOTSUP));
	} else {
		dsflags |= DS_HOLD_FLAG_DECRYPT;
	}

	error = dsl_dataset_hold_flags(dp, tofs, dsflags, FTAG, &ds);
	if (error == 0) {
		/* target fs already exists; recv into temp clone */

		/* Can't recv a clone into an existing fs */
		if (flags & DRR_FLAG_CLONE || drba->drba_origin) {
			dsl_dataset_rele_flags(ds, dsflags, FTAG);
			return (SET_ERROR(EINVAL));
		}

		error = recv_begin_check_existing_impl(drba, ds, fromguid);
		dsl_dataset_rele_flags(ds, dsflags, FTAG);
	} else if (error == ENOENT) {
		/* target fs does not exist; must be a full backup or clone */
		char buf[ZFS_MAX_DATASET_NAME_LEN];

		/*
		 * If it's a non-clone incremental, we are missing the
		 * target fs, so fail the recv.
		 */
		if (fromguid != 0 && !(flags & DRR_FLAG_CLONE ||
		    drba->drba_origin))
			return (SET_ERROR(ENOENT));

		/*
		 * If we're receiving a full send as a clone, and it doesn't
		 * contain all the necessary free records and freeobject
		 * records, reject it.
		 */
		if (fromguid == 0 && drba->drba_origin &&
		    !(flags & DRR_FLAG_FREERECORDS))
			return (SET_ERROR(EINVAL));

		/* Open the parent of tofs */
		ASSERT3U(strlen(tofs), <, sizeof (buf));
		(void) strlcpy(buf, tofs, strrchr(tofs, '/') - tofs + 1);
		error = dsl_dataset_hold_flags(dp, buf, dsflags, FTAG, &ds);
		if (error != 0)
			return (error);

		/*
		 * Check filesystem and snapshot limits before receiving.  We'll
		 * recheck snapshot limits again at the end (we create the
		 * filesystems and increment those counts during begin_sync).
		 */
		error = dsl_fs_ss_limit_check(ds->ds_dir, 1,
		    ZFS_PROP_FILESYSTEM_LIMIT, NULL, drba->drba_cred);
		if (error != 0) {
			dsl_dataset_rele_flags(ds, dsflags, FTAG);
			return (error);
		}

		error = dsl_fs_ss_limit_check(ds->ds_dir, 1,
		    ZFS_PROP_SNAPSHOT_LIMIT, NULL, drba->drba_cred);
		if (error != 0) {
			dsl_dataset_rele_flags(ds, dsflags, FTAG);
			return (error);
		}

		if (drba->drba_origin != NULL) {
			dsl_dataset_t *origin;

			error = dsl_dataset_hold_flags(dp, drba->drba_origin,
			    dsflags, FTAG, &origin);
			if (error != 0) {
				dsl_dataset_rele_flags(ds, dsflags, FTAG);
				return (error);
			}
			if (!origin->ds_is_snapshot) {
				dsl_dataset_rele_flags(origin, dsflags, FTAG);
				dsl_dataset_rele_flags(ds, dsflags, FTAG);
				return (SET_ERROR(EINVAL));
			}
			if (dsl_dataset_phys(origin)->ds_guid != fromguid &&
			    fromguid != 0) {
				dsl_dataset_rele_flags(origin, dsflags, FTAG);
				dsl_dataset_rele_flags(ds, dsflags, FTAG);
				return (SET_ERROR(ENODEV));
			}
			dsl_dataset_rele_flags(origin,
			    dsflags, FTAG);
		}
		dsl_dataset_rele_flags(ds, dsflags, FTAG);
		error = 0;
	}
	return (error);
}
static void
dmu_recv_begin_sync(void *arg, dmu_tx_t *tx)
{
	dmu_recv_begin_arg_t *drba = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	objset_t *mos = dp->dp_meta_objset;
	struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
	const char *tofs = drba->drba_cookie->drc_tofs;
	uint64_t featureflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo);
	dsl_dataset_t *ds, *newds;
	objset_t *os;
	uint64_t dsobj;
	ds_hold_flags_t dsflags = 0;
	int error;
	uint64_t crflags = 0;
	dsl_crypto_params_t *dcpp = NULL;
	dsl_crypto_params_t dcp = { 0 };

	if (drrb->drr_flags & DRR_FLAG_CI_DATA)
		crflags |= DS_FLAG_CI_DATASET;
	if ((featureflags & DMU_BACKUP_FEATURE_RAW) == 0) {
		dsflags |= DS_HOLD_FLAG_DECRYPT;
	} else {
		dcp.cp_cmd = DCP_CMD_RAW_RECV;
	}

	error = dsl_dataset_hold_flags(dp, tofs, dsflags, FTAG, &ds);
	if (error == 0) {
		/* create temporary clone */
		dsl_dataset_t *snap = NULL;

		if (drba->drba_snapobj != 0) {
			VERIFY0(dsl_dataset_hold_obj(dp,
			    drba->drba_snapobj, FTAG, &snap));
		} else {
			/* we use the dcp whenever we are not making a clone */
			dcpp = &dcp;
		}

		dsobj = dsl_dataset_create_sync(ds->ds_dir, recv_clone_name,
		    snap, crflags, drba->drba_cred, dcpp, tx);
		if (drba->drba_snapobj != 0)
			dsl_dataset_rele(snap, FTAG);
		dsl_dataset_rele_flags(ds, dsflags, FTAG);
	} else {
		dsl_dir_t *dd;
		const char *tail;
		dsl_dataset_t *origin = NULL;

		VERIFY0(dsl_dir_hold(dp, tofs, FTAG, &dd, &tail));

		if (drba->drba_origin != NULL) {
			VERIFY0(dsl_dataset_hold(dp, drba->drba_origin,
			    FTAG, &origin));
		} else {
			/* we use the dcp whenever we are not making a clone */
			dcpp = &dcp;
		}

		/* Create new dataset. */
		dsobj = dsl_dataset_create_sync(dd, strrchr(tofs, '/') + 1,
		    origin, crflags, drba->drba_cred, dcpp, tx);
		if (origin != NULL)
			dsl_dataset_rele(origin, FTAG);
		dsl_dir_rele(dd, FTAG);
		drba->drba_cookie->drc_newfs = B_TRUE;
	}

	VERIFY0(dsl_dataset_own_obj(dp, dsobj, dsflags, dmu_recv_tag, &newds));
	VERIFY0(dmu_objset_from_ds(newds, &os));

	if (drba->drba_cookie->drc_resumable) {
		dsl_dataset_zapify(newds, tx);
		if (drrb->drr_fromguid != 0) {
			VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_FROMGUID,
			    8, 1, &drrb->drr_fromguid, tx));
		}
		VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_TOGUID,
		    8, 1, &drrb->drr_toguid, tx));
		VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_TONAME,
		    1, strlen(drrb->drr_toname) + 1, drrb->drr_toname, tx));
		uint64_t one = 1;
		uint64_t zero = 0;
		VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_OBJECT,
		    8, 1, &one, tx));
		VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_OFFSET,
		    8, 1, &zero, tx));
		VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_BYTES,
		    8, 1, &zero, tx));
		if (featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) {
			VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_LARGEBLOCK,
			    8, 1, &one, tx));
		}
		if (featureflags & DMU_BACKUP_FEATURE_EMBED_DATA) {
			VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_EMBEDOK,
			    8, 1, &one, tx));
		}
		if (featureflags & DMU_BACKUP_FEATURE_COMPRESSED) {
			VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_COMPRESSOK,
			    8, 1, &one, tx));
		}
		if (featureflags & DMU_BACKUP_FEATURE_RAW) {
			VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_RAWOK,
			    8, 1, &one, tx));
		}
	}

	/*
	 * Usually the os->os_encrypted value is tied to the presence of a
	 * DSL Crypto Key object in the dd.  However, that will not be received
	 * until dmu_recv_stream(), so we set the value manually for now.
	 */
	if (featureflags & DMU_BACKUP_FEATURE_RAW) {
		os->os_encrypted = B_TRUE;
		drba->drba_cookie->drc_raw = B_TRUE;
	}

	dmu_buf_will_dirty(newds->ds_dbuf, tx);
	dsl_dataset_phys(newds)->ds_flags |= DS_FLAG_INCONSISTENT;

	/*
	 * If we actually created a non-clone, we need to create the objset
	 * in our new dataset.  If this is a raw send we postpone this until
	 * dmu_recv_stream() so that we can allocate the metadnode with the
	 * properties from the DRR_BEGIN payload.
	 */
	rrw_enter(&newds->ds_bp_rwlock, RW_READER, FTAG);
	if (BP_IS_HOLE(dsl_dataset_get_blkptr(newds)) &&
	    (featureflags & DMU_BACKUP_FEATURE_RAW) == 0) {
		(void) dmu_objset_create_impl(dp->dp_spa,
		    newds, dsl_dataset_get_blkptr(newds), drrb->drr_type, tx);
	}
	rrw_exit(&newds->ds_bp_rwlock, FTAG);

	drba->drba_cookie->drc_ds = newds;

	spa_history_log_internal_ds(newds, "receive", tx, "");
}
, dmu_tx_t
*tx
)
1920 dmu_recv_begin_arg_t
*drba
= arg
;
1921 dsl_pool_t
*dp
= dmu_tx_pool(tx
);
1922 struct drr_begin
*drrb
= drba
->drba_cookie
->drc_drrb
;
1924 ds_hold_flags_t dsflags
= 0;
1925 uint64_t featureflags
= DMU_GET_FEATUREFLAGS(drrb
->drr_versioninfo
);
1927 const char *tofs
= drba
->drba_cookie
->drc_tofs
;
1929 /* already checked */
1930 ASSERT3U(drrb
->drr_magic
, ==, DMU_BACKUP_MAGIC
);
1931 ASSERT(featureflags
& DMU_BACKUP_FEATURE_RESUMING
);
1933 if (DMU_GET_STREAM_HDRTYPE(drrb
->drr_versioninfo
) ==
1934 DMU_COMPOUNDSTREAM
||
1935 drrb
->drr_type
>= DMU_OST_NUMTYPES
)
1936 return (SET_ERROR(EINVAL
));
1938 /* Verify pool version supports SA if SA_SPILL feature set */
1939 if ((featureflags
& DMU_BACKUP_FEATURE_SA_SPILL
) &&
1940 spa_version(dp
->dp_spa
) < SPA_VERSION_SA
)
1941 return (SET_ERROR(ENOTSUP
));
1944 * The receiving code doesn't know how to translate a WRITE_EMBEDDED
1945 * record to a plain WRITE record, so the pool must have the
1946 * EMBEDDED_DATA feature enabled if the stream has WRITE_EMBEDDED
1947 * records. Same with WRITE_EMBEDDED records that use LZ4 compression.
1949 if ((featureflags
& DMU_BACKUP_FEATURE_EMBED_DATA
) &&
1950 !spa_feature_is_enabled(dp
->dp_spa
, SPA_FEATURE_EMBEDDED_DATA
))
1951 return (SET_ERROR(ENOTSUP
));
1952 if ((featureflags
& DMU_BACKUP_FEATURE_LZ4
) &&
1953 !spa_feature_is_enabled(dp
->dp_spa
, SPA_FEATURE_LZ4_COMPRESS
))
1954 return (SET_ERROR(ENOTSUP
));
1957 * The receiving code doesn't know how to translate large blocks
1958 * to smaller ones, so the pool must have the LARGE_BLOCKS
1959 * feature enabled if the stream has LARGE_BLOCKS. Same with
1962 if ((featureflags
& DMU_BACKUP_FEATURE_LARGE_BLOCKS
) &&
1963 !spa_feature_is_enabled(dp
->dp_spa
, SPA_FEATURE_LARGE_BLOCKS
))
1964 return (SET_ERROR(ENOTSUP
));
1965 if ((featureflags
& DMU_BACKUP_FEATURE_LARGE_DNODE
) &&
1966 !spa_feature_is_enabled(dp
->dp_spa
, SPA_FEATURE_LARGE_DNODE
))
1967 return (SET_ERROR(ENOTSUP
));
1969 /* 6 extra bytes for /%recv */
1970 char recvname
[ZFS_MAX_DATASET_NAME_LEN
+ 6];
1971 (void) snprintf(recvname
, sizeof (recvname
), "%s/%s",
1972 tofs
, recv_clone_name
);
1974 if ((featureflags
& DMU_BACKUP_FEATURE_RAW
) == 0)
1975 dsflags
|= DS_HOLD_FLAG_DECRYPT
;
1977 if (dsl_dataset_hold_flags(dp
, recvname
, dsflags
, FTAG
, &ds
) != 0) {
1978 /* %recv does not exist; continue in tofs */
1979 error
= dsl_dataset_hold_flags(dp
, tofs
, dsflags
, FTAG
, &ds
);
1984 /* check that ds is marked inconsistent */
1985 if (!DS_IS_INCONSISTENT(ds
)) {
1986 dsl_dataset_rele_flags(ds
, dsflags
, FTAG
);
1987 return (SET_ERROR(EINVAL
));
1990 /* check that there is resuming data, and that the toguid matches */
1991 if (!dsl_dataset_is_zapified(ds
)) {
1992 dsl_dataset_rele_flags(ds
, dsflags
, FTAG
);
1993 return (SET_ERROR(EINVAL
));
1996 error
= zap_lookup(dp
->dp_meta_objset
, ds
->ds_object
,
1997 DS_FIELD_RESUME_TOGUID
, sizeof (val
), 1, &val
);
1998 if (error
!= 0 || drrb
->drr_toguid
!= val
) {
1999 dsl_dataset_rele_flags(ds
, dsflags
, FTAG
);
2000 return (SET_ERROR(EINVAL
));
2004 * Check if the receive is still running. If so, it will be owned.
2005 * Note that nothing else can own the dataset (e.g. after the receive
2006 * fails) because it will be marked inconsistent.
2008 if (dsl_dataset_has_owner(ds
)) {
2009 dsl_dataset_rele_flags(ds
, dsflags
, FTAG
);
2010 return (SET_ERROR(EBUSY
));
2013 /* There should not be any snapshots of this fs yet. */
2014 if (ds
->ds_prev
!= NULL
&& ds
->ds_prev
->ds_dir
== ds
->ds_dir
) {
2015 dsl_dataset_rele_flags(ds
, dsflags
, FTAG
);
2016 return (SET_ERROR(EINVAL
));
2020 * Note: resume point will be checked when we process the first WRITE
2024 /* check that the origin matches */
2026 (void) zap_lookup(dp
->dp_meta_objset
, ds
->ds_object
,
2027 DS_FIELD_RESUME_FROMGUID
, sizeof (val
), 1, &val
);
2028 if (drrb
->drr_fromguid
!= val
) {
2029 dsl_dataset_rele_flags(ds
, dsflags
, FTAG
);
2030 return (SET_ERROR(EINVAL
));
2033 dsl_dataset_rele_flags(ds
, dsflags
, FTAG
);
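/*
 * Sync-phase counterpart of dmu_recv_resume_begin_check(): briefly clears
 * DS_FLAG_INCONSISTENT so the dataset can be owned, takes ownership with
 * dmu_recv_tag, then re-marks it inconsistent for the duration of the
 * receive.
 */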
static void
dmu_recv_resume_begin_sync(void *arg, dmu_tx_t *tx)
{
	dmu_recv_begin_arg_t *drba = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	const char *tofs = drba->drba_cookie->drc_tofs;
	struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
	uint64_t featureflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo);
	dsl_dataset_t *ds;
	objset_t *os;
	ds_hold_flags_t dsflags = 0;
	uint64_t dsobj;
	/* 6 extra bytes for /%recv */
	char recvname[ZFS_MAX_DATASET_NAME_LEN + 6];

	(void) snprintf(recvname, sizeof (recvname), "%s/%s",
	    tofs, recv_clone_name);

	if (featureflags & DMU_BACKUP_FEATURE_RAW) {
		drba->drba_cookie->drc_raw = B_TRUE;
	} else {
		dsflags |= DS_HOLD_FLAG_DECRYPT;
	}

	if (dsl_dataset_hold_flags(dp, recvname, dsflags, FTAG, &ds) != 0) {
		/* %recv does not exist; continue in tofs */
		VERIFY0(dsl_dataset_hold_flags(dp, tofs, dsflags, FTAG, &ds));
		drba->drba_cookie->drc_newfs = B_TRUE;
	}

	/* clear the inconsistent flag so that we can own it */
	ASSERT(DS_IS_INCONSISTENT(ds));
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	dsl_dataset_phys(ds)->ds_flags &= ~DS_FLAG_INCONSISTENT;
	dsobj = ds->ds_object;
	dsl_dataset_rele_flags(ds, dsflags, FTAG);

	VERIFY0(dsl_dataset_own_obj(dp, dsobj, dsflags, dmu_recv_tag, &ds));
	VERIFY0(dmu_objset_from_ds(ds, &os));

	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	dsl_dataset_phys(ds)->ds_flags |= DS_FLAG_INCONSISTENT;

	rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
	ASSERT(!BP_IS_HOLE(dsl_dataset_get_blkptr(ds)) ||
	    drba->drba_cookie->drc_raw);
	rrw_exit(&ds->ds_bp_rwlock, FTAG);

	drba->drba_cookie->drc_ds = ds;

	spa_history_log_internal_ds(ds, "resume receive", tx, "");
}
/*
 * NB: callers *MUST* call dmu_recv_stream() if dmu_recv_begin()
 * succeeds; otherwise we will leak the holds on the datasets.
 */
int
dmu_recv_begin(char *tofs, char *tosnap, dmu_replay_record_t *drr_begin,
    boolean_t force, boolean_t resumable, char *origin, dmu_recv_cookie_t *drc)
{
	dmu_recv_begin_arg_t drba = { 0 };

	bzero(drc, sizeof (dmu_recv_cookie_t));
	drc->drc_drr_begin = drr_begin;
	drc->drc_drrb = &drr_begin->drr_u.drr_begin;
	drc->drc_tosnap = tosnap;
	drc->drc_tofs = tofs;
	drc->drc_force = force;
	drc->drc_resumable = resumable;
	drc->drc_cred = CRED();
	drc->drc_clone = (origin != NULL);

	if (drc->drc_drrb->drr_magic == BSWAP_64(DMU_BACKUP_MAGIC)) {
		drc->drc_byteswap = B_TRUE;
		(void) fletcher_4_incremental_byteswap(drr_begin,
		    sizeof (dmu_replay_record_t), &drc->drc_cksum);
		byteswap_record(drr_begin);
	} else if (drc->drc_drrb->drr_magic == DMU_BACKUP_MAGIC) {
		(void) fletcher_4_incremental_native(drr_begin,
		    sizeof (dmu_replay_record_t), &drc->drc_cksum);
	} else {
		return (SET_ERROR(EINVAL));
	}

	drba.drba_origin = origin;
	drba.drba_cookie = drc;
	drba.drba_cred = CRED();

	if (DMU_GET_FEATUREFLAGS(drc->drc_drrb->drr_versioninfo) &
	    DMU_BACKUP_FEATURE_RESUMING) {
		return (dsl_sync_task(tofs,
		    dmu_recv_resume_begin_check, dmu_recv_resume_begin_sync,
		    &drba, 5, ZFS_SPACE_CHECK_NORMAL));
	} else {
		return (dsl_sync_task(tofs,
		    dmu_recv_begin_check, dmu_recv_begin_sync,
		    &drba, 5, ZFS_SPACE_CHECK_NORMAL));
	}
}
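/*
 * The structures below carry state between the two halves of the receive
 * path: receive_arg belongs to the thread that reads the stream, while
 * receive_writer_arg belongs to the worker thread that applies records
 * to the DMU (see dmu_recv_stream() below).
 */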
struct receive_record_arg {
	dmu_replay_record_t header;
	void *payload; /* Pointer to a buffer containing the payload */
	/*
	 * If the record is a write, pointer to the arc_buf_t containing the
	 * payload.
	 */
	arc_buf_t *arc_buf;
	int payload_size;
	uint64_t bytes_read; /* bytes read from stream when record created */
	boolean_t eos_marker; /* Marks the end of the stream */
	bqueue_node_t node;
};

struct receive_writer_arg {
	objset_t *os;
	boolean_t byteswap;
	bqueue_t q;

	/*
	 * These three args are used to signal to the main thread that we're
	 * done.
	 */
	kmutex_t mutex;
	kcondvar_t cv;
	boolean_t done;

	int err;
	/* A map from guid to dataset to help handle dedup'd streams. */
	avl_tree_t *guid_to_ds_map;
	boolean_t resumable;
	boolean_t raw;
	uint64_t last_object;
	uint64_t last_offset;
	uint64_t max_object; /* highest object ID referenced in stream */
	uint64_t bytes_read; /* bytes read when current record created */

	/* Encryption parameters for the last received DRR_OBJECT_RANGE */
	uint64_t or_firstobj;
	uint64_t or_numslots;
	uint8_t or_salt[ZIO_DATA_SALT_LEN];
	uint8_t or_iv[ZIO_DATA_IV_LEN];
	uint8_t or_mac[ZIO_DATA_MAC_LEN];
	boolean_t or_byteorder;
};

struct objlist {
	list_t list; /* List of struct receive_objnode. */
	/*
	 * Last object looked up. Used to assert that objects are being looked
	 * up in ascending order.
	 */
	uint64_t last_lookup;
};

struct receive_objnode {
	list_node_t node;
	uint64_t object;
};

struct receive_arg {
	objset_t *os;
	vnode_t *vp; /* The vnode to read the stream from */
	uint64_t voff; /* The current offset in the stream */
	uint64_t bytes_read;
	/*
	 * A record that has had its payload read in, but hasn't yet been handed
	 * off to the worker thread.
	 */
	struct receive_record_arg *rrd;
	/* A record that has had its header read in, but not its payload. */
	struct receive_record_arg *next_rrd;
	zio_cksum_t cksum;
	zio_cksum_t prev_cksum;
	int err;
	boolean_t byteswap;
	boolean_t raw;
	uint64_t featureflags;
	/* Sorted list of objects not to issue prefetches for. */
	struct objlist ignore_objlist;
};
typedef struct guid_map_entry {
	uint64_t guid;
	boolean_t raw;
	dsl_dataset_t *gme_ds;
	avl_node_t avlnode;
} guid_map_entry_t;

static int
guid_compare(const void *arg1, const void *arg2)
{
	const guid_map_entry_t *gmep1 = (const guid_map_entry_t *)arg1;
	const guid_map_entry_t *gmep2 = (const guid_map_entry_t *)arg2;

	return (AVL_CMP(gmep1->guid, gmep2->guid));
}

static void
free_guid_map_onexit(void *arg)
{
	avl_tree_t *ca = arg;
	void *cookie = NULL;
	guid_map_entry_t *gmep;

	while ((gmep = avl_destroy_nodes(ca, &cookie)) != NULL) {
		dsl_dataset_long_rele(gmep->gme_ds, gmep);
		dsl_dataset_rele_flags(gmep->gme_ds,
		    (gmep->raw) ? 0 : DS_HOLD_FLAG_DECRYPT, gmep);
		kmem_free(gmep, sizeof (guid_map_entry_t));
	}
	avl_destroy(ca);
	kmem_free(ca, sizeof (avl_tree_t));
}
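/*
 * Low-level stream read helper: loops on vn_rdwr() until exactly len
 * bytes have arrived, advancing ra->voff and ra->bytes_read so that a
 * resumable receive can record how far into the stream it got.
 */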
static int
receive_read(struct receive_arg *ra, int len, void *buf)
{
	int done = 0;

	/*
	 * The code doesn't rely on this (lengths being multiples of 8). See
	 * comment in dump_bytes.
	 */
	ASSERT(len % 8 == 0 ||
	    (ra->featureflags & DMU_BACKUP_FEATURE_RAW) != 0);

	while (done < len) {
		ssize_t resid;

		ra->err = vn_rdwr(UIO_READ, ra->vp,
		    (char *)buf + done, len - done,
		    ra->voff, UIO_SYSSPACE, FAPPEND,
		    RLIM64_INFINITY, CRED(), &resid);

		if (resid == len - done) {
			/*
			 * Note: ECKSUM indicates that the receive
			 * was interrupted and can potentially be resumed.
			 */
			ra->err = SET_ERROR(ECKSUM);
		}
		ra->voff += len - done - resid;
		done = len - resid;
		if (ra->err != 0)
			return (ra->err);
	}

	ra->bytes_read += len;

	ASSERT3U(done, ==, len);
	return (0);
}
noinline static void
byteswap_record(dmu_replay_record_t *drr)
{
#define	DO64(X) (drr->drr_u.X = BSWAP_64(drr->drr_u.X))
#define	DO32(X) (drr->drr_u.X = BSWAP_32(drr->drr_u.X))
	drr->drr_type = BSWAP_32(drr->drr_type);
	drr->drr_payloadlen = BSWAP_32(drr->drr_payloadlen);

	switch (drr->drr_type) {
	case DRR_BEGIN:
		DO64(drr_begin.drr_magic);
		DO64(drr_begin.drr_versioninfo);
		DO64(drr_begin.drr_creation_time);
		DO32(drr_begin.drr_type);
		DO32(drr_begin.drr_flags);
		DO64(drr_begin.drr_toguid);
		DO64(drr_begin.drr_fromguid);
		break;
	case DRR_OBJECT:
		DO64(drr_object.drr_object);
		DO32(drr_object.drr_type);
		DO32(drr_object.drr_bonustype);
		DO32(drr_object.drr_blksz);
		DO32(drr_object.drr_bonuslen);
		DO32(drr_object.drr_raw_bonuslen);
		DO64(drr_object.drr_toguid);
		DO64(drr_object.drr_maxblkid);
		break;
	case DRR_FREEOBJECTS:
		DO64(drr_freeobjects.drr_firstobj);
		DO64(drr_freeobjects.drr_numobjs);
		DO64(drr_freeobjects.drr_toguid);
		break;
	case DRR_WRITE:
		DO64(drr_write.drr_object);
		DO32(drr_write.drr_type);
		DO64(drr_write.drr_offset);
		DO64(drr_write.drr_logical_size);
		DO64(drr_write.drr_toguid);
		ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_write.drr_key.ddk_cksum);
		DO64(drr_write.drr_key.ddk_prop);
		DO64(drr_write.drr_compressed_size);
		break;
	case DRR_WRITE_BYREF:
		DO64(drr_write_byref.drr_object);
		DO64(drr_write_byref.drr_offset);
		DO64(drr_write_byref.drr_length);
		DO64(drr_write_byref.drr_toguid);
		DO64(drr_write_byref.drr_refguid);
		DO64(drr_write_byref.drr_refobject);
		DO64(drr_write_byref.drr_refoffset);
		ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_write_byref.
		    drr_key.ddk_cksum);
		DO64(drr_write_byref.drr_key.ddk_prop);
		break;
	case DRR_WRITE_EMBEDDED:
		DO64(drr_write_embedded.drr_object);
		DO64(drr_write_embedded.drr_offset);
		DO64(drr_write_embedded.drr_length);
		DO64(drr_write_embedded.drr_toguid);
		DO32(drr_write_embedded.drr_lsize);
		DO32(drr_write_embedded.drr_psize);
		break;
	case DRR_FREE:
		DO64(drr_free.drr_object);
		DO64(drr_free.drr_offset);
		DO64(drr_free.drr_length);
		DO64(drr_free.drr_toguid);
		break;
	case DRR_SPILL:
		DO64(drr_spill.drr_object);
		DO64(drr_spill.drr_length);
		DO64(drr_spill.drr_toguid);
		DO64(drr_spill.drr_compressed_size);
		DO32(drr_spill.drr_type);
		break;
	case DRR_OBJECT_RANGE:
		DO64(drr_object_range.drr_firstobj);
		DO64(drr_object_range.drr_numslots);
		DO64(drr_object_range.drr_toguid);
		break;
	case DRR_END:
		DO64(drr_end.drr_toguid);
		ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_end.drr_checksum);
		break;
	default:
		break;
	}

	if (drr->drr_type != DRR_BEGIN) {
		ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_checksum.drr_checksum);
	}

#undef DO64
#undef DO32
}
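/*
 * In an old-style dnode the bonus buffer and the block pointer array
 * share the same space, so the number of block pointers the send side
 * must have used can be deduced from the bonus type and length: an SA
 * bonus buffer always leaves exactly one blkptr, and otherwise each
 * blkptr-sized chunk not consumed by the bonus buffer is a blkptr.
 */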
static inline uint8_t
deduce_nblkptr(dmu_object_type_t bonus_type, uint64_t bonus_size)
{
	if (bonus_type == DMU_OT_SA) {
		return (1);
	} else {
		return (1 +
		    ((DN_OLD_MAX_BONUSLEN -
		    MIN(DN_OLD_MAX_BONUSLEN, bonus_size)) >> SPA_BLKPTRSHIFT));
	}
}
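/*
 * Record how far the receive has progressed (object, offset, bytes read)
 * in the dataset's per-txg resume arrays; these are synced out with the
 * txg so an interrupted stream can later be resumed from this point.
 */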
static void
save_resume_state(struct receive_writer_arg *rwa,
    uint64_t object, uint64_t offset, dmu_tx_t *tx)
{
	int txgoff = dmu_tx_get_txg(tx) & TXG_MASK;

	if (!rwa->resumable)
		return;

	/*
	 * We use ds_resume_bytes[] != 0 to indicate that we need to
	 * update this on disk, so it must not be 0.
	 */
	ASSERT(rwa->bytes_read != 0);

	/*
	 * We only resume from write records, which have a valid
	 * (non-meta-dnode) object number.
	 */
	ASSERT(object != 0);

	/*
	 * For resuming to work correctly, we must receive records in order,
	 * sorted by object,offset.  This is checked by the callers, but
	 * assert it here for good measure.
	 */
	ASSERT3U(object, >=, rwa->os->os_dsl_dataset->ds_resume_object[txgoff]);
	ASSERT(object != rwa->os->os_dsl_dataset->ds_resume_object[txgoff] ||
	    offset >= rwa->os->os_dsl_dataset->ds_resume_offset[txgoff]);
	ASSERT3U(rwa->bytes_read, >=,
	    rwa->os->os_dsl_dataset->ds_resume_bytes[txgoff]);

	rwa->os->os_dsl_dataset->ds_resume_object[txgoff] = object;
	rwa->os->os_dsl_dataset->ds_resume_offset[txgoff] = offset;
	rwa->os->os_dsl_dataset->ds_resume_bytes[txgoff] = rwa->bytes_read;
}
noinline static int
receive_object(struct receive_writer_arg *rwa, struct drr_object *drro,
    void *data)
{
	dmu_object_info_t doi;
	dmu_tx_t *tx;
	uint64_t object;
	int err;

	if (drro->drr_type == DMU_OT_NONE ||
	    !DMU_OT_IS_VALID(drro->drr_type) ||
	    !DMU_OT_IS_VALID(drro->drr_bonustype) ||
	    drro->drr_checksumtype >= ZIO_CHECKSUM_FUNCTIONS ||
	    drro->drr_compress >= ZIO_COMPRESS_FUNCTIONS ||
	    P2PHASE(drro->drr_blksz, SPA_MINBLOCKSIZE) ||
	    drro->drr_blksz < SPA_MINBLOCKSIZE ||
	    drro->drr_blksz > spa_maxblocksize(dmu_objset_spa(rwa->os)) ||
	    drro->drr_bonuslen >
	    DN_BONUS_SIZE(spa_maxdnodesize(dmu_objset_spa(rwa->os))) ||
	    drro->drr_dn_slots >
	    (spa_maxdnodesize(dmu_objset_spa(rwa->os)) >> DNODE_SHIFT)) {
		return (SET_ERROR(EINVAL));
	}

	if (rwa->raw) {
		/*
		 * We should have received a DRR_OBJECT_RANGE record
		 * containing this block and stored it in rwa.
		 */
		if (drro->drr_object < rwa->or_firstobj ||
		    drro->drr_object >= rwa->or_firstobj + rwa->or_numslots ||
		    drro->drr_raw_bonuslen < drro->drr_bonuslen ||
		    drro->drr_indblkshift > SPA_MAXBLOCKSHIFT ||
		    drro->drr_nlevels > DN_MAX_LEVELS ||
		    drro->drr_nblkptr > DN_MAX_NBLKPTR ||
		    DN_SLOTS_TO_BONUSLEN(drro->drr_dn_slots) <
		    drro->drr_raw_bonuslen)
			return (SET_ERROR(EINVAL));
	} else {
		if (drro->drr_flags != 0 || drro->drr_raw_bonuslen != 0 ||
		    drro->drr_indblkshift != 0 || drro->drr_nlevels != 0 ||
		    drro->drr_nblkptr != 0)
			return (SET_ERROR(EINVAL));
	}

	err = dmu_object_info(rwa->os, drro->drr_object, &doi);
	if (err != 0 && err != ENOENT && err != EEXIST)
		return (SET_ERROR(EINVAL));

	if (drro->drr_object > rwa->max_object)
		rwa->max_object = drro->drr_object;

	/*
	 * If we are losing blkptrs or changing the block size this must
	 * be a new file instance.  We must clear out the previous file
	 * contents before we can change this type of metadata in the dnode.
	 * Raw receives will also check that the indirect structure of the
	 * dnode hasn't changed.
	 */
	if (err == 0) {
		uint32_t indblksz = drro->drr_indblkshift ?
		    1ULL << drro->drr_indblkshift : 0;
		int nblkptr = deduce_nblkptr(drro->drr_bonustype,
		    drro->drr_bonuslen);

		object = drro->drr_object;

		/* nblkptr will be bounded by the bonus size and type */
		if (rwa->raw && nblkptr != drro->drr_nblkptr)
			return (SET_ERROR(EINVAL));

		if (rwa->raw &&
		    (drro->drr_blksz != doi.doi_data_block_size ||
		    nblkptr < doi.doi_nblkptr ||
		    indblksz != doi.doi_metadata_block_size ||
		    drro->drr_nlevels < doi.doi_indirection ||
		    drro->drr_dn_slots != doi.doi_dnodesize >> DNODE_SHIFT)) {
			err = dmu_free_long_range_raw(rwa->os,
			    drro->drr_object, 0, DMU_OBJECT_END);
			if (err != 0)
				return (SET_ERROR(EINVAL));
		} else if (drro->drr_blksz != doi.doi_data_block_size ||
		    nblkptr < doi.doi_nblkptr ||
		    drro->drr_dn_slots != doi.doi_dnodesize >> DNODE_SHIFT) {
			err = dmu_free_long_range(rwa->os, drro->drr_object,
			    0, DMU_OBJECT_END);
			if (err != 0)
				return (SET_ERROR(EINVAL));
		}

		/*
		 * The dmu does not currently support decreasing nlevels
		 * on an object. For non-raw sends, this does not matter
		 * and the new object can just use the previous one's nlevels.
		 * For raw sends, however, the structure of the received dnode
		 * (including nlevels) must match that of the send side.
		 * Therefore, instead of using dmu_object_reclaim(), we must
		 * free the object completely and call dmu_object_claim_dnsize()
		 * instead.
		 */
		if ((rwa->raw && drro->drr_nlevels < doi.doi_indirection) ||
		    drro->drr_dn_slots != doi.doi_dnodesize >> DNODE_SHIFT) {
			if (rwa->raw) {
				err = dmu_free_long_object_raw(rwa->os,
				    drro->drr_object);
			} else {
				err = dmu_free_long_object(rwa->os,
				    drro->drr_object);
			}
			if (err != 0)
				return (SET_ERROR(EINVAL));

			txg_wait_synced(dmu_objset_pool(rwa->os), 0);
			object = DMU_NEW_OBJECT;
		}
	} else if (err == EEXIST) {
		/*
		 * The object requested is currently an interior slot of a
		 * multi-slot dnode. This will be resolved when the next txg
		 * is synced out, since the send stream will have told us
		 * to free this slot when we freed the associated dnode
		 * earlier in the stream.
		 */
		txg_wait_synced(dmu_objset_pool(rwa->os), 0);
		object = drro->drr_object;
	} else {
		/* object is free and we are about to allocate a new one */
		object = DMU_NEW_OBJECT;
	}

	/*
	 * If this is a multi-slot dnode there is a chance that this
	 * object will expand into a slot that is already used by
	 * another object from the previous snapshot. We must free
	 * these objects before we attempt to allocate the new dnode.
	 */
	if (drro->drr_dn_slots > 1) {
		boolean_t need_sync = B_FALSE;

		for (uint64_t slot = drro->drr_object + 1;
		    slot < drro->drr_object + drro->drr_dn_slots;
		    slot++) {
			dmu_object_info_t slot_doi;

			err = dmu_object_info(rwa->os, slot, &slot_doi);
			if (err == ENOENT || err == EEXIST)
				continue;
			else if (err != 0)
				return (err);

			if (rwa->raw)
				err = dmu_free_long_object_raw(rwa->os, slot);
			else
				err = dmu_free_long_object(rwa->os, slot);
			if (err != 0)
				return (err);

			need_sync = B_TRUE;
		}

		if (need_sync)
			txg_wait_synced(dmu_objset_pool(rwa->os), 0);
	}

	tx = dmu_tx_create(rwa->os);
	dmu_tx_hold_bonus(tx, object);
	dmu_tx_hold_write(tx, object, 0, 0);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err != 0) {
		dmu_tx_abort(tx);
		return (err);
	}

	if (object == DMU_NEW_OBJECT) {
		/* currently free, want to be allocated */
		err = dmu_object_claim_dnsize(rwa->os, drro->drr_object,
		    drro->drr_type, drro->drr_blksz,
		    drro->drr_bonustype, drro->drr_bonuslen,
		    drro->drr_dn_slots << DNODE_SHIFT, tx);
	} else if (drro->drr_type != doi.doi_type ||
	    drro->drr_blksz != doi.doi_data_block_size ||
	    drro->drr_bonustype != doi.doi_bonus_type ||
	    drro->drr_bonuslen != doi.doi_bonus_size) {
		/* currently allocated, but with different properties */
		err = dmu_object_reclaim(rwa->os, drro->drr_object,
		    drro->drr_type, drro->drr_blksz,
		    drro->drr_bonustype, drro->drr_bonuslen, tx);
	}
	if (err != 0) {
		dmu_tx_commit(tx);
		return (SET_ERROR(EINVAL));
	}

	if (rwa->raw) {
		/*
		 * Convert the buffer associated with this range of dnodes
		 * to a raw buffer. This ensures that it will be written out
		 * as a raw buffer when we fill in the dnode object. Since we
		 * are committing this tx now, it is possible for the dnode
		 * block to end up on-disk with the incorrect MAC. Despite
		 * this, the dataset is marked as inconsistent so no other
		 * code paths (apart from scrubs) will attempt to read this
		 * data. Scrubs will not be effected by this either since
		 * scrubs only read raw data and do not attempt to check
		 * the MAC.
		 */
		err = dmu_convert_mdn_block_to_raw(rwa->os, rwa->or_firstobj,
		    rwa->or_byteorder, rwa->or_salt, rwa->or_iv, rwa->or_mac,
		    tx);
		if (err != 0) {
			dmu_tx_commit(tx);
			return (SET_ERROR(EINVAL));
		}
	}

	dmu_object_set_checksum(rwa->os, drro->drr_object,
	    drro->drr_checksumtype, tx);
	dmu_object_set_compress(rwa->os, drro->drr_object,
	    drro->drr_compress, tx);

	/* handle more restrictive dnode structuring for raw recvs */
	if (rwa->raw) {
		/*
		 * Set the indirect block shift and nlevels. This will not fail
		 * because we ensured all of the blocks were free earlier if
		 * this is a new object.
		 */
		VERIFY0(dmu_object_set_blocksize(rwa->os, drro->drr_object,
		    drro->drr_blksz, drro->drr_indblkshift, tx));
		VERIFY0(dmu_object_set_nlevels(rwa->os, drro->drr_object,
		    drro->drr_nlevels, tx));
		VERIFY0(dmu_object_set_maxblkid(rwa->os, drro->drr_object,
		    drro->drr_maxblkid, tx));
	}

	if (data != NULL) {
		dmu_buf_t *db;
		uint32_t flags = DMU_READ_NO_PREFETCH;

		if (rwa->raw)
			flags |= DMU_READ_NO_DECRYPT;

		VERIFY0(dmu_bonus_hold_impl(rwa->os, drro->drr_object,
		    FTAG, flags, &db));
		dmu_buf_will_dirty(db, tx);

		ASSERT3U(db->db_size, >=, drro->drr_bonuslen);
		bcopy(data, db->db_data, DRR_OBJECT_PAYLOAD_SIZE(drro));

		/*
		 * Raw bonus buffers have their byteorder determined by the
		 * DRR_OBJECT_RANGE record.
		 */
		if (rwa->byteswap && !rwa->raw) {
			dmu_object_byteswap_t byteswap =
			    DMU_OT_BYTESWAP(drro->drr_bonustype);
			dmu_ot_byteswap[byteswap].ob_func(db->db_data,
			    DRR_OBJECT_PAYLOAD_SIZE(drro));
		}
		dmu_buf_rele(db, FTAG);
	}
	dmu_tx_commit(tx);

	return (0);
}
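/*
 * Free a range of objects named by a DRR_FREEOBJECTS record, skipping
 * holes via dmu_object_next(); ESRCH from the iterator simply means we
 * ran off the end of the object array and is not an error.
 */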
noinline static int
receive_freeobjects(struct receive_writer_arg *rwa,
    struct drr_freeobjects *drrfo)
{
	uint64_t obj;
	int next_err = 0;

	if (drrfo->drr_firstobj + drrfo->drr_numobjs < drrfo->drr_firstobj)
		return (SET_ERROR(EINVAL));

	for (obj = drrfo->drr_firstobj == 0 ? 1 : drrfo->drr_firstobj;
	    obj < drrfo->drr_firstobj + drrfo->drr_numobjs && next_err == 0;
	    next_err = dmu_object_next(rwa->os, &obj, FALSE, 0)) {
		dmu_object_info_t doi;
		int err;

		err = dmu_object_info(rwa->os, obj, &doi);
		if (err == ENOENT)
			continue;
		else if (err != 0)
			return (err);

		if (rwa->raw)
			err = dmu_free_long_object_raw(rwa->os, obj);
		else
			err = dmu_free_long_object(rwa->os, obj);
		if (err != 0)
			return (err);

		if (obj > rwa->max_object)
			rwa->max_object = obj;
	}
	if (next_err != ESRCH)
		return (next_err);
	return (0);
}
noinline static int
receive_write(struct receive_writer_arg *rwa, struct drr_write *drrw,
    arc_buf_t *abuf)
{
	int err;
	dmu_tx_t *tx;
	dnode_t *dn;

	if (drrw->drr_offset + drrw->drr_logical_size < drrw->drr_offset ||
	    !DMU_OT_IS_VALID(drrw->drr_type))
		return (SET_ERROR(EINVAL));

	/*
	 * For resuming to work, records must be in increasing order
	 * by (object, offset).
	 */
	if (drrw->drr_object < rwa->last_object ||
	    (drrw->drr_object == rwa->last_object &&
	    drrw->drr_offset < rwa->last_offset)) {
		return (SET_ERROR(EINVAL));
	}
	rwa->last_object = drrw->drr_object;
	rwa->last_offset = drrw->drr_offset;

	if (rwa->last_object > rwa->max_object)
		rwa->max_object = rwa->last_object;

	if (dmu_object_info(rwa->os, drrw->drr_object, NULL) != 0)
		return (SET_ERROR(EINVAL));

	tx = dmu_tx_create(rwa->os);
	dmu_tx_hold_write(tx, drrw->drr_object,
	    drrw->drr_offset, drrw->drr_logical_size);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err != 0) {
		dmu_tx_abort(tx);
		return (err);
	}

	if (rwa->raw)
		VERIFY0(dmu_object_dirty_raw(rwa->os, drrw->drr_object, tx));

	if (rwa->byteswap && !arc_is_encrypted(abuf) &&
	    arc_get_compression(abuf) == ZIO_COMPRESS_OFF) {
		dmu_object_byteswap_t byteswap =
		    DMU_OT_BYTESWAP(drrw->drr_type);
		dmu_ot_byteswap[byteswap].ob_func(abuf->b_data,
		    DRR_WRITE_PAYLOAD_SIZE(drrw));
	}

	VERIFY0(dnode_hold(rwa->os, drrw->drr_object, FTAG, &dn));
	dmu_assign_arcbuf_by_dnode(dn, drrw->drr_offset, abuf, tx);
	dnode_rele(dn, FTAG);

	/*
	 * Note: If the receive fails, we want the resume stream to start
	 * with the same record that we last successfully received (as opposed
	 * to the next record), so that we can verify that we are
	 * resuming from the correct location.
	 */
	save_resume_state(rwa, drrw->drr_object, drrw->drr_offset, tx);
	dmu_tx_commit(tx);

	return (0);
}
/*
 * Handle a DRR_WRITE_BYREF record.  This record is used in dedup'ed
 * streams to refer to a copy of the data that is already on the
 * system because it came in earlier in the stream.  This function
 * finds the earlier copy of the data, and uses that copy instead of
 * data from the stream to fulfill this write.
 */
static int
receive_write_byref(struct receive_writer_arg *rwa,
    struct drr_write_byref *drrwbr)
{
	dmu_tx_t *tx;
	int err;
	guid_map_entry_t gmesrch;
	guid_map_entry_t *gmep;
	avl_index_t where;
	objset_t *ref_os = NULL;
	int flags = DMU_READ_PREFETCH;
	dmu_buf_t *dbp;

	if (drrwbr->drr_offset + drrwbr->drr_length < drrwbr->drr_offset)
		return (SET_ERROR(EINVAL));

	/*
	 * If the GUID of the referenced dataset is different from the
	 * GUID of the target dataset, find the referenced dataset.
	 */
	if (drrwbr->drr_toguid != drrwbr->drr_refguid) {
		gmesrch.guid = drrwbr->drr_refguid;
		if ((gmep = avl_find(rwa->guid_to_ds_map, &gmesrch,
		    &where)) == NULL) {
			return (SET_ERROR(EINVAL));
		}
		if (dmu_objset_from_ds(gmep->gme_ds, &ref_os))
			return (SET_ERROR(EINVAL));
	} else {
		ref_os = rwa->os;
	}

	if (drrwbr->drr_object > rwa->max_object)
		rwa->max_object = drrwbr->drr_object;

	if (rwa->raw)
		flags |= DMU_READ_NO_DECRYPT;

	/* may return either a regular db or an encrypted one */
	err = dmu_buf_hold(ref_os, drrwbr->drr_refobject,
	    drrwbr->drr_refoffset, FTAG, &dbp, flags);
	if (err != 0)
		return (err);

	tx = dmu_tx_create(rwa->os);

	dmu_tx_hold_write(tx, drrwbr->drr_object,
	    drrwbr->drr_offset, drrwbr->drr_length);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err != 0) {
		dmu_tx_abort(tx);
		return (err);
	}

	if (rwa->raw) {
		VERIFY0(dmu_object_dirty_raw(rwa->os, drrwbr->drr_object, tx));
		dmu_copy_from_buf(rwa->os, drrwbr->drr_object,
		    drrwbr->drr_offset, dbp, tx);
	} else {
		dmu_write(rwa->os, drrwbr->drr_object,
		    drrwbr->drr_offset, drrwbr->drr_length, dbp->db_data, tx);
	}
	dmu_buf_rele(dbp, FTAG);

	/* See comment in restore_write. */
	save_resume_state(rwa, drrwbr->drr_object, drrwbr->drr_offset, tx);
	dmu_tx_commit(tx);
	return (0);
}
static int
receive_write_embedded(struct receive_writer_arg *rwa,
    struct drr_write_embedded *drrwe, void *data)
{
	dmu_tx_t *tx;
	int err;

	if (drrwe->drr_offset + drrwe->drr_length < drrwe->drr_offset)
		return (SET_ERROR(EINVAL));

	if (drrwe->drr_psize > BPE_PAYLOAD_SIZE)
		return (SET_ERROR(EINVAL));

	if (drrwe->drr_etype >= NUM_BP_EMBEDDED_TYPES)
		return (SET_ERROR(EINVAL));
	if (drrwe->drr_compression >= ZIO_COMPRESS_FUNCTIONS)
		return (SET_ERROR(EINVAL));
	if (rwa->raw)
		return (SET_ERROR(EINVAL));

	if (drrwe->drr_object > rwa->max_object)
		rwa->max_object = drrwe->drr_object;

	tx = dmu_tx_create(rwa->os);

	dmu_tx_hold_write(tx, drrwe->drr_object,
	    drrwe->drr_offset, drrwe->drr_length);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err != 0) {
		dmu_tx_abort(tx);
		return (err);
	}

	dmu_write_embedded(rwa->os, drrwe->drr_object,
	    drrwe->drr_offset, data, drrwe->drr_etype,
	    drrwe->drr_compression, drrwe->drr_lsize, drrwe->drr_psize,
	    rwa->byteswap ^ ZFS_HOST_BYTEORDER, tx);

	/* See comment in restore_write. */
	save_resume_state(rwa, drrwe->drr_object, drrwe->drr_offset, tx);
	dmu_tx_commit(tx);
	return (0);
}
static int
receive_spill(struct receive_writer_arg *rwa, struct drr_spill *drrs,
    arc_buf_t *abuf)
{
	dmu_tx_t *tx;
	dmu_buf_t *db, *db_spill;
	int err;

	if (drrs->drr_length < SPA_MINBLOCKSIZE ||
	    drrs->drr_length > spa_maxblocksize(dmu_objset_spa(rwa->os)))
		return (SET_ERROR(EINVAL));

	if (rwa->raw) {
		if (!DMU_OT_IS_VALID(drrs->drr_type) ||
		    drrs->drr_compressiontype >= ZIO_COMPRESS_FUNCTIONS ||
		    drrs->drr_compressed_size == 0)
			return (SET_ERROR(EINVAL));
	}

	if (dmu_object_info(rwa->os, drrs->drr_object, NULL) != 0)
		return (SET_ERROR(EINVAL));

	if (drrs->drr_object > rwa->max_object)
		rwa->max_object = drrs->drr_object;

	VERIFY0(dmu_bonus_hold(rwa->os, drrs->drr_object, FTAG, &db));
	if ((err = dmu_spill_hold_by_bonus(db, FTAG, &db_spill)) != 0) {
		dmu_buf_rele(db, FTAG);
		return (err);
	}

	tx = dmu_tx_create(rwa->os);

	dmu_tx_hold_spill(tx, db->db_object);

	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err != 0) {
		dmu_buf_rele(db, FTAG);
		dmu_buf_rele(db_spill, FTAG);
		dmu_tx_abort(tx);
		return (err);
	}

	if (rwa->raw) {
		VERIFY0(dmu_object_dirty_raw(rwa->os, drrs->drr_object, tx));
		dmu_buf_will_change_crypt_params(db_spill, tx);
	} else {
		dmu_buf_will_dirty(db_spill, tx);
	}

	if (db_spill->db_size < drrs->drr_length)
		VERIFY(0 == dbuf_spill_set_blksz(db_spill,
		    drrs->drr_length, tx));

	if (rwa->byteswap && !arc_is_encrypted(abuf) &&
	    arc_get_compression(abuf) == ZIO_COMPRESS_OFF) {
		dmu_object_byteswap_t byteswap =
		    DMU_OT_BYTESWAP(drrs->drr_type);
		dmu_ot_byteswap[byteswap].ob_func(abuf->b_data,
		    DRR_SPILL_PAYLOAD_SIZE(drrs));
	}

	dbuf_assign_arcbuf((dmu_buf_impl_t *)db_spill, abuf, tx);

	dmu_buf_rele(db, FTAG);
	dmu_buf_rele(db_spill, FTAG);

	dmu_tx_commit(tx);
	return (0);
}
noinline static int
receive_free(struct receive_writer_arg *rwa, struct drr_free *drrf)
{
	int err;

	if (drrf->drr_length != DMU_OBJECT_END &&
	    drrf->drr_offset + drrf->drr_length < drrf->drr_offset)
		return (SET_ERROR(EINVAL));

	if (dmu_object_info(rwa->os, drrf->drr_object, NULL) != 0)
		return (SET_ERROR(EINVAL));

	if (drrf->drr_object > rwa->max_object)
		rwa->max_object = drrf->drr_object;

	if (rwa->raw) {
		err = dmu_free_long_range_raw(rwa->os, drrf->drr_object,
		    drrf->drr_offset, drrf->drr_length);
	} else {
		err = dmu_free_long_range(rwa->os, drrf->drr_object,
		    drrf->drr_offset, drrf->drr_length);
	}

	return (err);
}
static int
receive_object_range(struct receive_writer_arg *rwa,
    struct drr_object_range *drror)
{
	/*
	 * By default, we assume this block is in our native format
	 * (ZFS_HOST_BYTEORDER). We then take into account whether
	 * the send stream is byteswapped (rwa->byteswap). Finally,
	 * we need to byteswap again if this particular block was
	 * in non-native format on the send side.
	 */
	boolean_t byteorder = ZFS_HOST_BYTEORDER ^ rwa->byteswap ^
	    !!DRR_IS_RAW_BYTESWAPPED(drror->drr_flags);

	/*
	 * Since dnode block sizes are constant, we should not need to worry
	 * about making sure that the dnode block size is the same on the
	 * sending and receiving sides for the time being. For non-raw sends,
	 * this does not matter (and in fact we do not send a DRR_OBJECT_RANGE
	 * record at all). Raw sends require this record type because the
	 * encryption parameters are used to protect an entire block of bonus
	 * buffers. If the size of dnode blocks ever becomes variable,
	 * handling will need to be added to ensure that dnode block sizes
	 * match on the sending and receiving side.
	 */
	if (drror->drr_numslots != DNODES_PER_BLOCK ||
	    P2PHASE(drror->drr_firstobj, DNODES_PER_BLOCK) != 0 ||
	    !rwa->raw)
		return (SET_ERROR(EINVAL));

	if (drror->drr_firstobj > rwa->max_object)
		rwa->max_object = drror->drr_firstobj;

	/*
	 * The DRR_OBJECT_RANGE handling must be deferred to receive_object()
	 * so that the encryption parameters are set with each object that is
	 * written into that block.
	 */
	rwa->or_firstobj = drror->drr_firstobj;
	rwa->or_numslots = drror->drr_numslots;
	bcopy(drror->drr_salt, rwa->or_salt, ZIO_DATA_SALT_LEN);
	bcopy(drror->drr_iv, rwa->or_iv, ZIO_DATA_IV_LEN);
	bcopy(drror->drr_mac, rwa->or_mac, ZIO_DATA_MAC_LEN);
	rwa->or_byteorder = byteorder;

	return (0);
}
/* used to destroy the drc_ds on error */
static void
dmu_recv_cleanup_ds(dmu_recv_cookie_t *drc)
{
	dsl_dataset_t *ds = drc->drc_ds;
	ds_hold_flags_t dsflags = (drc->drc_raw) ? 0 : DS_HOLD_FLAG_DECRYPT;

	/*
	 * Wait for the txg sync before cleaning up the receive. For
	 * resumable receives, this ensures that our resume state has
	 * been written out to disk. For raw receives, this ensures
	 * that the user accounting code will not attempt to do anything
	 * after we stopped receiving the dataset.
	 */
	txg_wait_synced(ds->ds_dir->dd_pool, 0);

	rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
	if (drc->drc_resumable && !BP_IS_HOLE(dsl_dataset_get_blkptr(ds))) {
		rrw_exit(&ds->ds_bp_rwlock, FTAG);
		dsl_dataset_disown(ds, dsflags, dmu_recv_tag);
	} else {
		char name[ZFS_MAX_DATASET_NAME_LEN];
		rrw_exit(&ds->ds_bp_rwlock, FTAG);
		dsl_dataset_name(ds, name);
		dsl_dataset_disown(ds, dsflags, dmu_recv_tag);
		(void) dsl_destroy_head(name);
	}
}
static void
receive_cksum(struct receive_arg *ra, int len, void *buf)
{
	if (ra->byteswap) {
		(void) fletcher_4_incremental_byteswap(buf, len, &ra->cksum);
	} else {
		(void) fletcher_4_incremental_native(buf, len, &ra->cksum);
	}
}
/*
 * Read the payload into a buffer of size len, and update the current record's
 * payload field.
 * Allocate ra->next_rrd and read the next record's header into
 * ra->next_rrd->header.
 * Verify checksum of payload and next record.
 */
static int
receive_read_payload_and_next_header(struct receive_arg *ra, int len, void *buf)
{
	int err;
	zio_cksum_t cksum_orig;
	zio_cksum_t *cksump;

	if (len != 0) {
		ASSERT3U(len, <=, SPA_MAXBLOCKSIZE);
		err = receive_read(ra, len, buf);
		if (err != 0)
			return (err);
		receive_cksum(ra, len, buf);

		/* note: rrd is NULL when reading the begin record's payload */
		if (ra->rrd != NULL) {
			ra->rrd->payload = buf;
			ra->rrd->payload_size = len;
			ra->rrd->bytes_read = ra->bytes_read;
		}
	}

	ra->prev_cksum = ra->cksum;

	ra->next_rrd = kmem_zalloc(sizeof (*ra->next_rrd), KM_SLEEP);
	err = receive_read(ra, sizeof (ra->next_rrd->header),
	    &ra->next_rrd->header);
	ra->next_rrd->bytes_read = ra->bytes_read;

	if (err != 0) {
		kmem_free(ra->next_rrd, sizeof (*ra->next_rrd));
		ra->next_rrd = NULL;
		return (err);
	}
	if (ra->next_rrd->header.drr_type == DRR_BEGIN) {
		kmem_free(ra->next_rrd, sizeof (*ra->next_rrd));
		ra->next_rrd = NULL;
		return (SET_ERROR(EINVAL));
	}

	/*
	 * Note: checksum is of everything up to but not including the
	 * checksum itself.
	 */
	ASSERT3U(offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
	    ==, sizeof (dmu_replay_record_t) - sizeof (zio_cksum_t));
	receive_cksum(ra,
	    offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
	    &ra->next_rrd->header);

	cksum_orig = ra->next_rrd->header.drr_u.drr_checksum.drr_checksum;
	cksump = &ra->next_rrd->header.drr_u.drr_checksum.drr_checksum;

	if (ra->byteswap)
		byteswap_record(&ra->next_rrd->header);

	if ((!ZIO_CHECKSUM_IS_ZERO(cksump)) &&
	    !ZIO_CHECKSUM_EQUAL(ra->cksum, *cksump)) {
		kmem_free(ra->next_rrd, sizeof (*ra->next_rrd));
		ra->next_rrd = NULL;
		return (SET_ERROR(ECKSUM));
	}

	receive_cksum(ra, sizeof (cksum_orig), &cksum_orig);

	return (0);
}
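/*
 * The objlist helpers below maintain a simple list of object numbers
 * kept in ascending order; it backs the prefetch ignore list used by
 * receive_read_prefetch().
 */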
static void
objlist_create(struct objlist *list)
{
	list_create(&list->list, sizeof (struct receive_objnode),
	    offsetof(struct receive_objnode, node));
	list->last_lookup = 0;
}

static void
objlist_destroy(struct objlist *list)
{
	for (struct receive_objnode *n = list_remove_head(&list->list);
	    n != NULL; n = list_remove_head(&list->list)) {
		kmem_free(n, sizeof (*n));
	}
	list_destroy(&list->list);
}

/*
 * This function looks through the objlist to see if the specified object number
 * is contained in the objlist.  In the process, it will remove all object
 * numbers in the list that are smaller than the specified object number.  Thus,
 * any lookup of an object number smaller than a previously looked up object
 * number will always return false; therefore, all lookups should be done in
 * ascending order.
 */
static boolean_t
objlist_exists(struct objlist *list, uint64_t object)
{
	struct receive_objnode *node = list_head(&list->list);
	ASSERT3U(object, >=, list->last_lookup);
	list->last_lookup = object;
	while (node != NULL && node->object < object) {
		VERIFY3P(node, ==, list_remove_head(&list->list));
		kmem_free(node, sizeof (*node));
		node = list_head(&list->list);
	}
	return (node != NULL && node->object == object);
}

/*
 * The objlist is a list of object numbers stored in ascending order.  However,
 * the insertion of new object numbers does not seek out the correct location to
 * store a new object number; instead, it appends it to the list for simplicity.
 * Thus, any users must take care to only insert new object numbers in ascending
 * order.
 */
static void
objlist_insert(struct objlist *list, uint64_t object)
{
	struct receive_objnode *node = kmem_zalloc(sizeof (*node), KM_SLEEP);
	node->object = object;
#ifdef ZFS_DEBUG
	{
	struct receive_objnode *last_object = list_tail(&list->list);
	uint64_t last_objnum = (last_object != NULL ? last_object->object : 0);
	ASSERT3U(node->object, >, last_objnum);
	}
#endif
	list_insert_tail(&list->list, node);
}
/*
 * Issue the prefetch reads for any necessary indirect blocks.
 *
 * We use the object ignore list to tell us whether or not to issue prefetches
 * for a given object.  We do this for both correctness (in case the blocksize
 * of an object has changed) and performance (if the object doesn't exist, don't
 * needlessly try to issue prefetches).  We also trim the list as we go through
 * the stream to prevent it from growing to an unbounded size.
 *
 * The object numbers within will always be in sorted order, and any write
 * records we see will also be in sorted order, but they're not sorted with
 * respect to each other (i.e. we can get several object records before
 * receiving each object's write records).  As a result, once we've reached a
 * given object number, we can safely remove any reference to lower object
 * numbers in the ignore list.  In practice, we receive up to 32 object records
 * before receiving write records, so the list can have up to 32 nodes in it.
 */
static void
receive_read_prefetch(struct receive_arg *ra,
    uint64_t object, uint64_t offset, uint64_t length)
{
	if (!objlist_exists(&ra->ignore_objlist, object)) {
		dmu_prefetch(ra->os, object, 1, offset, length,
		    ZIO_PRIORITY_SYNC_READ);
	}
}
/*
 * Read records off the stream, issuing any necessary prefetches.
 */
static int
receive_read_record(struct receive_arg *ra)
{
	int err;

	switch (ra->rrd->header.drr_type) {
	case DRR_OBJECT:
	{
		struct drr_object *drro = &ra->rrd->header.drr_u.drr_object;
		uint32_t size = DRR_OBJECT_PAYLOAD_SIZE(drro);
		void *buf = kmem_zalloc(size, KM_SLEEP);
		dmu_object_info_t doi;

		err = receive_read_payload_and_next_header(ra, size, buf);
		if (err != 0) {
			kmem_free(buf, size);
			return (err);
		}
		err = dmu_object_info(ra->os, drro->drr_object, &doi);
		/*
		 * See receive_read_prefetch for an explanation why we're
		 * storing this object in the ignore_obj_list.
		 */
		if (err == ENOENT || err == EEXIST ||
		    (err == 0 && doi.doi_data_block_size != drro->drr_blksz)) {
			objlist_insert(&ra->ignore_objlist, drro->drr_object);
			err = 0;
		}
		return (err);
	}
	case DRR_FREEOBJECTS:
	{
		err = receive_read_payload_and_next_header(ra, 0, NULL);
		return (err);
	}
	case DRR_WRITE:
	{
		struct drr_write *drrw = &ra->rrd->header.drr_u.drr_write;
		arc_buf_t *abuf;
		boolean_t is_meta = DMU_OT_IS_METADATA(drrw->drr_type);

		if (ra->raw) {
			boolean_t byteorder = ZFS_HOST_BYTEORDER ^
			    !!DRR_IS_RAW_BYTESWAPPED(drrw->drr_flags) ^
			    ra->byteswap;

			abuf = arc_loan_raw_buf(dmu_objset_spa(ra->os),
			    drrw->drr_object, byteorder, drrw->drr_salt,
			    drrw->drr_iv, drrw->drr_mac, drrw->drr_type,
			    drrw->drr_compressed_size, drrw->drr_logical_size,
			    drrw->drr_compressiontype);
		} else if (DRR_WRITE_COMPRESSED(drrw)) {
			ASSERT3U(drrw->drr_compressed_size, >, 0);
			ASSERT3U(drrw->drr_logical_size, >=,
			    drrw->drr_compressed_size);
			ASSERT(!is_meta);
			abuf = arc_loan_compressed_buf(
			    dmu_objset_spa(ra->os),
			    drrw->drr_compressed_size, drrw->drr_logical_size,
			    drrw->drr_compressiontype);
		} else {
			abuf = arc_loan_buf(dmu_objset_spa(ra->os),
			    is_meta, drrw->drr_logical_size);
		}

		err = receive_read_payload_and_next_header(ra,
		    DRR_WRITE_PAYLOAD_SIZE(drrw), abuf->b_data);
		if (err != 0) {
			dmu_return_arcbuf(abuf);
			return (err);
		}
		ra->rrd->arc_buf = abuf;
		receive_read_prefetch(ra, drrw->drr_object, drrw->drr_offset,
		    drrw->drr_logical_size);
		return (err);
	}
	case DRR_WRITE_BYREF:
	{
		struct drr_write_byref *drrwb =
		    &ra->rrd->header.drr_u.drr_write_byref;
		err = receive_read_payload_and_next_header(ra, 0, NULL);
		receive_read_prefetch(ra, drrwb->drr_object, drrwb->drr_offset,
		    drrwb->drr_length);
		return (err);
	}
	case DRR_WRITE_EMBEDDED:
	{
		struct drr_write_embedded *drrwe =
		    &ra->rrd->header.drr_u.drr_write_embedded;
		uint32_t size = P2ROUNDUP(drrwe->drr_psize, 8);
		void *buf = kmem_zalloc(size, KM_SLEEP);

		err = receive_read_payload_and_next_header(ra, size, buf);
		if (err != 0) {
			kmem_free(buf, size);
			return (err);
		}

		receive_read_prefetch(ra, drrwe->drr_object, drrwe->drr_offset,
		    drrwe->drr_length);
		return (err);
	}
	case DRR_FREE:
	{
		/*
		 * It might be beneficial to prefetch indirect blocks here, but
		 * we don't really have the data to decide for sure.
		 */
		err = receive_read_payload_and_next_header(ra, 0, NULL);
		return (err);
	}
	case DRR_END:
	{
		struct drr_end *drre = &ra->rrd->header.drr_u.drr_end;
		if (!ZIO_CHECKSUM_EQUAL(ra->prev_cksum, drre->drr_checksum))
			return (SET_ERROR(ECKSUM));
		return (0);
	}
	case DRR_SPILL:
	{
		struct drr_spill *drrs = &ra->rrd->header.drr_u.drr_spill;
		arc_buf_t *abuf;
		int len = DRR_SPILL_PAYLOAD_SIZE(drrs);

		/* DRR_SPILL records are either raw or uncompressed */
		if (ra->raw) {
			boolean_t byteorder = ZFS_HOST_BYTEORDER ^
			    !!DRR_IS_RAW_BYTESWAPPED(drrs->drr_flags) ^
			    ra->byteswap;

			abuf = arc_loan_raw_buf(dmu_objset_spa(ra->os),
			    drrs->drr_object, byteorder, drrs->drr_salt,
			    drrs->drr_iv, drrs->drr_mac, drrs->drr_type,
			    drrs->drr_compressed_size, drrs->drr_length,
			    drrs->drr_compressiontype);
		} else {
			abuf = arc_loan_buf(dmu_objset_spa(ra->os),
			    DMU_OT_IS_METADATA(drrs->drr_type),
			    drrs->drr_length);
		}

		err = receive_read_payload_and_next_header(ra, len,
		    abuf->b_data);
		if (err != 0) {
			dmu_return_arcbuf(abuf);
			return (err);
		}
		ra->rrd->arc_buf = abuf;
		return (err);
	}
	case DRR_OBJECT_RANGE:
	{
		err = receive_read_payload_and_next_header(ra, 0, NULL);
		return (err);
	}
	default:
		return (SET_ERROR(EINVAL));
	}
}
static void
dprintf_drr(struct receive_record_arg *rrd, int err)
{
	switch (rrd->header.drr_type) {
	case DRR_OBJECT:
	{
		struct drr_object *drro = &rrd->header.drr_u.drr_object;
		dprintf("drr_type = OBJECT obj = %llu type = %u "
		    "bonustype = %u blksz = %u bonuslen = %u cksumtype = %u "
		    "compress = %u dn_slots = %u err = %d\n",
		    drro->drr_object, drro->drr_type, drro->drr_bonustype,
		    drro->drr_blksz, drro->drr_bonuslen,
		    drro->drr_checksumtype, drro->drr_compress,
		    drro->drr_dn_slots, err);
		break;
	}
	case DRR_FREEOBJECTS:
	{
		struct drr_freeobjects *drrfo =
		    &rrd->header.drr_u.drr_freeobjects;
		dprintf("drr_type = FREEOBJECTS firstobj = %llu "
		    "numobjs = %llu err = %d\n",
		    drrfo->drr_firstobj, drrfo->drr_numobjs, err);
		break;
	}
	case DRR_WRITE:
	{
		struct drr_write *drrw = &rrd->header.drr_u.drr_write;
		dprintf("drr_type = WRITE obj = %llu type = %u offset = %llu "
		    "lsize = %llu cksumtype = %u cksumflags = %u "
		    "compress = %u psize = %llu err = %d\n",
		    drrw->drr_object, drrw->drr_type, drrw->drr_offset,
		    drrw->drr_logical_size, drrw->drr_checksumtype,
		    drrw->drr_flags, drrw->drr_compressiontype,
		    drrw->drr_compressed_size, err);
		break;
	}
	case DRR_WRITE_BYREF:
	{
		struct drr_write_byref *drrwbr =
		    &rrd->header.drr_u.drr_write_byref;
		dprintf("drr_type = WRITE_BYREF obj = %llu offset = %llu "
		    "length = %llu toguid = %llx refguid = %llx "
		    "refobject = %llu refoffset = %llu cksumtype = %u "
		    "cksumflags = %u err = %d\n",
		    drrwbr->drr_object, drrwbr->drr_offset,
		    drrwbr->drr_length, drrwbr->drr_toguid,
		    drrwbr->drr_refguid, drrwbr->drr_refobject,
		    drrwbr->drr_refoffset, drrwbr->drr_checksumtype,
		    drrwbr->drr_flags, err);
		break;
	}
	case DRR_WRITE_EMBEDDED:
	{
		struct drr_write_embedded *drrwe =
		    &rrd->header.drr_u.drr_write_embedded;
		dprintf("drr_type = WRITE_EMBEDDED obj = %llu offset = %llu "
		    "length = %llu compress = %u etype = %u lsize = %u "
		    "psize = %u err = %d\n",
		    drrwe->drr_object, drrwe->drr_offset, drrwe->drr_length,
		    drrwe->drr_compression, drrwe->drr_etype,
		    drrwe->drr_lsize, drrwe->drr_psize, err);
		break;
	}
	case DRR_FREE:
	{
		struct drr_free *drrf = &rrd->header.drr_u.drr_free;
		dprintf("drr_type = FREE obj = %llu offset = %llu "
		    "length = %lld err = %d\n",
		    drrf->drr_object, drrf->drr_offset, drrf->drr_length,
		    err);
		break;
	}
	case DRR_SPILL:
	{
		struct drr_spill *drrs = &rrd->header.drr_u.drr_spill;
		dprintf("drr_type = SPILL obj = %llu length = %llu "
		    "err = %d\n", drrs->drr_object, drrs->drr_length, err);
		break;
	}
	default:
		break;
	}
}
/*
 * Commit the records to the pool.
 */
static int
receive_process_record(struct receive_writer_arg *rwa,
    struct receive_record_arg *rrd)
{
	int err;

	/* Processing in order, therefore bytes_read should be increasing. */
	ASSERT3U(rrd->bytes_read, >=, rwa->bytes_read);
	rwa->bytes_read = rrd->bytes_read;

	switch (rrd->header.drr_type) {
	case DRR_OBJECT:
	{
		struct drr_object *drro = &rrd->header.drr_u.drr_object;
		err = receive_object(rwa, drro, rrd->payload);
		kmem_free(rrd->payload, rrd->payload_size);
		rrd->payload = NULL;
		break;
	}
	case DRR_FREEOBJECTS:
	{
		struct drr_freeobjects *drrfo =
		    &rrd->header.drr_u.drr_freeobjects;
		err = receive_freeobjects(rwa, drrfo);
		break;
	}
	case DRR_WRITE:
	{
		struct drr_write *drrw = &rrd->header.drr_u.drr_write;
		err = receive_write(rwa, drrw, rrd->arc_buf);
		/* if receive_write() is successful, it consumes the arc_buf */
		if (err != 0)
			dmu_return_arcbuf(rrd->arc_buf);
		rrd->arc_buf = NULL;
		rrd->payload = NULL;
		break;
	}
	case DRR_WRITE_BYREF:
	{
		struct drr_write_byref *drrwbr =
		    &rrd->header.drr_u.drr_write_byref;
		err = receive_write_byref(rwa, drrwbr);
		break;
	}
	case DRR_WRITE_EMBEDDED:
	{
		struct drr_write_embedded *drrwe =
		    &rrd->header.drr_u.drr_write_embedded;
		err = receive_write_embedded(rwa, drrwe, rrd->payload);
		kmem_free(rrd->payload, rrd->payload_size);
		rrd->payload = NULL;
		break;
	}
	case DRR_FREE:
	{
		struct drr_free *drrf = &rrd->header.drr_u.drr_free;
		err = receive_free(rwa, drrf);
		break;
	}
	case DRR_SPILL:
	{
		struct drr_spill *drrs = &rrd->header.drr_u.drr_spill;
		err = receive_spill(rwa, drrs, rrd->arc_buf);
		/* if receive_spill() is successful, it consumes the arc_buf */
		if (err != 0)
			dmu_return_arcbuf(rrd->arc_buf);
		rrd->arc_buf = NULL;
		rrd->payload = NULL;
		break;
	}
	case DRR_OBJECT_RANGE:
	{
		struct drr_object_range *drror =
		    &rrd->header.drr_u.drr_object_range;
		return (receive_object_range(rwa, drror));
	}
	default:
		return (SET_ERROR(EINVAL));
	}

	if (err != 0)
		dprintf_drr(rrd, err);

	return (err);
}
/*
 * dmu_recv_stream's worker thread; pull records off the queue, and then call
 * receive_process_record.  When we're done, signal the main thread and exit.
 */
static void
receive_writer_thread(void *arg)
{
	struct receive_writer_arg *rwa = arg;
	struct receive_record_arg *rrd;
	fstrans_cookie_t cookie = spl_fstrans_mark();

	for (rrd = bqueue_dequeue(&rwa->q); !rrd->eos_marker;
	    rrd = bqueue_dequeue(&rwa->q)) {
		/*
		 * If there's an error, the main thread will stop putting things
		 * on the queue, but we need to clear everything in it before we
		 * can exit.
		 */
		if (rwa->err == 0) {
			rwa->err = receive_process_record(rwa, rrd);
		} else if (rrd->arc_buf != NULL) {
			dmu_return_arcbuf(rrd->arc_buf);
			rrd->arc_buf = NULL;
			rrd->payload = NULL;
		} else if (rrd->payload != NULL) {
			kmem_free(rrd->payload, rrd->payload_size);
			rrd->payload = NULL;
		}
		kmem_free(rrd, sizeof (*rrd));
	}
	kmem_free(rrd, sizeof (*rrd));
	mutex_enter(&rwa->mutex);
	rwa->done = B_TRUE;
	cv_signal(&rwa->cv);
	mutex_exit(&rwa->mutex);
	spl_fstrans_unmark(cookie);
}
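/*
 * Cross-check the resume point claimed in the stream's DRR_BEGIN payload
 * ("resume_object"/"resume_offset") against the values previously saved
 * in the dataset's ZAP by save_resume_state().
 */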
static int
resume_check(struct receive_arg *ra, nvlist_t *begin_nvl)
{
	uint64_t val;
	objset_t *mos = dmu_objset_pool(ra->os)->dp_meta_objset;
	uint64_t dsobj = dmu_objset_id(ra->os);
	uint64_t resume_obj, resume_off;

	if (nvlist_lookup_uint64(begin_nvl,
	    "resume_object", &resume_obj) != 0 ||
	    nvlist_lookup_uint64(begin_nvl,
	    "resume_offset", &resume_off) != 0) {
		return (SET_ERROR(EINVAL));
	}
	VERIFY0(zap_lookup(mos, dsobj,
	    DS_FIELD_RESUME_OBJECT, sizeof (val), 1, &val));
	if (resume_obj != val)
		return (SET_ERROR(EINVAL));
	VERIFY0(zap_lookup(mos, dsobj,
	    DS_FIELD_RESUME_OFFSET, sizeof (val), 1, &val));
	if (resume_off != val)
		return (SET_ERROR(EINVAL));

	return (0);
}
3686 * Read in the stream's records, one by one, and apply them to the pool. There
3687 * are two threads involved; the thread that calls this function will spin up a
3688 * worker thread, read the records off the stream one by one, and issue
3689 * prefetches for any necessary indirect blocks. It will then push the records
3690 * onto an internal blocking queue. The worker thread will pull the records off
3691 * the queue, and actually write the data into the DMU. This way, the worker
3692 * thread doesn't have to wait for reads to complete, since everything it needs
3693 * (the indirect blocks) will be prefetched.
3695 * NB: callers *must* call dmu_recv_end() if this succeeds.
3698 dmu_recv_stream(dmu_recv_cookie_t
*drc
, vnode_t
*vp
, offset_t
*voffp
,
3699 int cleanup_fd
, uint64_t *action_handlep
)
3702 struct receive_arg
*ra
;
3703 struct receive_writer_arg
*rwa
;
3705 uint32_t payloadlen
;
3707 nvlist_t
*begin_nvl
= NULL
;
3709 ra
= kmem_zalloc(sizeof (*ra
), KM_SLEEP
);
3710 rwa
= kmem_zalloc(sizeof (*rwa
), KM_SLEEP
);
3712 ra
->byteswap
= drc
->drc_byteswap
;
3713 ra
->raw
= drc
->drc_raw
;
3714 ra
->cksum
= drc
->drc_cksum
;
3718 if (dsl_dataset_is_zapified(drc
->drc_ds
)) {
3719 (void) zap_lookup(drc
->drc_ds
->ds_dir
->dd_pool
->dp_meta_objset
,
3720 drc
->drc_ds
->ds_object
, DS_FIELD_RESUME_BYTES
,
3721 sizeof (ra
->bytes_read
), 1, &ra
->bytes_read
);
3724 objlist_create(&ra
->ignore_objlist
);
3726 /* these were verified in dmu_recv_begin */
3727 ASSERT3U(DMU_GET_STREAM_HDRTYPE(drc
->drc_drrb
->drr_versioninfo
), ==,
3729 ASSERT3U(drc
->drc_drrb
->drr_type
, <, DMU_OST_NUMTYPES
);
3732 * Open the objset we are modifying.
3734 VERIFY0(dmu_objset_from_ds(drc
->drc_ds
, &ra
->os
));
3736 ASSERT(dsl_dataset_phys(drc
->drc_ds
)->ds_flags
& DS_FLAG_INCONSISTENT
);
3738 featureflags
= DMU_GET_FEATUREFLAGS(drc
->drc_drrb
->drr_versioninfo
);
3739 ra
->featureflags
= featureflags
;
3741 /* embedded data is incompatible with encrypted datasets */
3742 if (ra
->os
->os_encrypted
&&
3743 (featureflags
& DMU_BACKUP_FEATURE_EMBED_DATA
)) {
3744 err
= SET_ERROR(EINVAL
);
3748 /* if this stream is dedup'ed, set up the avl tree for guid mapping */
3749 if (featureflags
& DMU_BACKUP_FEATURE_DEDUP
) {
3752 if (cleanup_fd
== -1) {
3753 err
= SET_ERROR(EBADF
);
3756 err
= zfs_onexit_fd_hold(cleanup_fd
, &minor
);
3762 if (*action_handlep
== 0) {
3763 rwa
->guid_to_ds_map
=
3764 kmem_alloc(sizeof (avl_tree_t
), KM_SLEEP
);
3765 avl_create(rwa
->guid_to_ds_map
, guid_compare
,
3766 sizeof (guid_map_entry_t
),
3767 offsetof(guid_map_entry_t
, avlnode
));
3768 err
= zfs_onexit_add_cb(minor
,
3769 free_guid_map_onexit
, rwa
->guid_to_ds_map
,
3774 err
= zfs_onexit_cb_data(minor
, *action_handlep
,
3775 (void **)&rwa
->guid_to_ds_map
);
3780 drc
->drc_guid_to_ds_map
= rwa
->guid_to_ds_map
;
3783 payloadlen
= drc
->drc_drr_begin
->drr_payloadlen
;
3785 if (payloadlen
!= 0)
3786 payload
= kmem_alloc(payloadlen
, KM_SLEEP
);
3788 err
= receive_read_payload_and_next_header(ra
, payloadlen
, payload
);
3790 if (payloadlen
!= 0)
3791 kmem_free(payload
, payloadlen
);
3794 if (payloadlen
!= 0) {
3795 err
= nvlist_unpack(payload
, payloadlen
, &begin_nvl
, KM_SLEEP
);
3796 kmem_free(payload
, payloadlen
);
3801 /* handle DSL encryption key payload */
3802 if (featureflags
& DMU_BACKUP_FEATURE_RAW
) {
3803 nvlist_t
*keynvl
= NULL
;
3805 ASSERT(ra
->os
->os_encrypted
);
3806 ASSERT(drc
->drc_raw
);
3808 err
= nvlist_lookup_nvlist(begin_nvl
, "crypt_keydata", &keynvl
);
3813 * If this is a new dataset we set the key immediately.
3814 * Otherwise we don't want to change the key until we
3815 * are sure the rest of the receive succeeded so we stash
3816 * the keynvl away until then.
3818 err
= dsl_crypto_recv_raw(spa_name(ra
->os
->os_spa
),
3819 drc
->drc_ds
->ds_object
, drc
->drc_drrb
->drr_type
,
3820 keynvl
, drc
->drc_newfs
);
3824 if (!drc
->drc_newfs
)
3825 drc
->drc_keynvl
= fnvlist_dup(keynvl
);
3828 if (featureflags
& DMU_BACKUP_FEATURE_RESUMING
) {
3829 err
= resume_check(ra
, begin_nvl
);
3834 (void) bqueue_init(&rwa
->q
, zfs_recv_queue_length
,
3835 offsetof(struct receive_record_arg
, node
));
3836 cv_init(&rwa
->cv
, NULL
, CV_DEFAULT
, NULL
);
3837 mutex_init(&rwa
->mutex
, NULL
, MUTEX_DEFAULT
, NULL
);
3839 rwa
->byteswap
= drc
->drc_byteswap
;
3840 rwa
->resumable
= drc
->drc_resumable
;
3841 rwa
->raw
= drc
->drc_raw
;
3843 (void) thread_create(NULL
, 0, receive_writer_thread
, rwa
, 0, curproc
,
3844 TS_RUN
, minclsyspri
);
3846 * We're reading rwa->err without locks, which is safe since we are the
3847 * only reader, and the worker thread is the only writer. It's ok if we
3848 * miss a write for an iteration or two of the loop, since the writer
3849 * thread will keep freeing records we send it until we send it an eos
3852 * We can leave this loop in 3 ways: First, if rwa->err is
3853 * non-zero. In that case, the writer thread will free the rrd we just
3854 * pushed. Second, if we're interrupted; in that case, either it's the
3855 * first loop and ra->rrd was never allocated, or it's later and ra->rrd
3856 * has been handed off to the writer thread who will free it. Finally,
3857 * if receive_read_record fails or we're at the end of the stream, then
3858 * we free ra->rrd and exit.
3860 while (rwa
->err
== 0) {
3861 if (issig(JUSTLOOKING
) && issig(FORREAL
)) {
3862 err
= SET_ERROR(EINTR
);
3866 ASSERT3P(ra
->rrd
, ==, NULL
);
3867 ra
->rrd
= ra
->next_rrd
;
3868 ra
->next_rrd
= NULL
;
3869 /* Allocates and loads header into ra->next_rrd */
3870 err
= receive_read_record(ra
);
3872 if (ra
->rrd
->header
.drr_type
== DRR_END
|| err
!= 0) {
3873 kmem_free(ra
->rrd
, sizeof (*ra
->rrd
));
3878 bqueue_enqueue(&rwa
->q
, ra
->rrd
,
3879 sizeof (struct receive_record_arg
) + ra
->rrd
->payload_size
);
3882 if (ra
->next_rrd
== NULL
)
3883 ra
->next_rrd
= kmem_zalloc(sizeof (*ra
->next_rrd
), KM_SLEEP
);
3884 ra
->next_rrd
->eos_marker
= B_TRUE
;
3885 bqueue_enqueue(&rwa
->q
, ra
->next_rrd
, 1);
3887 mutex_enter(&rwa
->mutex
);
3888 while (!rwa
->done
) {
3889 cv_wait(&rwa
->cv
, &rwa
->mutex
);
3891 mutex_exit(&rwa
->mutex
);
3894 * If we are receiving a full stream as a clone, all object IDs which
3895 * are greater than the maximum ID referenced in the stream are
3896 * by definition unused and must be freed.
3898 if (drc
->drc_clone
&& drc
->drc_drrb
->drr_fromguid
== 0) {
3899 uint64_t obj
= rwa
->max_object
+ 1;
3903 while (next_err
== 0) {
3905 free_err
= dmu_free_long_object_raw(rwa
->os
,
3908 free_err
= dmu_free_long_object(rwa
->os
, obj
);
3910 if (free_err
!= 0 && free_err
!= ENOENT
)
3913 next_err
= dmu_object_next(rwa
->os
, &obj
, FALSE
, 0);
3917 if (free_err
!= 0 && free_err
!= ENOENT
)
3919 else if (next_err
!= ESRCH
)
3924 cv_destroy(&rwa
->cv
);
3925 mutex_destroy(&rwa
->mutex
);
3926 bqueue_destroy(&rwa
->q
);
3931 nvlist_free(begin_nvl
);
3932 if ((featureflags
& DMU_BACKUP_FEATURE_DEDUP
) && (cleanup_fd
!= -1))
3933 zfs_onexit_fd_rele(cleanup_fd
);

	if (err != 0) {
		/*
		 * Clean up references. If receive is not resumable,
		 * destroy what we created, so we don't leave it in
		 * the inconsistent state.
		 */
		dmu_recv_cleanup_ds(drc);
		nvlist_free(drc->drc_keynvl);
	}

	*voffp = ra->voff;
	objlist_destroy(&ra->ignore_objlist);
	kmem_free(ra, sizeof (*ra));
	kmem_free(rwa, sizeof (*rwa));
	return (err);
}
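
/*
 * dmu_recv_end_check() and dmu_recv_end_sync() below are the check/sync
 * halves of the DSL sync task dispatched by dmu_recv_existing_end() and
 * dmu_recv_new_end(): the check may fail and abort the task, while the
 * sync commits the received snapshot in syncing context.
 */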
static int
dmu_recv_end_check(void *arg, dmu_tx_t *tx)
{
	dmu_recv_cookie_t *drc = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	int error;

	ASSERT3P(drc->drc_ds->ds_owner, ==, dmu_recv_tag);

	if (!drc->drc_newfs) {
		dsl_dataset_t *origin_head;

		error = dsl_dataset_hold(dp, drc->drc_tofs, FTAG, &origin_head);
		if (error != 0)
			return (error);
		if (drc->drc_force) {
			/*
			 * We will destroy any snapshots in tofs (i.e. before
			 * origin_head) that are after the origin (which is
			 * the snap before drc_ds, because drc_ds can not
			 * have any snaps of its own).
			 */
			uint64_t obj;

			obj = dsl_dataset_phys(origin_head)->ds_prev_snap_obj;
			while (obj !=
			    dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj) {
				dsl_dataset_t *snap;
				error = dsl_dataset_hold_obj(dp, obj, FTAG,
				    &snap);
				if (error != 0)
					break;
				if (snap->ds_dir != origin_head->ds_dir)
					error = SET_ERROR(EINVAL);
				if (error == 0) {
					error = dsl_destroy_snapshot_check_impl(
					    snap, tx);
				}
				obj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
				dsl_dataset_rele(snap, FTAG);
				if (error != 0)
					break;
			}
			if (error != 0) {
				dsl_dataset_rele(origin_head, FTAG);
				return (error);
			}
		}
		if (drc->drc_keynvl != NULL) {
			error = dsl_crypto_recv_raw_key_check(drc->drc_ds,
			    drc->drc_keynvl, tx);
			if (error != 0) {
				dsl_dataset_rele(origin_head, FTAG);
				return (error);
			}
		}

		error = dsl_dataset_clone_swap_check_impl(drc->drc_ds,
		    origin_head, drc->drc_force, drc->drc_owner, tx);
		if (error != 0) {
			dsl_dataset_rele(origin_head, FTAG);
			return (error);
		}
		error = dsl_dataset_snapshot_check_impl(origin_head,
		    drc->drc_tosnap, tx, B_TRUE, 1, drc->drc_cred);
		dsl_dataset_rele(origin_head, FTAG);
		if (error != 0)
			return (error);

		error = dsl_destroy_head_check_impl(drc->drc_ds, 1);
	} else {
		error = dsl_dataset_snapshot_check_impl(drc->drc_ds,
		    drc->drc_tosnap, tx, B_TRUE, 1, drc->drc_cred);
	}
	return (error);
}
static void
dmu_recv_end_sync(void *arg, dmu_tx_t *tx)
{
	dmu_recv_cookie_t *drc = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	boolean_t encrypted = drc->drc_ds->ds_dir->dd_crypto_obj != 0;

	spa_history_log_internal_ds(drc->drc_ds, "finish receiving",
	    tx, "snap=%s", drc->drc_tosnap);

	if (!drc->drc_newfs) {
		dsl_dataset_t *origin_head;

		VERIFY0(dsl_dataset_hold(dp, drc->drc_tofs, FTAG,
		    &origin_head));

		if (drc->drc_force) {
			/*
			 * Destroy any snapshots of drc_tofs (origin_head)
			 * after the origin (the snap before drc_ds).
			 */
			uint64_t obj;

			obj = dsl_dataset_phys(origin_head)->ds_prev_snap_obj;
			while (obj !=
			    dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj) {
				dsl_dataset_t *snap;
				VERIFY0(dsl_dataset_hold_obj(dp, obj, FTAG,
				    &snap));
				ASSERT3P(snap->ds_dir, ==, origin_head->ds_dir);
				obj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
				dsl_destroy_snapshot_sync_impl(snap,
				    B_FALSE, tx);
				dsl_dataset_rele(snap, FTAG);
			}
		}
		if (drc->drc_keynvl != NULL) {
			dsl_crypto_recv_raw_key_sync(drc->drc_ds,
			    drc->drc_keynvl, tx);
			nvlist_free(drc->drc_keynvl);
			drc->drc_keynvl = NULL;
		}

		VERIFY3P(drc->drc_ds->ds_prev, ==, origin_head->ds_prev);

		dsl_dataset_clone_swap_sync_impl(drc->drc_ds,
		    origin_head, tx);
		dsl_dataset_snapshot_sync_impl(origin_head,
		    drc->drc_tosnap, tx);

		/* set snapshot's creation time and guid */
		dmu_buf_will_dirty(origin_head->ds_prev->ds_dbuf, tx);
		dsl_dataset_phys(origin_head->ds_prev)->ds_creation_time =
		    drc->drc_drrb->drr_creation_time;
		dsl_dataset_phys(origin_head->ds_prev)->ds_guid =
		    drc->drc_drrb->drr_toguid;
		dsl_dataset_phys(origin_head->ds_prev)->ds_flags &=
		    ~DS_FLAG_INCONSISTENT;

		dmu_buf_will_dirty(origin_head->ds_dbuf, tx);
		dsl_dataset_phys(origin_head)->ds_flags &=
		    ~DS_FLAG_INCONSISTENT;

		drc->drc_newsnapobj =
		    dsl_dataset_phys(origin_head)->ds_prev_snap_obj;

		dsl_dataset_rele(origin_head, FTAG);
		dsl_destroy_head_sync_impl(drc->drc_ds, tx);

		if (drc->drc_owner != NULL)
			VERIFY3P(origin_head->ds_owner, ==, drc->drc_owner);
	} else {
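		/*
		 * We received into a brand-new filesystem, so just
		 * snapshot it and clear the inconsistent flag.
		 */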
		dsl_dataset_t *ds = drc->drc_ds;

		dsl_dataset_snapshot_sync_impl(ds, drc->drc_tosnap, tx);

		/* set snapshot's creation time and guid */
		dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
		dsl_dataset_phys(ds->ds_prev)->ds_creation_time =
		    drc->drc_drrb->drr_creation_time;
		dsl_dataset_phys(ds->ds_prev)->ds_guid =
		    drc->drc_drrb->drr_toguid;
		dsl_dataset_phys(ds->ds_prev)->ds_flags &=
		    ~DS_FLAG_INCONSISTENT;

		dmu_buf_will_dirty(ds->ds_dbuf, tx);
		dsl_dataset_phys(ds)->ds_flags &= ~DS_FLAG_INCONSISTENT;
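		/*
		 * The receive completed successfully, so remove any
		 * resume state left behind by an earlier interrupted
		 * receive.
		 */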
		if (dsl_dataset_has_resume_receive_state(ds)) {
			(void) zap_remove(dp->dp_meta_objset, ds->ds_object,
			    DS_FIELD_RESUME_FROMGUID, tx);
			(void) zap_remove(dp->dp_meta_objset, ds->ds_object,
			    DS_FIELD_RESUME_OBJECT, tx);
			(void) zap_remove(dp->dp_meta_objset, ds->ds_object,
			    DS_FIELD_RESUME_OFFSET, tx);
			(void) zap_remove(dp->dp_meta_objset, ds->ds_object,
			    DS_FIELD_RESUME_BYTES, tx);
			(void) zap_remove(dp->dp_meta_objset, ds->ds_object,
			    DS_FIELD_RESUME_TOGUID, tx);
			(void) zap_remove(dp->dp_meta_objset, ds->ds_object,
			    DS_FIELD_RESUME_TONAME, tx);
		}
		drc->drc_newsnapobj =
		    dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj;
	}
	zvol_create_minors(dp->dp_spa, drc->drc_tofs, B_TRUE);

	/*
	 * Release the hold from dmu_recv_begin.  This must be done before
	 * we return to open context, so that when we free the dataset's dnode
	 * we can evict its bonus buffer. Since the dataset may be destroyed
	 * at this point (and therefore won't have a valid pointer to the spa)
	 * we release the key mapping manually here while we do have a valid
	 * pointer, if it exists.
	 */
	if (!drc->drc_raw && encrypted) {
		(void) spa_keystore_remove_mapping(dmu_tx_pool(tx)->dp_spa,
		    drc->drc_ds->ds_object, drc->drc_ds);
	}
	dsl_dataset_disown(drc->drc_ds, 0, dmu_recv_tag);
	drc->drc_ds = NULL;
}
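
/*
 * Record a received snapshot in the stream's GUID-to-dataset map, which
 * deduplicated streams use to resolve DRR_WRITE_BYREF records against
 * snapshots received earlier in the stream.
 */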
static int
add_ds_to_guidmap(const char *name, avl_tree_t *guid_map, uint64_t snapobj,
    boolean_t raw)
{
	dsl_pool_t *dp;
	dsl_dataset_t *snapds;
	guid_map_entry_t *gmep;
	ds_hold_flags_t dsflags = (raw) ? 0 : DS_HOLD_FLAG_DECRYPT;
	int err;

	ASSERT(guid_map != NULL);

	err = dsl_pool_hold(name, FTAG, &dp);
	if (err != 0)
		return (err);
	gmep = kmem_alloc(sizeof (*gmep), KM_SLEEP);
	err = dsl_dataset_hold_obj_flags(dp, snapobj, dsflags, gmep, &snapds);
	if (err == 0) {
		gmep->guid = dsl_dataset_phys(snapds)->ds_guid;
		gmep->raw = raw;
		gmep->gme_ds = snapds;
		avl_add(guid_map, gmep);
		dsl_dataset_long_hold(snapds, gmep);
	} else {
		kmem_free(gmep, sizeof (*gmep));
	}

	dsl_pool_rele(dp, FTAG);
	return (err);
}
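
/*
 * Rough number of blocks that dmu_recv_end_sync() is expected to dirty,
 * handed to dsl_sync_task() below for its space check.
 */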
static int dmu_recv_end_modified_blocks = 3;

static int
dmu_recv_existing_end(dmu_recv_cookie_t *drc)
{
#ifdef _KERNEL
	/*
	 * We will be destroying the ds; make sure its origin is unmounted if
	 * necessary.
	 */
	char name[ZFS_MAX_DATASET_NAME_LEN];
	dsl_dataset_name(drc->drc_ds, name);
	zfs_destroy_unmount_origin(name);
#endif

	return (dsl_sync_task(drc->drc_tofs,
	    dmu_recv_end_check, dmu_recv_end_sync, drc,
	    dmu_recv_end_modified_blocks, ZFS_SPACE_CHECK_NORMAL));
}
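
/*
 * A receive into a new filesystem runs the same check/sync task; it
 * differs only in skipping the origin-unmount step above.
 */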
static int
dmu_recv_new_end(dmu_recv_cookie_t *drc)
{
	return (dsl_sync_task(drc->drc_tofs,
	    dmu_recv_end_check, dmu_recv_end_sync, drc,
	    dmu_recv_end_modified_blocks, ZFS_SPACE_CHECK_NORMAL));
}

int
dmu_recv_end(dmu_recv_cookie_t *drc, void *owner)
{
	int error;

	drc->drc_owner = owner;

	if (drc->drc_newfs)
		error = dmu_recv_new_end(drc);
	else
		error = dmu_recv_existing_end(drc);

	if (error != 0) {
		dmu_recv_cleanup_ds(drc);
		nvlist_free(drc->drc_keynvl);
	} else if (drc->drc_guid_to_ds_map != NULL) {
		(void) add_ds_to_guidmap(drc->drc_tofs, drc->drc_guid_to_ds_map,
		    drc->drc_newsnapobj, drc->drc_raw);
	}
	return (error);
}

/*
 * Return TRUE if this objset is currently being received into.
 */
boolean_t
dmu_objset_is_receiving(objset_t *os)
{
	return (os->os_dsl_dataset != NULL &&
	    os->os_dsl_dataset->ds_owner == dmu_recv_tag);
}

#if defined(_KERNEL)
module_param(zfs_send_corrupt_data, int, 0644);
MODULE_PARM_DESC(zfs_send_corrupt_data, "Allow sending corrupt data");