/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2011, 2020 by Delphix. All rights reserved.
 * Copyright (c) 2014, Joyent, Inc. All rights reserved.
 * Copyright 2014 HybridCluster. All rights reserved.
 * Copyright (c) 2018, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
 * Copyright (c) 2019, Klara Inc.
 * Copyright (c) 2019, Allan Jude
 */
#include <sys/dmu_impl.h>
#include <sys/dmu_send.h>
#include <sys/dmu_recv.h>
#include <sys/dmu_tx.h>
#include <sys/dnode.h>
#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/zfs_ioctl.h>
#include <sys/zio_checksum.h>
#include <sys/zfs_znode.h>
#include <zfs_fletcher.h>
#include <sys/zfs_onexit.h>
#include <sys/dsl_destroy.h>
#include <sys/blkptr.h>
#include <sys/dsl_bookmark.h>
#include <sys/zfeature.h>
#include <sys/bqueue.h>
#include <sys/objlist.h>
#include <sys/zfs_vfsops.h>
#include <sys/zfs_file.h>
static int zfs_recv_queue_length = SPA_MAXBLOCKSIZE;
static int zfs_recv_queue_ff = 20;
static int zfs_recv_write_batch_size = 1024 * 1024;

static void *const dmu_recv_tag = "dmu_recv_tag";
const char *const recv_clone_name = "%recv";
static int receive_read_payload_and_next_header(dmu_recv_cookie_t *ra, int len,
    void *buf);
struct receive_record_arg {
	dmu_replay_record_t header;
	void *payload; /* Pointer to a buffer containing the payload */
	/*
	 * If the record is a WRITE or SPILL, pointer to the abd containing the
	 * payload.
	 */
	abd_t *abd;
	int payload_size;
	uint64_t bytes_read; /* bytes read from stream when record created */
	boolean_t eos_marker; /* Marks the end of the stream */
	bqueue_node_t node;
};
struct receive_writer_arg {
	objset_t *os;
	boolean_t byteswap;
	bqueue_t q;

	/*
	 * These three members are used to signal to the main thread when
	 * we're done.
	 */
	kmutex_t mutex;
	kcondvar_t cv;
	boolean_t done;

	int err;
	boolean_t resumable;
	boolean_t raw;	/* DMU_BACKUP_FEATURE_RAW set */
	boolean_t spill; /* DRR_FLAG_SPILL_BLOCK set */
	boolean_t full;	/* this is a full send stream */
	uint64_t last_object;
	uint64_t last_offset;
	uint64_t max_object; /* highest object ID referenced in stream */
	uint64_t bytes_read; /* bytes read when current record created */

	list_t write_batch;

	/* Encryption parameters for the last received DRR_OBJECT_RANGE */
	boolean_t or_crypt_params_present;
	uint64_t or_firstobj;
	uint64_t or_numslots;
	uint8_t or_salt[ZIO_DATA_SALT_LEN];
	uint8_t or_iv[ZIO_DATA_IV_LEN];
	uint8_t or_mac[ZIO_DATA_MAC_LEN];
	boolean_t or_byteorder;
};
typedef struct dmu_recv_begin_arg {
	const char *drba_origin;
	dmu_recv_cookie_t *drba_cookie;
	cred_t *drba_cred;
	proc_t *drba_proc;
	dsl_crypto_params_t *drba_dcp;
} dmu_recv_begin_arg_t;
static void
byteswap_record(dmu_replay_record_t *drr)
{
#define	DO64(X) (drr->drr_u.X = BSWAP_64(drr->drr_u.X))
#define	DO32(X) (drr->drr_u.X = BSWAP_32(drr->drr_u.X))
	drr->drr_type = BSWAP_32(drr->drr_type);
	drr->drr_payloadlen = BSWAP_32(drr->drr_payloadlen);

	switch (drr->drr_type) {
	case DRR_BEGIN:
		DO64(drr_begin.drr_magic);
		DO64(drr_begin.drr_versioninfo);
		DO64(drr_begin.drr_creation_time);
		DO32(drr_begin.drr_type);
		DO32(drr_begin.drr_flags);
		DO64(drr_begin.drr_toguid);
		DO64(drr_begin.drr_fromguid);
		break;
	case DRR_OBJECT:
		DO64(drr_object.drr_object);
		DO32(drr_object.drr_type);
		DO32(drr_object.drr_bonustype);
		DO32(drr_object.drr_blksz);
		DO32(drr_object.drr_bonuslen);
		DO32(drr_object.drr_raw_bonuslen);
		DO64(drr_object.drr_toguid);
		DO64(drr_object.drr_maxblkid);
		break;
	case DRR_FREEOBJECTS:
		DO64(drr_freeobjects.drr_firstobj);
		DO64(drr_freeobjects.drr_numobjs);
		DO64(drr_freeobjects.drr_toguid);
		break;
	case DRR_WRITE:
		DO64(drr_write.drr_object);
		DO32(drr_write.drr_type);
		DO64(drr_write.drr_offset);
		DO64(drr_write.drr_logical_size);
		DO64(drr_write.drr_toguid);
		ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_write.drr_key.ddk_cksum);
		DO64(drr_write.drr_key.ddk_prop);
		DO64(drr_write.drr_compressed_size);
		break;
	case DRR_WRITE_EMBEDDED:
		DO64(drr_write_embedded.drr_object);
		DO64(drr_write_embedded.drr_offset);
		DO64(drr_write_embedded.drr_length);
		DO64(drr_write_embedded.drr_toguid);
		DO32(drr_write_embedded.drr_lsize);
		DO32(drr_write_embedded.drr_psize);
		break;
	case DRR_FREE:
		DO64(drr_free.drr_object);
		DO64(drr_free.drr_offset);
		DO64(drr_free.drr_length);
		DO64(drr_free.drr_toguid);
		break;
	case DRR_SPILL:
		DO64(drr_spill.drr_object);
		DO64(drr_spill.drr_length);
		DO64(drr_spill.drr_toguid);
		DO64(drr_spill.drr_compressed_size);
		DO32(drr_spill.drr_type);
		break;
	case DRR_OBJECT_RANGE:
		DO64(drr_object_range.drr_firstobj);
		DO64(drr_object_range.drr_numslots);
		DO64(drr_object_range.drr_toguid);
		break;
	case DRR_REDACT:
		DO64(drr_redact.drr_object);
		DO64(drr_redact.drr_offset);
		DO64(drr_redact.drr_length);
		DO64(drr_redact.drr_toguid);
		break;
	case DRR_END:
		DO64(drr_end.drr_toguid);
		ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_end.drr_checksum);
		break;
	default:
		break;
	}

	if (drr->drr_type != DRR_BEGIN) {
		ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_checksum.drr_checksum);
	}

#undef DO64
#undef DO32
}
static boolean_t
redact_snaps_contains(uint64_t *snaps, uint64_t num_snaps, uint64_t guid)
{
	for (int i = 0; i < num_snaps; i++) {
		if (snaps[i] == guid)
			return (B_TRUE);
	}
	return (B_FALSE);
}
/*
 * Check that the new stream we're trying to receive is redacted with respect to
 * a subset of the snapshots that the origin was redacted with respect to. For
 * the reasons behind this, see the man page on redacted zfs sends and receives.
 */
static boolean_t
compatible_redact_snaps(uint64_t *origin_snaps, uint64_t origin_num_snaps,
    uint64_t *redact_snaps, uint64_t num_redact_snaps)
{
	/*
	 * Short circuit the comparison; if we are redacted with respect to
	 * more snapshots than the origin, we can't be redacted with respect
	 * to a subset.
	 */
	if (num_redact_snaps > origin_num_snaps) {
		return (B_FALSE);
	}

	for (int i = 0; i < num_redact_snaps; i++) {
		if (!redact_snaps_contains(origin_snaps, origin_num_snaps,
		    redact_snaps[i])) {
			return (B_FALSE);
		}
	}
	return (B_TRUE);
}
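/*
 * Illustrative sketch (not part of the receive path; guarded out so it is
 * never compiled into the module). It exercises the two helpers above with
 * hypothetical guids to make the invariant concrete: the stream is
 * compatible only if every snapshot guid it was redacted with respect to
 * also appears in the origin's redaction list.
 */
#if 0
static boolean_t
example_subset_check(void)
{
	uint64_t origin_snaps[] = { 111, 222, 333 };	/* origin's list */
	uint64_t stream_snaps[] = { 222, 333 };		/* stream's list */

	/* 2 <= 3, and both 222 and 333 appear above -> B_TRUE */
	return (compatible_redact_snaps(origin_snaps, 3, stream_snaps, 2));
}
#endif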
static boolean_t
redact_check(dmu_recv_begin_arg_t *drba, dsl_dataset_t *origin)
{
	uint64_t *origin_snaps;
	uint64_t origin_num_snaps;
	dmu_recv_cookie_t *drc = drba->drba_cookie;
	struct drr_begin *drrb = drc->drc_drrb;
	int featureflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo);
	int err = 0;
	boolean_t ret = B_TRUE;
	uint64_t *redact_snaps;
	uint_t numredactsnaps;

	/*
	 * If this is a full send stream, we're safe no matter what.
	 */
	if (drrb->drr_fromguid == 0)
		return (ret);

	VERIFY(dsl_dataset_get_uint64_array_feature(origin,
	    SPA_FEATURE_REDACTED_DATASETS, &origin_num_snaps, &origin_snaps));

	if (nvlist_lookup_uint64_array(drc->drc_begin_nvl,
	    BEGINNV_REDACT_FROM_SNAPS, &redact_snaps, &numredactsnaps) ==
	    0) {
		/*
		 * If the send stream was sent from the redaction bookmark or
		 * the redacted version of the dataset, then we're safe. Verify
		 * that this is from a compatible redaction bookmark or
		 * redacted dataset.
		 */
		if (!compatible_redact_snaps(origin_snaps, origin_num_snaps,
		    redact_snaps, numredactsnaps)) {
			ret = B_FALSE;
		}
	} else if (featureflags & DMU_BACKUP_FEATURE_REDACTED) {
		/*
		 * If the stream is redacted, it must be redacted with respect
		 * to a subset of what the origin is redacted with respect to.
		 * See case number 2 in the zfs man page section on redacted zfs
		 * streams.
		 */
		err = nvlist_lookup_uint64_array(drc->drc_begin_nvl,
		    BEGINNV_REDACT_SNAPS, &redact_snaps, &numredactsnaps);

		if (err != 0 || !compatible_redact_snaps(origin_snaps,
		    origin_num_snaps, redact_snaps, numredactsnaps)) {
			ret = B_FALSE;
		}
	} else if (!redact_snaps_contains(origin_snaps, origin_num_snaps,
	    drrb->drr_toguid)) {
		/*
		 * If the stream isn't redacted but the origin is, this must be
		 * one of the snapshots the origin is redacted with respect to.
		 * See case number 1 in the zfs man page section on redacted zfs
		 * streams.
		 */
		ret = B_FALSE;
	}

	return (ret);
}
/*
 * If we previously received a stream with --large-block, we don't support
 * receiving an incremental on top of it without --large-block. This avoids
 * forcing a read-modify-write or trying to re-aggregate a string of WRITE
 * records.
 */
static int
recv_check_large_blocks(dsl_dataset_t *ds, uint64_t featureflags)
{
	if (dsl_dataset_feature_is_active(ds, SPA_FEATURE_LARGE_BLOCKS) &&
	    !(featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS))
		return (SET_ERROR(ZFS_ERR_STREAM_LARGE_BLOCK_MISMATCH));
	return (0);
}
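/*
 * Worked example (hypothetical datasets): if "pool/fs" was last received
 * from a stream generated with "zfs send -L" (so SPA_FEATURE_LARGE_BLOCKS
 * is active on the dataset), a later incremental generated without -L has
 * DMU_BACKUP_FEATURE_LARGE_BLOCKS clear, and the check above fails with
 * ZFS_ERR_STREAM_LARGE_BLOCK_MISMATCH. Re-sending with -L avoids the error.
 */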
static int
recv_begin_check_existing_impl(dmu_recv_begin_arg_t *drba, dsl_dataset_t *ds,
    uint64_t fromguid, uint64_t featureflags)
{
	uint64_t val;
	uint64_t children;
	int error;
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	boolean_t encrypted = ds->ds_dir->dd_crypto_obj != 0;
	boolean_t raw = (featureflags & DMU_BACKUP_FEATURE_RAW) != 0;
	boolean_t embed = (featureflags & DMU_BACKUP_FEATURE_EMBED_DATA) != 0;

	/* Temporary clone name must not exist. */
	error = zap_lookup(dp->dp_meta_objset,
	    dsl_dir_phys(ds->ds_dir)->dd_child_dir_zapobj, recv_clone_name,
	    8, 1, &val);
	if (error != ENOENT)
		return (error == 0 ? SET_ERROR(EBUSY) : error);

	/* Resume state must not be set. */
	if (dsl_dataset_has_resume_receive_state(ds))
		return (SET_ERROR(EBUSY));

	/* New snapshot name must not exist. */
	error = zap_lookup(dp->dp_meta_objset,
	    dsl_dataset_phys(ds)->ds_snapnames_zapobj,
	    drba->drba_cookie->drc_tosnap, 8, 1, &val);
	if (error != ENOENT)
		return (error == 0 ? SET_ERROR(EEXIST) : error);

	/* Must not have children if receiving a ZVOL. */
	error = zap_count(dp->dp_meta_objset,
	    dsl_dir_phys(ds->ds_dir)->dd_child_dir_zapobj, &children);
	if (error != 0)
		return (error);
	if (drba->drba_cookie->drc_drrb->drr_type != DMU_OST_ZFS &&
	    children > 0)
		return (SET_ERROR(ZFS_ERR_WRONG_PARENT));

	/*
	 * Check snapshot limit before receiving. We'll recheck again at the
	 * end, but might as well abort before receiving if we're already over
	 * the limit.
	 *
	 * Note that we do not check the file system limit with
	 * dsl_dir_fscount_check because the temporary %clones don't count
	 * against that limit.
	 */
	error = dsl_fs_ss_limit_check(ds->ds_dir, 1, ZFS_PROP_SNAPSHOT_LIMIT,
	    NULL, drba->drba_cred, drba->drba_proc);
	if (error != 0)
		return (error);

	if (fromguid != 0) {
		dsl_dataset_t *snap;
		uint64_t obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;

		/* Can't perform a raw receive on top of a non-raw receive */
		if (!encrypted && raw)
			return (SET_ERROR(EINVAL));

		/* Encryption is incompatible with embedded data */
		if (encrypted && embed)
			return (SET_ERROR(EINVAL));

		/* Find snapshot in this dir that matches fromguid. */
		while (obj != 0) {
			error = dsl_dataset_hold_obj(dp, obj, FTAG,
			    &snap);
			if (error != 0)
				return (SET_ERROR(ENODEV));
			if (snap->ds_dir != ds->ds_dir) {
				dsl_dataset_rele(snap, FTAG);
				return (SET_ERROR(ENODEV));
			}
			if (dsl_dataset_phys(snap)->ds_guid == fromguid)
				break;
			obj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
			dsl_dataset_rele(snap, FTAG);
		}
		if (obj == 0)
			return (SET_ERROR(ENODEV));

		if (drba->drba_cookie->drc_force) {
			drba->drba_cookie->drc_fromsnapobj = obj;
		} else {
			/*
			 * If we are not forcing, there must be no
			 * changes since fromsnap. Raw sends have an
			 * additional constraint that requires that
			 * no "noop" snapshots exist between fromsnap
			 * and tosnap for the IVset checking code to
			 * work properly.
			 */
			if (dsl_dataset_modified_since_snap(ds, snap) ||
			    (raw &&
			    dsl_dataset_phys(ds)->ds_prev_snap_obj !=
			    snap->ds_object)) {
				dsl_dataset_rele(snap, FTAG);
				return (SET_ERROR(ETXTBSY));
			}
			drba->drba_cookie->drc_fromsnapobj =
			    ds->ds_prev->ds_object;
		}

		if (dsl_dataset_feature_is_active(snap,
		    SPA_FEATURE_REDACTED_DATASETS) && !redact_check(drba,
		    snap)) {
			dsl_dataset_rele(snap, FTAG);
			return (SET_ERROR(EINVAL));
		}

		error = recv_check_large_blocks(snap, featureflags);
		if (error != 0) {
			dsl_dataset_rele(snap, FTAG);
			return (error);
		}

		dsl_dataset_rele(snap, FTAG);
	} else {
		/* if full, then must be forced */
		if (!drba->drba_cookie->drc_force)
			return (SET_ERROR(EEXIST));

		/*
		 * We don't support using zfs recv -F to blow away
		 * encrypted filesystems. This would require the
		 * dsl dir to point to the old encryption key and
		 * the new one at the same time during the receive.
		 */
		if ((!encrypted && raw) || encrypted)
			return (SET_ERROR(EINVAL));

		/*
		 * Perform the same encryption checks we would if
		 * we were creating a new dataset from scratch.
		 */
		if (!raw) {
			boolean_t will_encrypt;

			error = dmu_objset_create_crypt_check(
			    ds->ds_dir->dd_parent, drba->drba_dcp,
			    &will_encrypt);
			if (error != 0)
				return (error);

			if (will_encrypt && embed)
				return (SET_ERROR(EINVAL));
		}
	}

	return (0);
}
/*
 * Check that any feature flags used in the data stream we're receiving are
 * supported by the pool we are receiving into.
 *
 * Note that some of the features we explicitly check here have additional
 * (implicit) features they depend on, but those dependencies are enforced
 * through the zfeature_register() calls declaring the features that we
 * explicitly check.
 */
static int
recv_begin_check_feature_flags_impl(uint64_t featureflags, spa_t *spa)
{
	/*
	 * Check if there are any unsupported feature flags.
	 */
	if (!DMU_STREAM_SUPPORTED(featureflags)) {
		return (SET_ERROR(ZFS_ERR_UNKNOWN_SEND_STREAM_FEATURE));
	}

	/* Verify pool version supports SA if SA_SPILL feature set */
	if ((featureflags & DMU_BACKUP_FEATURE_SA_SPILL) &&
	    spa_version(spa) < SPA_VERSION_SA)
		return (SET_ERROR(ENOTSUP));

	/*
	 * LZ4 compressed, ZSTD compressed, embedded, mooched, large blocks,
	 * and large_dnodes in the stream can only be used if those pool
	 * features are enabled because we don't attempt to decompress /
	 * un-embed / un-mooch / split up the blocks / dnodes during the
	 * receive process.
	 */
	if ((featureflags & DMU_BACKUP_FEATURE_LZ4) &&
	    !spa_feature_is_enabled(spa, SPA_FEATURE_LZ4_COMPRESS))
		return (SET_ERROR(ENOTSUP));
	if ((featureflags & DMU_BACKUP_FEATURE_ZSTD) &&
	    !spa_feature_is_enabled(spa, SPA_FEATURE_ZSTD_COMPRESS))
		return (SET_ERROR(ENOTSUP));
	if ((featureflags & DMU_BACKUP_FEATURE_EMBED_DATA) &&
	    !spa_feature_is_enabled(spa, SPA_FEATURE_EMBEDDED_DATA))
		return (SET_ERROR(ENOTSUP));
	if ((featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) &&
	    !spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS))
		return (SET_ERROR(ENOTSUP));
	if ((featureflags & DMU_BACKUP_FEATURE_LARGE_DNODE) &&
	    !spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_DNODE))
		return (SET_ERROR(ENOTSUP));

	/*
	 * Receiving redacted streams requires that redacted datasets are
	 * enabled.
	 */
	if ((featureflags & DMU_BACKUP_FEATURE_REDACTED) &&
	    !spa_feature_is_enabled(spa, SPA_FEATURE_REDACTED_DATASETS))
		return (SET_ERROR(ENOTSUP));

	return (0);
}
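/*
 * Summary of the stream-flag to pool-feature requirements enforced above
 * (restating the checks, not adding new ones):
 *
 *	DMU_BACKUP_FEATURE_SA_SPILL	-> SPA_VERSION_SA (pool version)
 *	DMU_BACKUP_FEATURE_LZ4		-> SPA_FEATURE_LZ4_COMPRESS
 *	DMU_BACKUP_FEATURE_ZSTD		-> SPA_FEATURE_ZSTD_COMPRESS
 *	DMU_BACKUP_FEATURE_EMBED_DATA	-> SPA_FEATURE_EMBEDDED_DATA
 *	DMU_BACKUP_FEATURE_LARGE_BLOCKS	-> SPA_FEATURE_LARGE_BLOCKS
 *	DMU_BACKUP_FEATURE_LARGE_DNODE	-> SPA_FEATURE_LARGE_DNODE
 *	DMU_BACKUP_FEATURE_REDACTED	-> SPA_FEATURE_REDACTED_DATASETS
 */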
static int
dmu_recv_begin_check(void *arg, dmu_tx_t *tx)
{
	dmu_recv_begin_arg_t *drba = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
	uint64_t fromguid = drrb->drr_fromguid;
	int flags = drrb->drr_flags;
	ds_hold_flags_t dsflags = DS_HOLD_FLAG_NONE;
	int error;
	uint64_t featureflags = drba->drba_cookie->drc_featureflags;
	dsl_dataset_t *ds;
	const char *tofs = drba->drba_cookie->drc_tofs;

	/* already checked */
	ASSERT3U(drrb->drr_magic, ==, DMU_BACKUP_MAGIC);
	ASSERT(!(featureflags & DMU_BACKUP_FEATURE_RESUMING));

	if (DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) ==
	    DMU_COMPOUNDSTREAM ||
	    drrb->drr_type >= DMU_OST_NUMTYPES ||
	    ((flags & DRR_FLAG_CLONE) && drba->drba_origin == NULL))
		return (SET_ERROR(EINVAL));

	error = recv_begin_check_feature_flags_impl(featureflags, dp->dp_spa);
	if (error != 0)
		return (error);

	/* Resumable receives require extensible datasets */
	if (drba->drba_cookie->drc_resumable &&
	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_EXTENSIBLE_DATASET))
		return (SET_ERROR(ENOTSUP));

	if (featureflags & DMU_BACKUP_FEATURE_RAW) {
		/* raw receives require the encryption feature */
		if (!spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_ENCRYPTION))
			return (SET_ERROR(ENOTSUP));

		/* embedded data is incompatible with encryption and raw recv */
		if (featureflags & DMU_BACKUP_FEATURE_EMBED_DATA)
			return (SET_ERROR(EINVAL));

		/* raw receives require spill block allocation flag */
		if (!(flags & DRR_FLAG_SPILL_BLOCK))
			return (SET_ERROR(ZFS_ERR_SPILL_BLOCK_FLAG_MISSING));
	} else {
		dsflags |= DS_HOLD_FLAG_DECRYPT;
	}

	error = dsl_dataset_hold_flags(dp, tofs, dsflags, FTAG, &ds);
	if (error == 0) {
		/* target fs already exists; recv into temp clone */

		/* Can't recv a clone into an existing fs */
		if (flags & DRR_FLAG_CLONE || drba->drba_origin) {
			dsl_dataset_rele_flags(ds, dsflags, FTAG);
			return (SET_ERROR(EINVAL));
		}

		error = recv_begin_check_existing_impl(drba, ds, fromguid,
		    featureflags);
		dsl_dataset_rele_flags(ds, dsflags, FTAG);
	} else if (error == ENOENT) {
		/* target fs does not exist; must be a full backup or clone */
		char buf[ZFS_MAX_DATASET_NAME_LEN];
		objset_t *os;

		/*
		 * If it's a non-clone incremental, we are missing the
		 * target fs, so fail the recv.
		 */
		if (fromguid != 0 && !((flags & DRR_FLAG_CLONE) ||
		    drba->drba_origin))
			return (SET_ERROR(ENOENT));

		/*
		 * If we're receiving a full send as a clone, and it doesn't
		 * contain all the necessary free records and freeobject
		 * records, reject it.
		 */
		if (fromguid == 0 && drba->drba_origin != NULL &&
		    !(flags & DRR_FLAG_FREERECORDS))
			return (SET_ERROR(EINVAL));

		/* Open the parent of tofs */
		ASSERT3U(strlen(tofs), <, sizeof (buf));
		(void) strlcpy(buf, tofs, strrchr(tofs, '/') - tofs + 1);
		error = dsl_dataset_hold(dp, buf, FTAG, &ds);
		if (error != 0)
			return (error);

		if ((featureflags & DMU_BACKUP_FEATURE_RAW) == 0 &&
		    drba->drba_origin == NULL) {
			boolean_t will_encrypt;

			/*
			 * Check that we aren't breaking any encryption rules
			 * and that we have all the parameters we need to
			 * create an encrypted dataset if necessary. If we are
			 * making an encrypted dataset the stream can't have
			 * embedded data.
			 */
			error = dmu_objset_create_crypt_check(ds->ds_dir,
			    drba->drba_dcp, &will_encrypt);
			if (error != 0) {
				dsl_dataset_rele(ds, FTAG);
				return (error);
			}

			if (will_encrypt &&
			    (featureflags & DMU_BACKUP_FEATURE_EMBED_DATA)) {
				dsl_dataset_rele(ds, FTAG);
				return (SET_ERROR(EINVAL));
			}
		}

		/*
		 * Check filesystem and snapshot limits before receiving. We'll
		 * recheck snapshot limits again at the end (we create the
		 * filesystems and increment those counts during begin_sync).
		 */
		error = dsl_fs_ss_limit_check(ds->ds_dir, 1,
		    ZFS_PROP_FILESYSTEM_LIMIT, NULL,
		    drba->drba_cred, drba->drba_proc);
		if (error != 0) {
			dsl_dataset_rele(ds, FTAG);
			return (error);
		}

		error = dsl_fs_ss_limit_check(ds->ds_dir, 1,
		    ZFS_PROP_SNAPSHOT_LIMIT, NULL,
		    drba->drba_cred, drba->drba_proc);
		if (error != 0) {
			dsl_dataset_rele(ds, FTAG);
			return (error);
		}

		/* can't recv below anything but filesystems (eg. no ZVOLs) */
		error = dmu_objset_from_ds(ds, &os);
		if (error != 0) {
			dsl_dataset_rele(ds, FTAG);
			return (error);
		}
		if (dmu_objset_type(os) != DMU_OST_ZFS) {
			dsl_dataset_rele(ds, FTAG);
			return (SET_ERROR(ZFS_ERR_WRONG_PARENT));
		}

		if (drba->drba_origin != NULL) {
			dsl_dataset_t *origin;
			error = dsl_dataset_hold_flags(dp, drba->drba_origin,
			    dsflags, FTAG, &origin);
			if (error != 0) {
				dsl_dataset_rele(ds, FTAG);
				return (error);
			}
			if (!origin->ds_is_snapshot) {
				dsl_dataset_rele_flags(origin, dsflags, FTAG);
				dsl_dataset_rele(ds, FTAG);
				return (SET_ERROR(EINVAL));
			}
			if (dsl_dataset_phys(origin)->ds_guid != fromguid &&
			    fromguid != 0) {
				dsl_dataset_rele_flags(origin, dsflags, FTAG);
				dsl_dataset_rele(ds, FTAG);
				return (SET_ERROR(ENODEV));
			}

			if (origin->ds_dir->dd_crypto_obj != 0 &&
			    (featureflags & DMU_BACKUP_FEATURE_EMBED_DATA)) {
				dsl_dataset_rele_flags(origin, dsflags, FTAG);
				dsl_dataset_rele(ds, FTAG);
				return (SET_ERROR(EINVAL));
			}

			/*
			 * If the origin is redacted we need to verify that this
			 * send stream can safely be received on top of the
			 * origin.
			 */
			if (dsl_dataset_feature_is_active(origin,
			    SPA_FEATURE_REDACTED_DATASETS)) {
				if (!redact_check(drba, origin)) {
					dsl_dataset_rele_flags(origin, dsflags,
					    FTAG);
					dsl_dataset_rele_flags(ds, dsflags,
					    FTAG);
					return (SET_ERROR(EINVAL));
				}
			}

			error = recv_check_large_blocks(ds, featureflags);
			if (error != 0) {
				dsl_dataset_rele_flags(origin, dsflags, FTAG);
				dsl_dataset_rele_flags(ds, dsflags, FTAG);
				return (error);
			}

			dsl_dataset_rele_flags(origin, dsflags, FTAG);
		}

		dsl_dataset_rele(ds, FTAG);
		error = 0;
	}
	return (error);
}
static void
dmu_recv_begin_sync(void *arg, dmu_tx_t *tx)
{
	dmu_recv_begin_arg_t *drba = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	objset_t *mos = dp->dp_meta_objset;
	dmu_recv_cookie_t *drc = drba->drba_cookie;
	struct drr_begin *drrb = drc->drc_drrb;
	const char *tofs = drc->drc_tofs;
	uint64_t featureflags = drc->drc_featureflags;
	dsl_dataset_t *ds, *newds;
	objset_t *os;
	uint64_t dsobj;
	ds_hold_flags_t dsflags = DS_HOLD_FLAG_NONE;
	int error;
	uint64_t crflags = 0;
	dsl_crypto_params_t dummy_dcp = { 0 };
	dsl_crypto_params_t *dcp = drba->drba_dcp;

	if (drrb->drr_flags & DRR_FLAG_CI_DATA)
		crflags |= DS_FLAG_CI_DATASET;

	if ((featureflags & DMU_BACKUP_FEATURE_RAW) == 0)
		dsflags |= DS_HOLD_FLAG_DECRYPT;

	/*
	 * Raw, non-incremental recvs always use a dummy dcp with
	 * the raw cmd set. Raw incremental recvs do not use a dcp
	 * since the encryption parameters are already set in stone.
	 */
	if (dcp == NULL && drrb->drr_fromguid == 0 &&
	    drba->drba_origin == NULL) {
		ASSERT3P(dcp, ==, NULL);
		dcp = &dummy_dcp;

		if (featureflags & DMU_BACKUP_FEATURE_RAW)
			dcp->cp_cmd = DCP_CMD_RAW_RECV;
	}

	error = dsl_dataset_hold_flags(dp, tofs, dsflags, FTAG, &ds);
	if (error == 0) {
		/* create temporary clone */
		dsl_dataset_t *snap = NULL;

		if (drba->drba_cookie->drc_fromsnapobj != 0) {
			VERIFY0(dsl_dataset_hold_obj(dp,
			    drba->drba_cookie->drc_fromsnapobj, FTAG, &snap));
			ASSERT3P(dcp, ==, NULL);
		}
		dsobj = dsl_dataset_create_sync(ds->ds_dir, recv_clone_name,
		    snap, crflags, drba->drba_cred, dcp, tx);
		if (drba->drba_cookie->drc_fromsnapobj != 0)
			dsl_dataset_rele(snap, FTAG);
		dsl_dataset_rele_flags(ds, dsflags, FTAG);
	} else {
		dsl_dir_t *dd;
		const char *tail;
		dsl_dataset_t *origin = NULL;

		VERIFY0(dsl_dir_hold(dp, tofs, FTAG, &dd, &tail));

		if (drba->drba_origin != NULL) {
			VERIFY0(dsl_dataset_hold(dp, drba->drba_origin,
			    FTAG, &origin));
			ASSERT3P(dcp, ==, NULL);
		}

		/* Create new dataset. */
		dsobj = dsl_dataset_create_sync(dd, strrchr(tofs, '/') + 1,
		    origin, crflags, drba->drba_cred, dcp, tx);
		if (origin != NULL)
			dsl_dataset_rele(origin, FTAG);
		dsl_dir_rele(dd, FTAG);
		drc->drc_newfs = B_TRUE;
	}
	VERIFY0(dsl_dataset_own_obj_force(dp, dsobj, dsflags, dmu_recv_tag,
	    &newds));
	if (dsl_dataset_feature_is_active(newds,
	    SPA_FEATURE_REDACTED_DATASETS)) {
		/*
		 * If the origin dataset is redacted, the child will be redacted
		 * when we create it. We clear the new dataset's
		 * redaction info; if it should be redacted, we'll fill
		 * in its information later.
		 */
		dsl_dataset_deactivate_feature(newds,
		    SPA_FEATURE_REDACTED_DATASETS, tx);
	}
	VERIFY0(dmu_objset_from_ds(newds, &os));

	if (drc->drc_resumable) {
		dsl_dataset_zapify(newds, tx);
		if (drrb->drr_fromguid != 0) {
			VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_FROMGUID,
			    8, 1, &drrb->drr_fromguid, tx));
		}
		VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_TOGUID,
		    8, 1, &drrb->drr_toguid, tx));
		VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_TONAME,
		    1, strlen(drrb->drr_toname) + 1, drrb->drr_toname, tx));
		uint64_t one = 1;
		uint64_t zero = 0;
		VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_OBJECT,
		    8, 1, &one, tx));
		VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_OFFSET,
		    8, 1, &zero, tx));
		VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_BYTES,
		    8, 1, &zero, tx));
		if (featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) {
			VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_LARGEBLOCK,
			    8, 1, &one, tx));
		}
		if (featureflags & DMU_BACKUP_FEATURE_EMBED_DATA) {
			VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_EMBEDOK,
			    8, 1, &one, tx));
		}
		if (featureflags & DMU_BACKUP_FEATURE_COMPRESSED) {
			VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_COMPRESSOK,
			    8, 1, &one, tx));
		}
		if (featureflags & DMU_BACKUP_FEATURE_RAW) {
			VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_RAWOK,
			    8, 1, &one, tx));
		}

		uint64_t *redact_snaps;
		uint_t numredactsnaps;
		if (nvlist_lookup_uint64_array(drc->drc_begin_nvl,
		    BEGINNV_REDACT_FROM_SNAPS, &redact_snaps,
		    &numredactsnaps) == 0) {
			VERIFY0(zap_add(mos, dsobj,
			    DS_FIELD_RESUME_REDACT_BOOKMARK_SNAPS,
			    sizeof (*redact_snaps), numredactsnaps,
			    redact_snaps, tx));
		}
	}

	/*
	 * Usually the os->os_encrypted value is tied to the presence of a
	 * DSL Crypto Key object in the dd. However, that will not be received
	 * until dmu_recv_stream(), so we set the value manually for now.
	 */
	if (featureflags & DMU_BACKUP_FEATURE_RAW) {
		os->os_encrypted = B_TRUE;
		drba->drba_cookie->drc_raw = B_TRUE;
	}

	if (featureflags & DMU_BACKUP_FEATURE_REDACTED) {
		uint64_t *redact_snaps;
		uint_t numredactsnaps;
		VERIFY0(nvlist_lookup_uint64_array(drc->drc_begin_nvl,
		    BEGINNV_REDACT_SNAPS, &redact_snaps, &numredactsnaps));
		dsl_dataset_activate_redaction(newds, redact_snaps,
		    numredactsnaps, tx);
	}

	dmu_buf_will_dirty(newds->ds_dbuf, tx);
	dsl_dataset_phys(newds)->ds_flags |= DS_FLAG_INCONSISTENT;

	/*
	 * If we actually created a non-clone, we need to create the objset
	 * in our new dataset. If this is a raw send we postpone this until
	 * dmu_recv_stream() so that we can allocate the metadnode with the
	 * properties from the DRR_BEGIN payload.
	 */
	rrw_enter(&newds->ds_bp_rwlock, RW_READER, FTAG);
	if (BP_IS_HOLE(dsl_dataset_get_blkptr(newds)) &&
	    (featureflags & DMU_BACKUP_FEATURE_RAW) == 0) {
		(void) dmu_objset_create_impl(dp->dp_spa,
		    newds, dsl_dataset_get_blkptr(newds), drrb->drr_type, tx);
	}
	rrw_exit(&newds->ds_bp_rwlock, FTAG);

	drba->drba_cookie->drc_ds = newds;
	drba->drba_cookie->drc_os = os;

	spa_history_log_internal_ds(newds, "receive", tx, " ");
}
static int
dmu_recv_resume_begin_check(void *arg, dmu_tx_t *tx)
{
	dmu_recv_begin_arg_t *drba = arg;
	dmu_recv_cookie_t *drc = drba->drba_cookie;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	struct drr_begin *drrb = drc->drc_drrb;
	int error;
	ds_hold_flags_t dsflags = DS_HOLD_FLAG_NONE;
	dsl_dataset_t *ds;
	const char *tofs = drc->drc_tofs;

	/* already checked */
	ASSERT3U(drrb->drr_magic, ==, DMU_BACKUP_MAGIC);
	ASSERT(drc->drc_featureflags & DMU_BACKUP_FEATURE_RESUMING);

	if (DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) ==
	    DMU_COMPOUNDSTREAM ||
	    drrb->drr_type >= DMU_OST_NUMTYPES)
		return (SET_ERROR(EINVAL));

	/*
	 * This is mostly a sanity check since we should have already done these
	 * checks during a previous attempt to receive the data.
	 */
	error = recv_begin_check_feature_flags_impl(drc->drc_featureflags,
	    dp->dp_spa);
	if (error != 0)
		return (error);

	/* 6 extra bytes for /%recv */
	char recvname[ZFS_MAX_DATASET_NAME_LEN + 6];

	(void) snprintf(recvname, sizeof (recvname), "%s/%s",
	    tofs, recv_clone_name);

	if (drc->drc_featureflags & DMU_BACKUP_FEATURE_RAW) {
		/* raw receives require spill block allocation flag */
		if (!(drrb->drr_flags & DRR_FLAG_SPILL_BLOCK))
			return (SET_ERROR(ZFS_ERR_SPILL_BLOCK_FLAG_MISSING));
	} else {
		dsflags |= DS_HOLD_FLAG_DECRYPT;
	}

	if (dsl_dataset_hold_flags(dp, recvname, dsflags, FTAG, &ds) != 0) {
		/* %recv does not exist; continue in tofs */
		error = dsl_dataset_hold_flags(dp, tofs, dsflags, FTAG, &ds);
		if (error != 0)
			return (error);
	}

	/* check that ds is marked inconsistent */
	if (!DS_IS_INCONSISTENT(ds)) {
		dsl_dataset_rele_flags(ds, dsflags, FTAG);
		return (SET_ERROR(EINVAL));
	}

	/* check that there is resuming data, and that the toguid matches */
	if (!dsl_dataset_is_zapified(ds)) {
		dsl_dataset_rele_flags(ds, dsflags, FTAG);
		return (SET_ERROR(EINVAL));
	}
	uint64_t val;
	error = zap_lookup(dp->dp_meta_objset, ds->ds_object,
	    DS_FIELD_RESUME_TOGUID, sizeof (val), 1, &val);
	if (error != 0 || drrb->drr_toguid != val) {
		dsl_dataset_rele_flags(ds, dsflags, FTAG);
		return (SET_ERROR(EINVAL));
	}

	/*
	 * Check if the receive is still running. If so, it will be owned.
	 * Note that nothing else can own the dataset (e.g. after the receive
	 * fails) because it will be marked inconsistent.
	 */
	if (dsl_dataset_has_owner(ds)) {
		dsl_dataset_rele_flags(ds, dsflags, FTAG);
		return (SET_ERROR(EBUSY));
	}

	/* There should not be any snapshots of this fs yet. */
	if (ds->ds_prev != NULL && ds->ds_prev->ds_dir == ds->ds_dir) {
		dsl_dataset_rele_flags(ds, dsflags, FTAG);
		return (SET_ERROR(EINVAL));
	}

	/*
	 * Note: resume point will be checked when we process the first WRITE
	 * record.
	 */

	/* check that the origin matches */
	val = 0;
	(void) zap_lookup(dp->dp_meta_objset, ds->ds_object,
	    DS_FIELD_RESUME_FROMGUID, sizeof (val), 1, &val);
	if (drrb->drr_fromguid != val) {
		dsl_dataset_rele_flags(ds, dsflags, FTAG);
		return (SET_ERROR(EINVAL));
	}

	if (ds->ds_prev != NULL && drrb->drr_fromguid != 0)
		drc->drc_fromsnapobj = ds->ds_prev->ds_object;

	/*
	 * If we're resuming, and the send is redacted, then the original send
	 * must have been redacted, and must have been redacted with respect to
	 * the same snapshots.
	 */
	if (drc->drc_featureflags & DMU_BACKUP_FEATURE_REDACTED) {
		uint64_t num_ds_redact_snaps;
		uint64_t *ds_redact_snaps;

		uint_t num_stream_redact_snaps;
		uint64_t *stream_redact_snaps;

		if (nvlist_lookup_uint64_array(drc->drc_begin_nvl,
		    BEGINNV_REDACT_SNAPS, &stream_redact_snaps,
		    &num_stream_redact_snaps) != 0) {
			dsl_dataset_rele_flags(ds, dsflags, FTAG);
			return (SET_ERROR(EINVAL));
		}

		if (!dsl_dataset_get_uint64_array_feature(ds,
		    SPA_FEATURE_REDACTED_DATASETS, &num_ds_redact_snaps,
		    &ds_redact_snaps)) {
			dsl_dataset_rele_flags(ds, dsflags, FTAG);
			return (SET_ERROR(EINVAL));
		}

		for (int i = 0; i < num_ds_redact_snaps; i++) {
			if (!redact_snaps_contains(ds_redact_snaps,
			    num_ds_redact_snaps, stream_redact_snaps[i])) {
				dsl_dataset_rele_flags(ds, dsflags, FTAG);
				return (SET_ERROR(EINVAL));
			}
		}
	}

	error = recv_check_large_blocks(ds, drc->drc_featureflags);
	if (error != 0) {
		dsl_dataset_rele_flags(ds, dsflags, FTAG);
		return (error);
	}

	dsl_dataset_rele_flags(ds, dsflags, FTAG);
	return (0);
}
static void
dmu_recv_resume_begin_sync(void *arg, dmu_tx_t *tx)
{
	dmu_recv_begin_arg_t *drba = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	const char *tofs = drba->drba_cookie->drc_tofs;
	uint64_t featureflags = drba->drba_cookie->drc_featureflags;
	dsl_dataset_t *ds;
	ds_hold_flags_t dsflags = DS_HOLD_FLAG_NONE;
	/* 6 extra bytes for /%recv */
	char recvname[ZFS_MAX_DATASET_NAME_LEN + 6];

	(void) snprintf(recvname, sizeof (recvname), "%s/%s", tofs,
	    recv_clone_name);

	if (featureflags & DMU_BACKUP_FEATURE_RAW) {
		drba->drba_cookie->drc_raw = B_TRUE;
	} else {
		dsflags |= DS_HOLD_FLAG_DECRYPT;
	}

	if (dsl_dataset_own_force(dp, recvname, dsflags, dmu_recv_tag, &ds)
	    != 0) {
		/* %recv does not exist; continue in tofs */
		VERIFY0(dsl_dataset_own_force(dp, tofs, dsflags, dmu_recv_tag,
		    &ds));
		drba->drba_cookie->drc_newfs = B_TRUE;
	}

	ASSERT(DS_IS_INCONSISTENT(ds));
	rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
	ASSERT(!BP_IS_HOLE(dsl_dataset_get_blkptr(ds)) ||
	    drba->drba_cookie->drc_raw);
	rrw_exit(&ds->ds_bp_rwlock, FTAG);

	drba->drba_cookie->drc_ds = ds;
	VERIFY0(dmu_objset_from_ds(ds, &drba->drba_cookie->drc_os));
	drba->drba_cookie->drc_should_save = B_TRUE;

	spa_history_log_internal_ds(ds, "resume receive", tx, " ");
}
/*
 * NB: callers *MUST* call dmu_recv_stream() if dmu_recv_begin()
 * succeeds; otherwise we will leak the holds on the datasets.
 */
int
dmu_recv_begin(char *tofs, char *tosnap, dmu_replay_record_t *drr_begin,
    boolean_t force, boolean_t resumable, nvlist_t *localprops,
    nvlist_t *hidden_args, char *origin, dmu_recv_cookie_t *drc,
    zfs_file_t *fp, offset_t *voffp)
{
	dmu_recv_begin_arg_t drba = { 0 };
	int err;

	bzero(drc, sizeof (dmu_recv_cookie_t));
	drc->drc_drr_begin = drr_begin;
	drc->drc_drrb = &drr_begin->drr_u.drr_begin;
	drc->drc_tosnap = tosnap;
	drc->drc_tofs = tofs;
	drc->drc_force = force;
	drc->drc_resumable = resumable;
	drc->drc_cred = CRED();
	drc->drc_proc = curproc;
	drc->drc_clone = (origin != NULL);

	if (drc->drc_drrb->drr_magic == BSWAP_64(DMU_BACKUP_MAGIC)) {
		drc->drc_byteswap = B_TRUE;
		(void) fletcher_4_incremental_byteswap(drr_begin,
		    sizeof (dmu_replay_record_t), &drc->drc_cksum);
		byteswap_record(drr_begin);
	} else if (drc->drc_drrb->drr_magic == DMU_BACKUP_MAGIC) {
		(void) fletcher_4_incremental_native(drr_begin,
		    sizeof (dmu_replay_record_t), &drc->drc_cksum);
	} else {
		return (SET_ERROR(EINVAL));
	}

	drc->drc_fp = fp;
	drc->drc_voff = *voffp;
	drc->drc_featureflags =
	    DMU_GET_FEATUREFLAGS(drc->drc_drrb->drr_versioninfo);

	uint32_t payloadlen = drc->drc_drr_begin->drr_payloadlen;
	void *payload = NULL;
	if (payloadlen != 0)
		payload = kmem_alloc(payloadlen, KM_SLEEP);

	err = receive_read_payload_and_next_header(drc, payloadlen,
	    payload);
	if (err != 0) {
		kmem_free(payload, payloadlen);
		return (err);
	}
	if (payloadlen != 0) {
		err = nvlist_unpack(payload, payloadlen, &drc->drc_begin_nvl,
		    KM_SLEEP);
		kmem_free(payload, payloadlen);
		if (err != 0) {
			kmem_free(drc->drc_next_rrd,
			    sizeof (*drc->drc_next_rrd));
			return (err);
		}
	}

	if (drc->drc_drrb->drr_flags & DRR_FLAG_SPILL_BLOCK)
		drc->drc_spill = B_TRUE;

	drba.drba_origin = origin;
	drba.drba_cookie = drc;
	drba.drba_cred = CRED();
	drba.drba_proc = curproc;

	if (drc->drc_featureflags & DMU_BACKUP_FEATURE_RESUMING) {
		err = dsl_sync_task(tofs,
		    dmu_recv_resume_begin_check, dmu_recv_resume_begin_sync,
		    &drba, 5, ZFS_SPACE_CHECK_NORMAL);
	} else {
		/*
		 * For non-raw, non-incremental, non-resuming receives the
		 * user can specify encryption parameters on the command line
		 * with "zfs recv -o". For these receives we create a dcp and
		 * pass it to the sync task. Creating the dcp will implicitly
		 * remove the encryption params from the localprops nvlist,
		 * which avoids errors when trying to set these normally
		 * read-only properties. Any other kind of receive that
		 * attempts to set these properties will fail as a result.
		 */
		if ((DMU_GET_FEATUREFLAGS(drc->drc_drrb->drr_versioninfo) &
		    DMU_BACKUP_FEATURE_RAW) == 0 &&
		    origin == NULL && drc->drc_drrb->drr_fromguid == 0) {
			err = dsl_crypto_params_create_nvlist(DCP_CMD_NONE,
			    localprops, hidden_args, &drba.drba_dcp);
		}

		if (err == 0) {
			err = dsl_sync_task(tofs,
			    dmu_recv_begin_check, dmu_recv_begin_sync,
			    &drba, 5, ZFS_SPACE_CHECK_NORMAL);
			dsl_crypto_params_free(drba.drba_dcp, !!err);
		}
	}

	if (err != 0) {
		kmem_free(drc->drc_next_rrd, sizeof (*drc->drc_next_rrd));
		nvlist_free(drc->drc_begin_nvl);
	}
	return (err);
}
static int
receive_read(dmu_recv_cookie_t *drc, int len, void *buf)
{
	int done = 0;

	/*
	 * The code doesn't rely on this (lengths being multiples of 8). See
	 * comment in dump_bytes.
	 */
	ASSERT(len % 8 == 0 ||
	    (drc->drc_featureflags & DMU_BACKUP_FEATURE_RAW) != 0);

	while (done < len) {
		ssize_t resid;
		zfs_file_t *fp = drc->drc_fp;
		int err = zfs_file_read(fp, (char *)buf + done,
		    len - done, &resid);
		if (resid == len - done) {
			/*
			 * Note: ECKSUM or ZFS_ERR_STREAM_TRUNCATED indicates
			 * that the receive was interrupted and can
			 * potentially be resumed.
			 */
			err = SET_ERROR(ZFS_ERR_STREAM_TRUNCATED);
		}
		drc->drc_voff += len - done - resid;
		done = len - resid;
		if (err != 0)
			return (err);
	}

	drc->drc_bytes_read += len;

	ASSERT3U(done, ==, len);
	return (0);
}
static inline uint8_t
deduce_nblkptr(dmu_object_type_t bonus_type, uint64_t bonus_size)
{
	if (bonus_type == DMU_OT_SA) {
		return (1);
	} else {
		return (1 +
		    ((DN_OLD_MAX_BONUSLEN -
		    MIN(DN_OLD_MAX_BONUSLEN, bonus_size)) >> SPA_BLKPTRSHIFT));
	}
}
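/*
 * Worked example (assuming the usual constants DN_OLD_MAX_BONUSLEN == 320
 * and SPA_BLKPTRSHIFT == 7, i.e. sizeof (blkptr_t) == 128): a 320-byte
 * bonus leaves no spare slots, so deduce_nblkptr() returns 1; a 192-byte
 * bonus returns 1 + (128 >> 7) == 2; an empty bonus returns 1 + (320 >> 7)
 * == 3. DMU_OT_SA bonuses always yield 1, since SA overflow goes to a
 * spill block rather than to extra block pointers.
 */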
static void
save_resume_state(struct receive_writer_arg *rwa,
    uint64_t object, uint64_t offset, dmu_tx_t *tx)
{
	int txgoff = dmu_tx_get_txg(tx) & TXG_MASK;

	if (!rwa->resumable)
		return;

	/*
	 * We use ds_resume_bytes[] != 0 to indicate that we need to
	 * update this on disk, so it must not be 0.
	 */
	ASSERT(rwa->bytes_read != 0);

	/*
	 * We only resume from write records, which have a valid
	 * (non-meta-dnode) object number.
	 */
	ASSERT(object != 0);

	/*
	 * For resuming to work correctly, we must receive records in order,
	 * sorted by object,offset. This is checked by the callers, but
	 * assert it here for good measure.
	 */
	ASSERT3U(object, >=, rwa->os->os_dsl_dataset->ds_resume_object[txgoff]);
	ASSERT(object != rwa->os->os_dsl_dataset->ds_resume_object[txgoff] ||
	    offset >= rwa->os->os_dsl_dataset->ds_resume_offset[txgoff]);
	ASSERT3U(rwa->bytes_read, >=,
	    rwa->os->os_dsl_dataset->ds_resume_bytes[txgoff]);

	rwa->os->os_dsl_dataset->ds_resume_object[txgoff] = object;
	rwa->os->os_dsl_dataset->ds_resume_offset[txgoff] = offset;
	rwa->os->os_dsl_dataset->ds_resume_bytes[txgoff] = rwa->bytes_read;
}
static int
receive_object_is_same_generation(objset_t *os, uint64_t object,
    dmu_object_type_t old_bonus_type, dmu_object_type_t new_bonus_type,
    const void *new_bonus, boolean_t *samegenp)
{
	zfs_file_info_t zoi;
	int err;

	dmu_buf_t *old_bonus_dbuf;
	err = dmu_bonus_hold(os, object, FTAG, &old_bonus_dbuf);
	if (err != 0)
		return (err);
	err = dmu_get_file_info(os, old_bonus_type, old_bonus_dbuf->db_data,
	    &zoi);
	dmu_buf_rele(old_bonus_dbuf, FTAG);
	if (err != 0)
		return (err);
	uint64_t old_gen = zoi.zfi_generation;

	err = dmu_get_file_info(os, new_bonus_type, new_bonus, &zoi);
	if (err != 0)
		return (err);
	uint64_t new_gen = zoi.zfi_generation;

	*samegenp = (old_gen == new_gen);
	return (0);
}
static int
receive_handle_existing_object(const struct receive_writer_arg *rwa,
    const struct drr_object *drro, const dmu_object_info_t *doi,
    const void *bonus_data,
    uint64_t *object_to_hold, uint32_t *new_blksz)
{
	uint32_t indblksz = drro->drr_indblkshift ?
	    1ULL << drro->drr_indblkshift : 0;
	int nblkptr = deduce_nblkptr(drro->drr_bonustype,
	    drro->drr_bonuslen);
	uint8_t dn_slots = drro->drr_dn_slots != 0 ?
	    drro->drr_dn_slots : DNODE_MIN_SLOTS;
	boolean_t do_free_range = B_FALSE;
	int err;

	*object_to_hold = drro->drr_object;

	/* nblkptr should be bounded by the bonus size and type */
	if (rwa->raw && nblkptr != drro->drr_nblkptr)
		return (SET_ERROR(EINVAL));

	/*
	 * After the previous send stream, the sending system may
	 * have freed this object, and then happened to re-allocate
	 * this object number in a later txg. In this case, we are
	 * receiving a different logical file, and the block size may
	 * appear to be different. i.e. we may have a different
	 * block size for this object than what the send stream says.
	 * In this case we need to remove the object's contents,
	 * so that its structure can be changed and then its contents
	 * entirely replaced by subsequent WRITE records.
	 *
	 * If this is a -L (--large-block) incremental stream, and
	 * the previous stream was not -L, the block size may appear
	 * to increase. i.e. we may have a smaller block size for
	 * this object than what the send stream says. In this case
	 * we need to keep the object's contents and block size
	 * intact, so that we don't lose parts of the object's
	 * contents that are not changed by this incremental send
	 * stream.
	 *
	 * We can distinguish between the two above cases by using
	 * the ZPL's generation number (see
	 * receive_object_is_same_generation()). However, we only
	 * want to rely on the generation number when absolutely
	 * necessary, because with raw receives, the generation is
	 * encrypted. We also want to minimize dependence on the
	 * ZPL, so that other types of datasets can also be received
	 * (e.g. ZVOLs, although note that ZVOLS currently do not
	 * reallocate their objects or change their structure).
	 * Therefore, we check a number of different cases where we
	 * know it is safe to discard the object's contents, before
	 * using the ZPL's generation number to make the above
	 * distinction.
	 */
	if (drro->drr_blksz != doi->doi_data_block_size) {
		if (rwa->raw) {
			/*
			 * RAW streams always have large blocks, so
			 * we are sure that the data is not needed
			 * due to changing --large-block to be on.
			 * Which is fortunate since the bonus buffer
			 * (which contains the ZPL generation) is
			 * encrypted, and the key might not be
			 * loaded.
			 */
			do_free_range = B_TRUE;
		} else if (rwa->full) {
			/*
			 * This is a full send stream, so it always
			 * replaces what we have. Even if the
			 * generation numbers happen to match, this
			 * can not actually be the same logical file.
			 * This is relevant when receiving a full
			 * send as a clone.
			 */
			do_free_range = B_TRUE;
		} else if (drro->drr_type !=
		    DMU_OT_PLAIN_FILE_CONTENTS ||
		    doi->doi_type != DMU_OT_PLAIN_FILE_CONTENTS) {
			/*
			 * PLAIN_FILE_CONTENTS are the only type of
			 * objects that have ever been stored with
			 * large blocks, so we don't need the special
			 * logic below. ZAP blocks can shrink (when
			 * there's only one block), so we don't want
			 * to hit the error below about block size
			 * only increasing.
			 */
			do_free_range = B_TRUE;
		} else if (doi->doi_max_offset <=
		    doi->doi_data_block_size) {
			/*
			 * There is only one block. We can free it,
			 * because its contents will be replaced by a
			 * WRITE record. This can not be the no-L ->
			 * -L case, because the no-L case would have
			 * resulted in multiple blocks. If we
			 * supported -L -> no-L, it would not be safe
			 * to free the file's contents. Fortunately,
			 * that is not allowed (see
			 * recv_check_large_blocks()).
			 */
			do_free_range = B_TRUE;
		} else {
			boolean_t is_same_gen;
			err = receive_object_is_same_generation(rwa->os,
			    drro->drr_object, doi->doi_bonus_type,
			    drro->drr_bonustype, bonus_data, &is_same_gen);
			if (err != 0)
				return (SET_ERROR(EINVAL));

			if (is_same_gen) {
				/*
				 * This is the same logical file, and
				 * the block size must be increasing.
				 * It could only decrease if
				 * --large-block was changed to be
				 * off, which is checked in
				 * recv_check_large_blocks().
				 */
				if (drro->drr_blksz <=
				    doi->doi_data_block_size)
					return (SET_ERROR(EINVAL));
				/*
				 * We keep the existing blocksize and
				 * contents.
				 */
				*new_blksz =
				    doi->doi_data_block_size;
			} else {
				do_free_range = B_TRUE;
			}
		}
	}

	/* nblkptr can only decrease if the object was reallocated */
	if (nblkptr < doi->doi_nblkptr)
		do_free_range = B_TRUE;

	/* number of slots can only change on reallocation */
	if (dn_slots != doi->doi_dnodesize >> DNODE_SHIFT)
		do_free_range = B_TRUE;

	/*
	 * For raw sends we also check a few other fields to
	 * ensure we are preserving the objset structure exactly
	 * as it was on the send side:
	 *     - A changed indirect block size
	 *     - A smaller nlevels
	 */
	if (rwa->raw) {
		if (indblksz != doi->doi_metadata_block_size)
			do_free_range = B_TRUE;
		if (drro->drr_nlevels < doi->doi_indirection)
			do_free_range = B_TRUE;
	}

	if (do_free_range) {
		err = dmu_free_long_range(rwa->os, drro->drr_object,
		    0, DMU_OBJECT_END);
		if (err != 0)
			return (SET_ERROR(EINVAL));
	}

	/*
	 * The dmu does not currently support decreasing nlevels
	 * or changing the number of dnode slots on an object. For
	 * non-raw sends, this does not matter and the new object
	 * can just use the previous one's nlevels. For raw sends,
	 * however, the structure of the received dnode (including
	 * nlevels and dnode slots) must match that of the send
	 * side. Therefore, instead of using dmu_object_reclaim(),
	 * we must free the object completely and call
	 * dmu_object_claim_dnsize() instead.
	 */
	if ((rwa->raw && drro->drr_nlevels < doi->doi_indirection) ||
	    dn_slots != doi->doi_dnodesize >> DNODE_SHIFT) {
		err = dmu_free_long_object(rwa->os, drro->drr_object);
		if (err != 0)
			return (SET_ERROR(EINVAL));

		txg_wait_synced(dmu_objset_pool(rwa->os), 0);
		*object_to_hold = DMU_NEW_OBJECT;
	}

	/*
	 * For raw receives, free everything beyond the new incoming
	 * maxblkid. Normally this would be done with a DRR_FREE
	 * record that would come after this DRR_OBJECT record is
	 * processed. However, for raw receives we manually set the
	 * maxblkid from the drr_maxblkid and so we must first free
	 * everything above that blkid to ensure the DMU is always
	 * consistent with itself. We will never free the first block
	 * of the object here because a maxblkid of 0 could indicate
	 * an object with a single block or one with no blocks. This
	 * free may be skipped when dmu_free_long_range() was called
	 * above since it covers the entire object's contents.
	 */
	if (rwa->raw && *object_to_hold != DMU_NEW_OBJECT && !do_free_range) {
		err = dmu_free_long_range(rwa->os, drro->drr_object,
		    (drro->drr_maxblkid + 1) * doi->doi_data_block_size,
		    DMU_OBJECT_END);
		if (err != 0)
			return (SET_ERROR(EINVAL));
	}
	return (0);
}
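/*
 * Decision summary for the logic above (restating it, not extending it):
 * the object's old contents are freed when the block size changed and the
 * stream is raw or full, when either side is not PLAIN_FILE_CONTENTS, when
 * the object had only a single block, or when the ZPL generation differs;
 * they are also freed when nblkptr shrank, dn_slots changed, or (raw only)
 * the indirect block size or nlevels changed incompatibly. Only the "same
 * generation, larger blocks" case keeps the existing data and block size.
 */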
noinline static int
receive_object(struct receive_writer_arg *rwa, struct drr_object *drro,
    void *data)
{
	dmu_object_info_t doi;
	dmu_tx_t *tx;
	int err;
	uint32_t new_blksz = drro->drr_blksz;
	uint8_t dn_slots = drro->drr_dn_slots != 0 ?
	    drro->drr_dn_slots : DNODE_MIN_SLOTS;

	if (drro->drr_type == DMU_OT_NONE ||
	    !DMU_OT_IS_VALID(drro->drr_type) ||
	    !DMU_OT_IS_VALID(drro->drr_bonustype) ||
	    drro->drr_checksumtype >= ZIO_CHECKSUM_FUNCTIONS ||
	    drro->drr_compress >= ZIO_COMPRESS_FUNCTIONS ||
	    P2PHASE(drro->drr_blksz, SPA_MINBLOCKSIZE) ||
	    drro->drr_blksz < SPA_MINBLOCKSIZE ||
	    drro->drr_blksz > spa_maxblocksize(dmu_objset_spa(rwa->os)) ||
	    drro->drr_bonuslen >
	    DN_BONUS_SIZE(spa_maxdnodesize(dmu_objset_spa(rwa->os))) ||
	    dn_slots >
	    (spa_maxdnodesize(dmu_objset_spa(rwa->os)) >> DNODE_SHIFT)) {
		return (SET_ERROR(EINVAL));
	}

	if (rwa->raw) {
		/*
		 * We should have received a DRR_OBJECT_RANGE record
		 * containing this block and stored it in rwa.
		 */
		if (drro->drr_object < rwa->or_firstobj ||
		    drro->drr_object >= rwa->or_firstobj + rwa->or_numslots ||
		    drro->drr_raw_bonuslen < drro->drr_bonuslen ||
		    drro->drr_indblkshift > SPA_MAXBLOCKSHIFT ||
		    drro->drr_nlevels > DN_MAX_LEVELS ||
		    drro->drr_nblkptr > DN_MAX_NBLKPTR ||
		    DN_SLOTS_TO_BONUSLEN(dn_slots) <
		    drro->drr_raw_bonuslen)
			return (SET_ERROR(EINVAL));
	} else {
		/*
		 * The DRR_OBJECT_SPILL flag is valid when the DRR_BEGIN
		 * record indicates this by setting DRR_FLAG_SPILL_BLOCK.
		 */
		if (((drro->drr_flags & ~(DRR_OBJECT_SPILL))) ||
		    (!rwa->spill && DRR_OBJECT_HAS_SPILL(drro->drr_flags))) {
			return (SET_ERROR(EINVAL));
		}

		if (drro->drr_raw_bonuslen != 0 || drro->drr_nblkptr != 0 ||
		    drro->drr_indblkshift != 0 || drro->drr_nlevels != 0) {
			return (SET_ERROR(EINVAL));
		}
	}

	err = dmu_object_info(rwa->os, drro->drr_object, &doi);

	if (err != 0 && err != ENOENT && err != EEXIST)
		return (SET_ERROR(EINVAL));

	if (drro->drr_object > rwa->max_object)
		rwa->max_object = drro->drr_object;

	/*
	 * If we are losing blkptrs or changing the block size this must
	 * be a new file instance. We must clear out the previous file
	 * contents before we can change this type of metadata in the dnode.
	 * Raw receives will also check that the indirect structure of the
	 * dnode hasn't changed.
	 */
	uint64_t object_to_hold;
	if (err == 0) {
		err = receive_handle_existing_object(rwa, drro, &doi, data,
		    &object_to_hold, &new_blksz);
		if (err != 0)
			return (err);
	} else if (err == EEXIST) {
		/*
		 * The object requested is currently an interior slot of a
		 * multi-slot dnode. This will be resolved when the next txg
		 * is synced out, since the send stream will have told us
		 * to free this slot when we freed the associated dnode
		 * earlier in the stream.
		 */
		txg_wait_synced(dmu_objset_pool(rwa->os), 0);

		if (dmu_object_info(rwa->os, drro->drr_object, NULL) != ENOENT)
			return (SET_ERROR(EINVAL));

		/* object was freed and we are about to allocate a new one */
		object_to_hold = DMU_NEW_OBJECT;
	} else {
		/* object is free and we are about to allocate a new one */
		object_to_hold = DMU_NEW_OBJECT;
	}

	/*
	 * If this is a multi-slot dnode there is a chance that this
	 * object will expand into a slot that is already used by
	 * another object from the previous snapshot. We must free
	 * these objects before we attempt to allocate the new dnode.
	 */
	if (dn_slots > 1) {
		boolean_t need_sync = B_FALSE;

		for (uint64_t slot = drro->drr_object + 1;
		    slot < drro->drr_object + dn_slots;
		    slot++) {
			dmu_object_info_t slot_doi;

			err = dmu_object_info(rwa->os, slot, &slot_doi);
			if (err == ENOENT || err == EEXIST)
				continue;
			else if (err != 0)
				return (err);

			err = dmu_free_long_object(rwa->os, slot);
			if (err != 0)
				return (err);

			need_sync = B_TRUE;
		}

		if (need_sync)
			txg_wait_synced(dmu_objset_pool(rwa->os), 0);
	}

	tx = dmu_tx_create(rwa->os);
	dmu_tx_hold_bonus(tx, object_to_hold);
	dmu_tx_hold_write(tx, object_to_hold, 0, 0);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err != 0) {
		dmu_tx_abort(tx);
		return (err);
	}

	if (object_to_hold == DMU_NEW_OBJECT) {
		/* Currently free, wants to be allocated */
		err = dmu_object_claim_dnsize(rwa->os, drro->drr_object,
		    drro->drr_type, new_blksz,
		    drro->drr_bonustype, drro->drr_bonuslen,
		    dn_slots << DNODE_SHIFT, tx);
	} else if (drro->drr_type != doi.doi_type ||
	    new_blksz != doi.doi_data_block_size ||
	    drro->drr_bonustype != doi.doi_bonus_type ||
	    drro->drr_bonuslen != doi.doi_bonus_size) {
		/* Currently allocated, but with different properties */
		err = dmu_object_reclaim_dnsize(rwa->os, drro->drr_object,
		    drro->drr_type, new_blksz,
		    drro->drr_bonustype, drro->drr_bonuslen,
		    dn_slots << DNODE_SHIFT, rwa->spill ?
		    DRR_OBJECT_HAS_SPILL(drro->drr_flags) : B_FALSE, tx);
	} else if (rwa->spill && !DRR_OBJECT_HAS_SPILL(drro->drr_flags)) {
		/*
		 * Currently allocated, the existing version of this object
		 * may reference a spill block that is no longer allocated
		 * at the source and needs to be freed.
		 */
		err = dmu_object_rm_spill(rwa->os, drro->drr_object, tx);
	}

	if (err != 0) {
		dmu_tx_commit(tx);
		return (SET_ERROR(EINVAL));
	}

	if (rwa->or_crypt_params_present) {
		/*
		 * Set the crypt params for the buffer associated with this
		 * range of dnodes. This causes the blkptr_t to have the
		 * same crypt params (byteorder, salt, iv, mac) as on the
		 * sending side.
		 *
		 * Since we are committing this tx now, it is possible for
		 * the dnode block to end up on-disk with the incorrect MAC,
		 * if subsequent objects in this block are received in a
		 * different txg. However, since the dataset is marked as
		 * inconsistent, no code paths will do a non-raw read (or
		 * decrypt the block / verify the MAC). The receive code and
		 * scrub code can safely do raw reads and verify the
		 * checksum. They don't need to verify the MAC.
		 */
		dmu_buf_t *db = NULL;
		uint64_t offset = rwa->or_firstobj * DNODE_MIN_SIZE;

		err = dmu_buf_hold_by_dnode(DMU_META_DNODE(rwa->os),
		    offset, FTAG, &db, DMU_READ_PREFETCH | DMU_READ_NO_DECRYPT);
		if (err != 0) {
			dmu_tx_commit(tx);
			return (SET_ERROR(EINVAL));
		}

		dmu_buf_set_crypt_params(db, rwa->or_byteorder,
		    rwa->or_salt, rwa->or_iv, rwa->or_mac, tx);

		dmu_buf_rele(db, FTAG);

		rwa->or_crypt_params_present = B_FALSE;
	}

	dmu_object_set_checksum(rwa->os, drro->drr_object,
	    drro->drr_checksumtype, tx);
	dmu_object_set_compress(rwa->os, drro->drr_object,
	    drro->drr_compress, tx);

	/* handle more restrictive dnode structuring for raw recvs */
	if (rwa->raw) {
		/*
		 * Set the indirect block size, block shift, nlevels.
		 * This will not fail because we ensured all of the
		 * blocks were freed earlier if this is a new object.
		 * For non-new objects block size and indirect block
		 * shift cannot change and nlevels can only increase.
		 */
		ASSERT3U(new_blksz, ==, drro->drr_blksz);
		VERIFY0(dmu_object_set_blocksize(rwa->os, drro->drr_object,
		    drro->drr_blksz, drro->drr_indblkshift, tx));
		VERIFY0(dmu_object_set_nlevels(rwa->os, drro->drr_object,
		    drro->drr_nlevels, tx));

		/*
		 * Set the maxblkid. This will always succeed because
		 * we freed all blocks beyond the new maxblkid above.
		 */
		VERIFY0(dmu_object_set_maxblkid(rwa->os, drro->drr_object,
		    drro->drr_maxblkid, tx));
	}

	if (data != NULL) {
		dmu_buf_t *db;
		dnode_t *dn;
		uint32_t flags = DMU_READ_NO_PREFETCH;

		if (rwa->raw)
			flags |= DMU_READ_NO_DECRYPT;

		VERIFY0(dnode_hold(rwa->os, drro->drr_object, FTAG, &dn));
		VERIFY0(dmu_bonus_hold_by_dnode(dn, FTAG, &db, flags));

		dmu_buf_will_dirty(db, tx);

		ASSERT3U(db->db_size, >=, drro->drr_bonuslen);
		bcopy(data, db->db_data, DRR_OBJECT_PAYLOAD_SIZE(drro));

		/*
		 * Raw bonus buffers have their byteorder determined by the
		 * DRR_OBJECT_RANGE record.
		 */
		if (rwa->byteswap && !rwa->raw) {
			dmu_object_byteswap_t byteswap =
			    DMU_OT_BYTESWAP(drro->drr_bonustype);
			dmu_ot_byteswap[byteswap].ob_func(db->db_data,
			    DRR_OBJECT_PAYLOAD_SIZE(drro));
		}
		dmu_buf_rele(db, FTAG);
		dnode_rele(dn, FTAG);
	}
	dmu_tx_commit(tx);

	return (0);
}
noinline static int
receive_freeobjects(struct receive_writer_arg *rwa,
    struct drr_freeobjects *drrfo)
{
	uint64_t obj;
	int next_err = 0;

	if (drrfo->drr_firstobj + drrfo->drr_numobjs < drrfo->drr_firstobj)
		return (SET_ERROR(EINVAL));

	for (obj = drrfo->drr_firstobj == 0 ? 1 : drrfo->drr_firstobj;
	    obj < drrfo->drr_firstobj + drrfo->drr_numobjs &&
	    obj < DN_MAX_OBJECT && next_err == 0;
	    next_err = dmu_object_next(rwa->os, &obj, FALSE, 0)) {
		dmu_object_info_t doi;
		int err;

		err = dmu_object_info(rwa->os, obj, &doi);
		if (err == ENOENT)
			continue;
		else if (err != 0)
			return (err);

		err = dmu_free_long_object(rwa->os, obj);
		if (err != 0)
			return (err);
	}
	if (next_err != ESRCH)
		return (next_err);
	return (0);
}
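/*
 * Worked example: a DRR_FREEOBJECTS record with drr_firstobj == 0 and
 * drr_numobjs == 100 frees allocated objects 1 through 99 (object 0 is the
 * reserved meta-dnode slot, so iteration starts at 1), skipping holes via
 * dmu_object_next(). The overflow guard above rejects records where
 * firstobj + numobjs would wrap past UINT64_MAX.
 */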
/*
 * Note: if this fails, the caller will clean up any records left on the
 * rwa->write_batch list.
 */
static int
flush_write_batch_impl(struct receive_writer_arg *rwa)
{
	dnode_t *dn;
	int err;

	if (dnode_hold(rwa->os, rwa->last_object, FTAG, &dn) != 0)
		return (SET_ERROR(EINVAL));

	struct receive_record_arg *last_rrd = list_tail(&rwa->write_batch);
	struct drr_write *last_drrw = &last_rrd->header.drr_u.drr_write;

	struct receive_record_arg *first_rrd = list_head(&rwa->write_batch);
	struct drr_write *first_drrw = &first_rrd->header.drr_u.drr_write;

	ASSERT3U(rwa->last_object, ==, last_drrw->drr_object);
	ASSERT3U(rwa->last_offset, ==, last_drrw->drr_offset);

	dmu_tx_t *tx = dmu_tx_create(rwa->os);
	dmu_tx_hold_write_by_dnode(tx, dn, first_drrw->drr_offset,
	    last_drrw->drr_offset - first_drrw->drr_offset +
	    last_drrw->drr_logical_size);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err != 0) {
		dmu_tx_abort(tx);
		dnode_rele(dn, FTAG);
		return (err);
	}

	struct receive_record_arg *rrd;
	while ((rrd = list_head(&rwa->write_batch)) != NULL) {
		struct drr_write *drrw = &rrd->header.drr_u.drr_write;
		abd_t *abd = rrd->abd;

		ASSERT3U(drrw->drr_object, ==, rwa->last_object);

		if (drrw->drr_logical_size != dn->dn_datablksz) {
			/*
			 * The WRITE record is larger than the object's block
			 * size.  We must be receiving an incremental
			 * large-block stream into a dataset that previously did
			 * a non-large-block receive.  Lightweight writes must
			 * be exactly one block, so we need to decompress the
			 * data (if compressed) and do a normal dmu_write().
			 */
			ASSERT3U(drrw->drr_logical_size, >, dn->dn_datablksz);
			if (DRR_WRITE_COMPRESSED(drrw)) {
				abd_t *decomp_abd =
				    abd_alloc_linear(drrw->drr_logical_size,
				    B_FALSE);

				err = zio_decompress_data(
				    drrw->drr_compressiontype,
				    abd, abd_to_buf(decomp_abd),
				    abd_get_size(abd),
				    abd_get_size(decomp_abd), NULL);

				if (err == 0) {
					dmu_write_by_dnode(dn,
					    drrw->drr_offset,
					    drrw->drr_logical_size,
					    abd_to_buf(decomp_abd), tx);
				}
				abd_free(decomp_abd);
			} else {
				dmu_write_by_dnode(dn,
				    drrw->drr_offset,
				    drrw->drr_logical_size,
				    abd_to_buf(abd), tx);
			}
			if (err == 0)
				abd_free(abd);
		} else {
			zio_prop_t zp;
			dmu_write_policy(rwa->os, dn, 0, 0, &zp);

			enum zio_flag zio_flags = 0;

			if (rwa->raw) {
				zp.zp_encrypt = B_TRUE;
				zp.zp_compress = drrw->drr_compressiontype;
				zp.zp_byteorder = ZFS_HOST_BYTEORDER ^
				    !!DRR_IS_RAW_BYTESWAPPED(drrw->drr_flags) ^
				    rwa->byteswap;
				bcopy(drrw->drr_salt, zp.zp_salt,
				    ZIO_DATA_SALT_LEN);
				bcopy(drrw->drr_iv, zp.zp_iv,
				    ZIO_DATA_IV_LEN);
				bcopy(drrw->drr_mac, zp.zp_mac,
				    ZIO_DATA_MAC_LEN);
				if (DMU_OT_IS_ENCRYPTED(zp.zp_type)) {
					zp.zp_nopwrite = B_FALSE;
					zp.zp_copies = MIN(zp.zp_copies,
					    SPA_DVAS_PER_BP - 1);
				}
				zio_flags |= ZIO_FLAG_RAW;
			} else if (DRR_WRITE_COMPRESSED(drrw)) {
				ASSERT3U(drrw->drr_compressed_size, >, 0);
				ASSERT3U(drrw->drr_logical_size, >=,
				    drrw->drr_compressed_size);
				zp.zp_compress = drrw->drr_compressiontype;
				zio_flags |= ZIO_FLAG_RAW_COMPRESS;
			} else if (rwa->byteswap) {
				/*
				 * Note: compressed blocks never need to be
				 * byteswapped, because WRITE records for
				 * metadata blocks are never compressed. The
				 * exception is raw streams, which are written
				 * in the original byteorder, and the byteorder
				 * bit is preserved in the BP by setting
				 * zp_byteorder above.
				 */
				dmu_object_byteswap_t byteswap =
				    DMU_OT_BYTESWAP(drrw->drr_type);
				dmu_ot_byteswap[byteswap].ob_func(
				    abd_to_buf(abd),
				    DRR_WRITE_PAYLOAD_SIZE(drrw));
			}

			/*
			 * Since this data can't be read until the receive
			 * completes, we can do a "lightweight" write for
			 * improved performance.
			 */
			err = dmu_lightweight_write_by_dnode(dn,
			    drrw->drr_offset, abd, &zp, zio_flags, tx);
		}

		if (err != 0) {
			/*
			 * This rrd is left on the list, so the caller will
			 * free it (and the abd).
			 */
			break;
		}

		/*
		 * Note: If the receive fails, we want the resume stream to
		 * start with the same record that we last successfully
		 * received (as opposed to the next record), so that we can
		 * verify that we are resuming from the correct location.
		 */
		save_resume_state(rwa, drrw->drr_object, drrw->drr_offset, tx);

		list_remove(&rwa->write_batch, rrd);
		kmem_free(rrd, sizeof (*rrd));
	}

	dmu_tx_commit(tx);
	dnode_rele(dn, FTAG);
	return (err);
}
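/*
 * Batching writes into a single transaction amortizes the cost of
 * dmu_tx_create()/dmu_tx_assign() across a run of contiguous WRITE
 * records for one object. The common case uses
 * dmu_lightweight_write_by_dnode(), which avoids instantiating dbufs
 * for data that cannot be read back until the receive completes.
 */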
static int
flush_write_batch(struct receive_writer_arg *rwa)
{
	if (list_is_empty(&rwa->write_batch))
		return (0);
	int err = rwa->err;
	if (err == 0)
		err = flush_write_batch_impl(rwa);
	if (err != 0) {
		struct receive_record_arg *rrd;
		while ((rrd = list_remove_head(&rwa->write_batch)) != NULL) {
			abd_free(rrd->abd);
			kmem_free(rrd, sizeof (*rrd));
		}
	}
	ASSERT(list_is_empty(&rwa->write_batch));
	return (err);
}
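/*
 * On failure, flush_write_batch() frees every record still queued so the
 * caller never sees a partially-consumed batch; rwa->err is checked first
 * so that a writer thread which has already failed does not issue any
 * further writes.
 */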
static int
receive_process_write_record(struct receive_writer_arg *rwa,
    struct receive_record_arg *rrd)
{
	int err = 0;

	ASSERT3U(rrd->header.drr_type, ==, DRR_WRITE);
	struct drr_write *drrw = &rrd->header.drr_u.drr_write;

	if (drrw->drr_offset + drrw->drr_logical_size < drrw->drr_offset ||
	    !DMU_OT_IS_VALID(drrw->drr_type))
		return (SET_ERROR(EINVAL));

	/*
	 * For resuming to work, records must be in increasing order
	 * by (object, offset).
	 */
	if (drrw->drr_object < rwa->last_object ||
	    (drrw->drr_object == rwa->last_object &&
	    drrw->drr_offset < rwa->last_offset)) {
		return (SET_ERROR(EINVAL));
	}

	struct receive_record_arg *first_rrd = list_head(&rwa->write_batch);
	struct drr_write *first_drrw = &first_rrd->header.drr_u.drr_write;
	uint64_t batch_size =
	    MIN(zfs_recv_write_batch_size, DMU_MAX_ACCESS / 2);
	if (first_rrd != NULL &&
	    (drrw->drr_object != first_drrw->drr_object ||
	    drrw->drr_offset >= first_drrw->drr_offset + batch_size)) {
		err = flush_write_batch(rwa);
		if (err != 0)
			return (err);
	}

	rwa->last_object = drrw->drr_object;
	rwa->last_offset = drrw->drr_offset;

	if (rwa->last_object > rwa->max_object)
		rwa->max_object = rwa->last_object;

	list_insert_tail(&rwa->write_batch, rrd);
	/*
	 * Return EAGAIN to indicate that we will use this rrd again,
	 * so the caller should not free it.
	 */
	return (EAGAIN);
}
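/*
 * The EAGAIN returned above is not an error: it tells the writer thread
 * that ownership of the rrd (and its abd) has passed to rwa->write_batch,
 * so the record must not be freed until the batch is flushed.
 */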
static int
receive_write_embedded(struct receive_writer_arg *rwa,
    struct drr_write_embedded *drrwe, void *data)
{
	dmu_tx_t *tx;
	int err;

	if (drrwe->drr_offset + drrwe->drr_length < drrwe->drr_offset)
		return (SET_ERROR(EINVAL));

	if (drrwe->drr_psize > BPE_PAYLOAD_SIZE)
		return (SET_ERROR(EINVAL));

	if (drrwe->drr_etype >= NUM_BP_EMBEDDED_TYPES)
		return (SET_ERROR(EINVAL));
	if (drrwe->drr_compression >= ZIO_COMPRESS_FUNCTIONS)
		return (SET_ERROR(EINVAL));
	if (rwa->raw)
		return (SET_ERROR(EINVAL));

	if (drrwe->drr_object > rwa->max_object)
		rwa->max_object = drrwe->drr_object;

	tx = dmu_tx_create(rwa->os);

	dmu_tx_hold_write(tx, drrwe->drr_object,
	    drrwe->drr_offset, drrwe->drr_length);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err != 0) {
		dmu_tx_abort(tx);
		return (err);
	}

	dmu_write_embedded(rwa->os, drrwe->drr_object,
	    drrwe->drr_offset, data, drrwe->drr_etype,
	    drrwe->drr_compression, drrwe->drr_lsize, drrwe->drr_psize,
	    rwa->byteswap ^ ZFS_HOST_BYTEORDER, tx);

	/* See comment in restore_write. */
	save_resume_state(rwa, drrwe->drr_object, drrwe->drr_offset, tx);
	dmu_tx_commit(tx);
	return (0);
}
static int
receive_spill(struct receive_writer_arg *rwa, struct drr_spill *drrs,
    abd_t *abd)
{
	dmu_buf_t *db, *db_spill;
	int err;

	if (drrs->drr_length < SPA_MINBLOCKSIZE ||
	    drrs->drr_length > spa_maxblocksize(dmu_objset_spa(rwa->os)))
		return (SET_ERROR(EINVAL));

	/*
	 * This is an unmodified spill block which was added to the stream
	 * to resolve an issue with incorrectly removing spill blocks. It
	 * should be ignored by current versions of the code which support
	 * the DRR_FLAG_SPILL_BLOCK flag.
	 */
	if (rwa->spill && DRR_SPILL_IS_UNMODIFIED(drrs->drr_flags)) {
		abd_free(abd);
		return (0);
	}

	if (rwa->raw) {
		if (!DMU_OT_IS_VALID(drrs->drr_type) ||
		    drrs->drr_compressiontype >= ZIO_COMPRESS_FUNCTIONS ||
		    drrs->drr_compressed_size == 0)
			return (SET_ERROR(EINVAL));
	}

	if (dmu_object_info(rwa->os, drrs->drr_object, NULL) != 0)
		return (SET_ERROR(EINVAL));

	if (drrs->drr_object > rwa->max_object)
		rwa->max_object = drrs->drr_object;

	VERIFY0(dmu_bonus_hold(rwa->os, drrs->drr_object, FTAG, &db));
	if ((err = dmu_spill_hold_by_bonus(db, DMU_READ_NO_DECRYPT, FTAG,
	    &db_spill)) != 0) {
		dmu_buf_rele(db, FTAG);
		return (err);
	}

	dmu_tx_t *tx = dmu_tx_create(rwa->os);

	dmu_tx_hold_spill(tx, db->db_object);

	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err != 0) {
		dmu_buf_rele(db, FTAG);
		dmu_buf_rele(db_spill, FTAG);
		dmu_tx_abort(tx);
		return (err);
	}

	/*
	 * Spill blocks may both grow and shrink. When a change in size
	 * occurs any existing dbuf must be updated to match the logical
	 * size of the provided arc_buf_t.
	 */
	if (db_spill->db_size != drrs->drr_length) {
		dmu_buf_will_fill(db_spill, tx);
		VERIFY0(dbuf_spill_set_blksz(db_spill,
		    drrs->drr_length, tx));
	}

	arc_buf_t *abuf;
	if (rwa->raw) {
		boolean_t byteorder = ZFS_HOST_BYTEORDER ^
		    !!DRR_IS_RAW_BYTESWAPPED(drrs->drr_flags) ^
		    rwa->byteswap;

		abuf = arc_loan_raw_buf(dmu_objset_spa(rwa->os),
		    drrs->drr_object, byteorder, drrs->drr_salt,
		    drrs->drr_iv, drrs->drr_mac, drrs->drr_type,
		    drrs->drr_compressed_size, drrs->drr_length,
		    drrs->drr_compressiontype, 0);
	} else {
		abuf = arc_loan_buf(dmu_objset_spa(rwa->os),
		    DMU_OT_IS_METADATA(drrs->drr_type),
		    drrs->drr_length);
		if (rwa->byteswap) {
			dmu_object_byteswap_t byteswap =
			    DMU_OT_BYTESWAP(drrs->drr_type);
			dmu_ot_byteswap[byteswap].ob_func(abd_to_buf(abd),
			    DRR_SPILL_PAYLOAD_SIZE(drrs));
		}
	}

	bcopy(abd_to_buf(abd), abuf->b_data, DRR_SPILL_PAYLOAD_SIZE(drrs));
	abd_free(abd);
	dbuf_assign_arcbuf((dmu_buf_impl_t *)db_spill, abuf, tx);

	dmu_buf_rele(db, FTAG);
	dmu_buf_rele(db_spill, FTAG);

	dmu_tx_commit(tx);
	return (0);
}
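/*
 * For raw streams the spill payload is loaned as a raw arc_buf so the
 * ciphertext travels with its salt/IV/MAC; for non-raw streams a plain
 * buffer is loaned and byteswapped in place if needed.
 * dbuf_assign_arcbuf() then hands the loaned buffer directly to the
 * spill dbuf, avoiding an extra copy through the dbuf cache.
 */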
static int
receive_free(struct receive_writer_arg *rwa, struct drr_free *drrf)
{
	int err;

	if (drrf->drr_length != -1ULL &&
	    drrf->drr_offset + drrf->drr_length < drrf->drr_offset)
		return (SET_ERROR(EINVAL));

	if (dmu_object_info(rwa->os, drrf->drr_object, NULL) != 0)
		return (SET_ERROR(EINVAL));

	if (drrf->drr_object > rwa->max_object)
		rwa->max_object = drrf->drr_object;

	err = dmu_free_long_range(rwa->os, drrf->drr_object,
	    drrf->drr_offset, drrf->drr_length);

	return (err);
}
static int
receive_object_range(struct receive_writer_arg *rwa,
    struct drr_object_range *drror)
{
	/*
	 * By default, we assume this block is in our native format
	 * (ZFS_HOST_BYTEORDER). We then take into account whether
	 * the send stream is byteswapped (rwa->byteswap). Finally,
	 * we need to byteswap again if this particular block was
	 * in non-native format on the send side.
	 */
	boolean_t byteorder = ZFS_HOST_BYTEORDER ^ rwa->byteswap ^
	    !!DRR_IS_RAW_BYTESWAPPED(drror->drr_flags);

	/*
	 * Since dnode block sizes are constant, we should not need to worry
	 * about making sure that the dnode block size is the same on the
	 * sending and receiving sides for the time being. For non-raw sends,
	 * this does not matter (and in fact we do not send a DRR_OBJECT_RANGE
	 * record at all). Raw sends require this record type because the
	 * encryption parameters are used to protect an entire block of bonus
	 * buffers. If the size of dnode blocks ever becomes variable,
	 * handling will need to be added to ensure that dnode block sizes
	 * match on the sending and receiving side.
	 */
	if (drror->drr_numslots != DNODES_PER_BLOCK ||
	    P2PHASE(drror->drr_firstobj, DNODES_PER_BLOCK) != 0 ||
	    !rwa->raw)
		return (SET_ERROR(EINVAL));

	if (drror->drr_firstobj > rwa->max_object)
		rwa->max_object = drror->drr_firstobj;

	/*
	 * The DRR_OBJECT_RANGE handling must be deferred to receive_object()
	 * so that the block of dnodes is not written out when it's empty,
	 * and converted to a HOLE BP.
	 */
	rwa->or_crypt_params_present = B_TRUE;
	rwa->or_firstobj = drror->drr_firstobj;
	rwa->or_numslots = drror->drr_numslots;
	bcopy(drror->drr_salt, rwa->or_salt, ZIO_DATA_SALT_LEN);
	bcopy(drror->drr_iv, rwa->or_iv, ZIO_DATA_IV_LEN);
	bcopy(drror->drr_mac, rwa->or_mac, ZIO_DATA_MAC_LEN);
	rwa->or_byteorder = byteorder;

	return (0);
}
/*
 * Until we have the ability to redact large ranges of data efficiently, we
 * process these records as frees.
 */
static int
receive_redact(struct receive_writer_arg *rwa, struct drr_redact *drrr)
{
	struct drr_free drrf = {0};
	drrf.drr_length = drrr->drr_length;
	drrf.drr_object = drrr->drr_object;
	drrf.drr_offset = drrr->drr_offset;
	drrf.drr_toguid = drrr->drr_toguid;
	return (receive_free(rwa, &drrf));
}
/* used to destroy the drc_ds on error */
static void
dmu_recv_cleanup_ds(dmu_recv_cookie_t *drc)
{
	dsl_dataset_t *ds = drc->drc_ds;
	ds_hold_flags_t dsflags;

	dsflags = (drc->drc_raw) ? DS_HOLD_FLAG_NONE : DS_HOLD_FLAG_DECRYPT;
	/*
	 * Wait for the txg sync before cleaning up the receive. For
	 * resumable receives, this ensures that our resume state has
	 * been written out to disk. For raw receives, this ensures
	 * that the user accounting code will not attempt to do anything
	 * after we stopped receiving the dataset.
	 */
	txg_wait_synced(ds->ds_dir->dd_pool, 0);
	ds->ds_objset->os_raw_receive = B_FALSE;

	rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
	if (drc->drc_resumable && drc->drc_should_save &&
	    !BP_IS_HOLE(dsl_dataset_get_blkptr(ds))) {
		rrw_exit(&ds->ds_bp_rwlock, FTAG);
		dsl_dataset_disown(ds, dsflags, dmu_recv_tag);
	} else {
		char name[ZFS_MAX_DATASET_NAME_LEN];
		rrw_exit(&ds->ds_bp_rwlock, FTAG);
		dsl_dataset_name(ds, name);
		dsl_dataset_disown(ds, dsflags, dmu_recv_tag);
		(void) dsl_destroy_head(name);
	}
}
static void
receive_cksum(dmu_recv_cookie_t *drc, int len, void *buf)
{
	if (drc->drc_byteswap) {
		(void) fletcher_4_incremental_byteswap(buf, len,
		    &drc->drc_cksum);
	} else {
		(void) fletcher_4_incremental_native(buf, len, &drc->drc_cksum);
	}
}
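/*
 * The stream checksum is a Fletcher-4 checksum computed incrementally as
 * records are read, so corruption is detected without buffering the whole
 * stream. The byteswap variant is used when the stream was generated on a
 * host of the opposite endianness.
 */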
/*
 * Read the payload into a buffer of size len, and update the current record's
 * payload field.
 * Allocate drc->drc_next_rrd and read the next record's header into
 * drc->drc_next_rrd->header.
 * Verify checksum of payload and next record.
 */
static int
receive_read_payload_and_next_header(dmu_recv_cookie_t *drc, int len,
    void *buf)
{
	int err;

	if (len != 0) {
		ASSERT3U(len, <=, SPA_MAXBLOCKSIZE);
		err = receive_read(drc, len, buf);
		if (err != 0)
			return (err);
		receive_cksum(drc, len, buf);

		/* note: rrd is NULL when reading the begin record's payload */
		if (drc->drc_rrd != NULL) {
			drc->drc_rrd->payload = buf;
			drc->drc_rrd->payload_size = len;
			drc->drc_rrd->bytes_read = drc->drc_bytes_read;
		}
	} else {
		ASSERT3P(buf, ==, NULL);
	}

	drc->drc_prev_cksum = drc->drc_cksum;

	drc->drc_next_rrd = kmem_zalloc(sizeof (*drc->drc_next_rrd), KM_SLEEP);
	err = receive_read(drc, sizeof (drc->drc_next_rrd->header),
	    &drc->drc_next_rrd->header);
	drc->drc_next_rrd->bytes_read = drc->drc_bytes_read;

	if (err != 0) {
		kmem_free(drc->drc_next_rrd, sizeof (*drc->drc_next_rrd));
		drc->drc_next_rrd = NULL;
		return (err);
	}
	if (drc->drc_next_rrd->header.drr_type == DRR_BEGIN) {
		kmem_free(drc->drc_next_rrd, sizeof (*drc->drc_next_rrd));
		drc->drc_next_rrd = NULL;
		return (SET_ERROR(EINVAL));
	}

	/*
	 * Note: checksum is of everything up to but not including the
	 * checksum itself.
	 */
	ASSERT3U(offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
	    ==, sizeof (dmu_replay_record_t) - sizeof (zio_cksum_t));
	receive_cksum(drc,
	    offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
	    &drc->drc_next_rrd->header);

	zio_cksum_t cksum_orig =
	    drc->drc_next_rrd->header.drr_u.drr_checksum.drr_checksum;
	zio_cksum_t *cksump =
	    &drc->drc_next_rrd->header.drr_u.drr_checksum.drr_checksum;

	if (drc->drc_byteswap)
		byteswap_record(&drc->drc_next_rrd->header);

	if ((!ZIO_CHECKSUM_IS_ZERO(cksump)) &&
	    !ZIO_CHECKSUM_EQUAL(drc->drc_cksum, *cksump)) {
		kmem_free(drc->drc_next_rrd, sizeof (*drc->drc_next_rrd));
		drc->drc_next_rrd = NULL;
		return (SET_ERROR(ECKSUM));
	}

	receive_cksum(drc, sizeof (cksum_orig), &cksum_orig);

	return (0);
}
/*
 * Issue the prefetch reads for any necessary indirect blocks.
 *
 * We use the object ignore list to tell us whether or not to issue prefetches
 * for a given object. We do this for both correctness (in case the blocksize
 * of an object has changed) and performance (if the object doesn't exist, don't
 * needlessly try to issue prefetches). We also trim the list as we go through
 * the stream to prevent it from growing to an unbounded size.
 *
 * The object numbers within will always be in sorted order, and any write
 * records we see will also be in sorted order, but they're not sorted with
 * respect to each other (i.e. we can get several object records before
 * receiving each object's write records). As a result, once we've reached a
 * given object number, we can safely remove any reference to lower object
 * numbers in the ignore list. In practice, we receive up to 32 object records
 * before receiving write records, so the list can have up to 32 nodes in it.
 */
static void
receive_read_prefetch(dmu_recv_cookie_t *drc, uint64_t object, uint64_t offset,
    uint64_t length)
{
	if (!objlist_exists(drc->drc_ignore_objlist, object)) {
		dmu_prefetch(drc->drc_os, object, 1, offset, length,
		    ZIO_PRIORITY_SYNC_READ);
	}
}
/*
 * Read records off the stream, issuing any necessary prefetches.
 */
static int
receive_read_record(dmu_recv_cookie_t *drc)
{
	int err;

	switch (drc->drc_rrd->header.drr_type) {
	case DRR_OBJECT:
	{
		struct drr_object *drro =
		    &drc->drc_rrd->header.drr_u.drr_object;
		uint32_t size = DRR_OBJECT_PAYLOAD_SIZE(drro);
		void *buf = NULL;
		dmu_object_info_t doi;

		if (size != 0)
			buf = kmem_zalloc(size, KM_SLEEP);

		err = receive_read_payload_and_next_header(drc, size, buf);
		if (err != 0) {
			kmem_free(buf, size);
			return (err);
		}
		err = dmu_object_info(drc->drc_os, drro->drr_object, &doi);
		/*
		 * See receive_read_prefetch for an explanation why we're
		 * storing this object in the ignore_obj_list.
		 */
		if (err == ENOENT || err == EEXIST ||
		    (err == 0 && doi.doi_data_block_size != drro->drr_blksz)) {
			objlist_insert(drc->drc_ignore_objlist,
			    drro->drr_object);
			err = 0;
		}
		return (err);
	}
	case DRR_FREEOBJECTS:
	{
		err = receive_read_payload_and_next_header(drc, 0, NULL);
		return (err);
	}
	case DRR_WRITE:
	{
		struct drr_write *drrw = &drc->drc_rrd->header.drr_u.drr_write;
		int size = DRR_WRITE_PAYLOAD_SIZE(drrw);
		abd_t *abd = abd_alloc_linear(size, B_FALSE);
		err = receive_read_payload_and_next_header(drc, size,
		    abd_to_buf(abd));
		if (err != 0) {
			abd_free(abd);
			return (err);
		}
		drc->drc_rrd->abd = abd;
		receive_read_prefetch(drc, drrw->drr_object, drrw->drr_offset,
		    drrw->drr_logical_size);
		return (err);
	}
	case DRR_WRITE_EMBEDDED:
	{
		struct drr_write_embedded *drrwe =
		    &drc->drc_rrd->header.drr_u.drr_write_embedded;
		uint32_t size = P2ROUNDUP(drrwe->drr_psize, 8);
		void *buf = kmem_zalloc(size, KM_SLEEP);

		err = receive_read_payload_and_next_header(drc, size, buf);
		if (err != 0) {
			kmem_free(buf, size);
			return (err);
		}

		receive_read_prefetch(drc, drrwe->drr_object,
		    drrwe->drr_offset, drrwe->drr_length);
		return (err);
	}
	case DRR_FREE:
	case DRR_REDACT:
	{
		/*
		 * It might be beneficial to prefetch indirect blocks here, but
		 * we don't really have the data to decide for sure.
		 */
		err = receive_read_payload_and_next_header(drc, 0, NULL);
		return (err);
	}
	case DRR_END:
	{
		struct drr_end *drre = &drc->drc_rrd->header.drr_u.drr_end;
		if (!ZIO_CHECKSUM_EQUAL(drc->drc_prev_cksum,
		    drre->drr_checksum))
			return (SET_ERROR(ECKSUM));
		return (0);
	}
	case DRR_SPILL:
	{
		struct drr_spill *drrs = &drc->drc_rrd->header.drr_u.drr_spill;
		int size = DRR_SPILL_PAYLOAD_SIZE(drrs);
		abd_t *abd = abd_alloc_linear(size, B_FALSE);
		err = receive_read_payload_and_next_header(drc, size,
		    abd_to_buf(abd));
		if (err != 0)
			abd_free(abd);
		else
			drc->drc_rrd->abd = abd;
		return (err);
	}
	case DRR_OBJECT_RANGE:
	{
		err = receive_read_payload_and_next_header(drc, 0, NULL);
		return (err);
	}
	default:
		return (SET_ERROR(EINVAL));
	}
}
static void
dprintf_drr(struct receive_record_arg *rrd, int err)
{
#ifdef ZFS_DEBUG
	switch (rrd->header.drr_type) {
	case DRR_OBJECT:
	{
		struct drr_object *drro = &rrd->header.drr_u.drr_object;
		dprintf("drr_type = OBJECT obj = %llu type = %u "
		    "bonustype = %u blksz = %u bonuslen = %u cksumtype = %u "
		    "compress = %u dn_slots = %u err = %d\n",
		    (u_longlong_t)drro->drr_object, drro->drr_type,
		    drro->drr_bonustype, drro->drr_blksz, drro->drr_bonuslen,
		    drro->drr_checksumtype, drro->drr_compress,
		    drro->drr_dn_slots, err);
		break;
	}
	case DRR_FREEOBJECTS:
	{
		struct drr_freeobjects *drrfo =
		    &rrd->header.drr_u.drr_freeobjects;
		dprintf("drr_type = FREEOBJECTS firstobj = %llu "
		    "numobjs = %llu err = %d\n",
		    (u_longlong_t)drrfo->drr_firstobj,
		    (u_longlong_t)drrfo->drr_numobjs, err);
		break;
	}
	case DRR_WRITE:
	{
		struct drr_write *drrw = &rrd->header.drr_u.drr_write;
		dprintf("drr_type = WRITE obj = %llu type = %u offset = %llu "
		    "lsize = %llu cksumtype = %u flags = %u "
		    "compress = %u psize = %llu err = %d\n",
		    (u_longlong_t)drrw->drr_object, drrw->drr_type,
		    (u_longlong_t)drrw->drr_offset,
		    (u_longlong_t)drrw->drr_logical_size,
		    drrw->drr_checksumtype, drrw->drr_flags,
		    drrw->drr_compressiontype,
		    (u_longlong_t)drrw->drr_compressed_size, err);
		break;
	}
	case DRR_WRITE_BYREF:
	{
		struct drr_write_byref *drrwbr =
		    &rrd->header.drr_u.drr_write_byref;
		dprintf("drr_type = WRITE_BYREF obj = %llu offset = %llu "
		    "length = %llu toguid = %llx refguid = %llx "
		    "refobject = %llu refoffset = %llu cksumtype = %u "
		    "flags = %u err = %d\n",
		    (u_longlong_t)drrwbr->drr_object,
		    (u_longlong_t)drrwbr->drr_offset,
		    (u_longlong_t)drrwbr->drr_length,
		    (u_longlong_t)drrwbr->drr_toguid,
		    (u_longlong_t)drrwbr->drr_refguid,
		    (u_longlong_t)drrwbr->drr_refobject,
		    (u_longlong_t)drrwbr->drr_refoffset,
		    drrwbr->drr_checksumtype, drrwbr->drr_flags, err);
		break;
	}
	case DRR_WRITE_EMBEDDED:
	{
		struct drr_write_embedded *drrwe =
		    &rrd->header.drr_u.drr_write_embedded;
		dprintf("drr_type = WRITE_EMBEDDED obj = %llu offset = %llu "
		    "length = %llu compress = %u etype = %u lsize = %u "
		    "psize = %u err = %d\n",
		    (u_longlong_t)drrwe->drr_object,
		    (u_longlong_t)drrwe->drr_offset,
		    (u_longlong_t)drrwe->drr_length,
		    drrwe->drr_compression, drrwe->drr_etype,
		    drrwe->drr_lsize, drrwe->drr_psize, err);
		break;
	}
	case DRR_FREE:
	{
		struct drr_free *drrf = &rrd->header.drr_u.drr_free;
		dprintf("drr_type = FREE obj = %llu offset = %llu "
		    "length = %lld err = %d\n",
		    (u_longlong_t)drrf->drr_object,
		    (u_longlong_t)drrf->drr_offset,
		    (longlong_t)drrf->drr_length,
		    err);
		break;
	}
	case DRR_SPILL:
	{
		struct drr_spill *drrs = &rrd->header.drr_u.drr_spill;
		dprintf("drr_type = SPILL obj = %llu length = %llu "
		    "err = %d\n", (u_longlong_t)drrs->drr_object,
		    (u_longlong_t)drrs->drr_length, err);
		break;
	}
	case DRR_OBJECT_RANGE:
	{
		struct drr_object_range *drror =
		    &rrd->header.drr_u.drr_object_range;
		dprintf("drr_type = OBJECT_RANGE firstobj = %llu "
		    "numslots = %llu flags = %u err = %d\n",
		    (u_longlong_t)drror->drr_firstobj,
		    (u_longlong_t)drror->drr_numslots,
		    drror->drr_flags, err);
		break;
	}
	default:
		return;
	}
#endif
}
/*
 * Commit the records to the pool.
 */
static int
receive_process_record(struct receive_writer_arg *rwa,
    struct receive_record_arg *rrd)
{
	int err;

	/* Processing in order, therefore bytes_read should be increasing. */
	ASSERT3U(rrd->bytes_read, >=, rwa->bytes_read);
	rwa->bytes_read = rrd->bytes_read;

	if (rrd->header.drr_type != DRR_WRITE) {
		err = flush_write_batch(rwa);
		if (err != 0) {
			if (rrd->abd != NULL) {
				abd_free(rrd->abd);
				rrd->abd = NULL;
				rrd->payload = NULL;
			} else if (rrd->payload != NULL) {
				kmem_free(rrd->payload, rrd->payload_size);
				rrd->payload = NULL;
			}
			return (err);
		}
	}

	switch (rrd->header.drr_type) {
	case DRR_OBJECT:
	{
		struct drr_object *drro = &rrd->header.drr_u.drr_object;
		err = receive_object(rwa, drro, rrd->payload);
		kmem_free(rrd->payload, rrd->payload_size);
		rrd->payload = NULL;
		break;
	}
	case DRR_FREEOBJECTS:
	{
		struct drr_freeobjects *drrfo =
		    &rrd->header.drr_u.drr_freeobjects;
		err = receive_freeobjects(rwa, drrfo);
		break;
	}
	case DRR_WRITE:
	{
		err = receive_process_write_record(rwa, rrd);
		if (err != EAGAIN) {
			/*
			 * On success, receive_process_write_record() returns
			 * EAGAIN to indicate that we do not want to free
			 * the rrd or arc_buf.
			 */
			abd_free(rrd->abd);
			rrd->abd = NULL;
		}
		break;
	}
	case DRR_WRITE_EMBEDDED:
	{
		struct drr_write_embedded *drrwe =
		    &rrd->header.drr_u.drr_write_embedded;
		err = receive_write_embedded(rwa, drrwe, rrd->payload);
		kmem_free(rrd->payload, rrd->payload_size);
		rrd->payload = NULL;
		break;
	}
	case DRR_FREE:
	{
		struct drr_free *drrf = &rrd->header.drr_u.drr_free;
		err = receive_free(rwa, drrf);
		break;
	}
	case DRR_SPILL:
	{
		struct drr_spill *drrs = &rrd->header.drr_u.drr_spill;
		err = receive_spill(rwa, drrs, rrd->abd);
		if (err != 0)
			abd_free(rrd->abd);
		rrd->abd = NULL;
		rrd->payload = NULL;
		break;
	}
	case DRR_OBJECT_RANGE:
	{
		struct drr_object_range *drror =
		    &rrd->header.drr_u.drr_object_range;
		err = receive_object_range(rwa, drror);
		break;
	}
	case DRR_REDACT:
	{
		struct drr_redact *drrr = &rrd->header.drr_u.drr_redact;
		err = receive_redact(rwa, drrr);
		break;
	}
	default:
		err = (SET_ERROR(EINVAL));
	}

	if (err != 0)
		dprintf_drr(rrd, err);

	return (err);
}
/*
 * dmu_recv_stream's worker thread; pull records off the queue, and then call
 * receive_process_record.  When we're done, signal the main thread and exit.
 */
static void
receive_writer_thread(void *arg)
{
	struct receive_writer_arg *rwa = arg;
	struct receive_record_arg *rrd;
	fstrans_cookie_t cookie = spl_fstrans_mark();

	for (rrd = bqueue_dequeue(&rwa->q); !rrd->eos_marker;
	    rrd = bqueue_dequeue(&rwa->q)) {
		/*
		 * If there's an error, the main thread will stop putting things
		 * on the queue, but we need to clear everything in it before we
		 * can exit.
		 */
		int err = 0;
		if (rwa->err == 0) {
			err = receive_process_record(rwa, rrd);
		} else if (rrd->abd != NULL) {
			abd_free(rrd->abd);
			rrd->abd = NULL;
			rrd->payload = NULL;
		} else if (rrd->payload != NULL) {
			kmem_free(rrd->payload, rrd->payload_size);
			rrd->payload = NULL;
		}
		/*
		 * EAGAIN indicates that this record has been saved (on
		 * rwa->write_batch), and will be used again, so we don't
		 * free it.
		 */
		if (err != EAGAIN) {
			if (rwa->err == 0)
				rwa->err = err;
			kmem_free(rrd, sizeof (*rrd));
		}
	}
	kmem_free(rrd, sizeof (*rrd));

	int err = flush_write_batch(rwa);
	if (rwa->err == 0)
		rwa->err = err;

	mutex_enter(&rwa->mutex);
	rwa->done = B_TRUE;
	cv_signal(&rwa->cv);
	mutex_exit(&rwa->mutex);
	spl_fstrans_unmark(cookie);
	thread_exit();
}
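/*
 * The writer thread and the reader in dmu_recv_stream() form a
 * single-producer/single-consumer pipeline over rwa->q. bqueue_enqueue()
 * provides backpressure: the reader blocks once the queue holds roughly
 * zfs_recv_queue_length bytes, bounding memory usage regardless of how
 * fast the stream arrives.
 */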
static int
resume_check(dmu_recv_cookie_t *drc, nvlist_t *begin_nvl)
{
	objset_t *mos = dmu_objset_pool(drc->drc_os)->dp_meta_objset;
	uint64_t dsobj = dmu_objset_id(drc->drc_os);
	uint64_t resume_obj, resume_off;
	uint64_t val;

	if (nvlist_lookup_uint64(begin_nvl,
	    "resume_object", &resume_obj) != 0 ||
	    nvlist_lookup_uint64(begin_nvl,
	    "resume_offset", &resume_off) != 0) {
		return (SET_ERROR(EINVAL));
	}
	VERIFY0(zap_lookup(mos, dsobj,
	    DS_FIELD_RESUME_OBJECT, sizeof (val), 1, &val));
	if (resume_obj != val)
		return (SET_ERROR(EINVAL));
	VERIFY0(zap_lookup(mos, dsobj,
	    DS_FIELD_RESUME_OFFSET, sizeof (val), 1, &val));
	if (resume_off != val)
		return (SET_ERROR(EINVAL));

	return (0);
}
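/*
 * resume_check() cross-checks the resume point named in the BEGIN
 * record's nvlist against the resume state persisted in the dataset's
 * ZAP by a previous interrupted receive; a mismatch means the stream was
 * generated for a different resume point and must be rejected.
 */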
/*
 * Read in the stream's records, one by one, and apply them to the pool. There
 * are two threads involved; the thread that calls this function will spin up a
 * worker thread, read the records off the stream one by one, and issue
 * prefetches for any necessary indirect blocks. It will then push the records
 * onto an internal blocking queue. The worker thread will pull the records off
 * the queue, and actually write the data into the DMU. This way, the worker
 * thread doesn't have to wait for reads to complete, since everything it needs
 * (the indirect blocks) will be prefetched.
 *
 * NB: callers *must* call dmu_recv_end() if this succeeds.
 */
int
dmu_recv_stream(dmu_recv_cookie_t *drc, offset_t *voffp)
{
	int err = 0;
	struct receive_writer_arg *rwa = kmem_zalloc(sizeof (*rwa), KM_SLEEP);

	if (dsl_dataset_has_resume_receive_state(drc->drc_ds)) {
		uint64_t bytes = 0;
		(void) zap_lookup(drc->drc_ds->ds_dir->dd_pool->dp_meta_objset,
		    drc->drc_ds->ds_object, DS_FIELD_RESUME_BYTES,
		    sizeof (bytes), 1, &bytes);
		drc->drc_bytes_read += bytes;
	}

	drc->drc_ignore_objlist = objlist_create();

	/* these were verified in dmu_recv_begin */
	ASSERT3U(DMU_GET_STREAM_HDRTYPE(drc->drc_drrb->drr_versioninfo), ==,
	    DMU_SUBSTREAM);
	ASSERT3U(drc->drc_drrb->drr_type, <, DMU_OST_NUMTYPES);

	ASSERT(dsl_dataset_phys(drc->drc_ds)->ds_flags & DS_FLAG_INCONSISTENT);
	ASSERT0(drc->drc_os->os_encrypted &&
	    (drc->drc_featureflags & DMU_BACKUP_FEATURE_EMBED_DATA));

	/* handle DSL encryption key payload */
	if (drc->drc_featureflags & DMU_BACKUP_FEATURE_RAW) {
		nvlist_t *keynvl = NULL;

		ASSERT(drc->drc_os->os_encrypted);
		ASSERT(drc->drc_raw);

		err = nvlist_lookup_nvlist(drc->drc_begin_nvl, "crypt_keydata",
		    &keynvl);
		if (err != 0)
			goto out;

		/*
		 * If this is a new dataset we set the key immediately.
		 * Otherwise we don't want to change the key until we
		 * are sure the rest of the receive succeeded so we stash
		 * the keynvl away until then.
		 */
		err = dsl_crypto_recv_raw(spa_name(drc->drc_os->os_spa),
		    drc->drc_ds->ds_object, drc->drc_fromsnapobj,
		    drc->drc_drrb->drr_type, keynvl, drc->drc_newfs);
		if (err != 0)
			goto out;

		/* see comment in dmu_recv_end_sync() */
		drc->drc_ivset_guid = 0;
		(void) nvlist_lookup_uint64(keynvl, "to_ivset_guid",
		    &drc->drc_ivset_guid);

		if (!drc->drc_newfs)
			drc->drc_keynvl = fnvlist_dup(keynvl);
	}

	if (drc->drc_featureflags & DMU_BACKUP_FEATURE_RESUMING) {
		err = resume_check(drc, drc->drc_begin_nvl);
		if (err != 0)
			goto out;
	}

	/*
	 * If we failed before this point we will clean up any new resume
	 * state that was created. Now that we've gotten past the initial
	 * checks we are ok to retain that resume state.
	 */
	drc->drc_should_save = B_TRUE;

	(void) bqueue_init(&rwa->q, zfs_recv_queue_ff,
	    MAX(zfs_recv_queue_length, 2 * zfs_max_recordsize),
	    offsetof(struct receive_record_arg, node));
	cv_init(&rwa->cv, NULL, CV_DEFAULT, NULL);
	mutex_init(&rwa->mutex, NULL, MUTEX_DEFAULT, NULL);
	rwa->os = drc->drc_os;
	rwa->byteswap = drc->drc_byteswap;
	rwa->resumable = drc->drc_resumable;
	rwa->raw = drc->drc_raw;
	rwa->spill = drc->drc_spill;
	rwa->full = (drc->drc_drr_begin->drr_u.drr_begin.drr_fromguid == 0);
	rwa->os->os_raw_receive = drc->drc_raw;
	list_create(&rwa->write_batch, sizeof (struct receive_record_arg),
	    offsetof(struct receive_record_arg, node.bqn_node));

	(void) thread_create(NULL, 0, receive_writer_thread, rwa, 0, curproc,
	    TS_RUN, minclsyspri);
	/*
	 * We're reading rwa->err without locks, which is safe since we are the
	 * only reader, and the worker thread is the only writer. It's ok if we
	 * miss a write for an iteration or two of the loop, since the writer
	 * thread will keep freeing records we send it until we send it an eos
	 * marker.
	 *
	 * We can leave this loop in 3 ways:  First, if rwa->err is
	 * non-zero.  In that case, the writer thread will free the rrd we just
	 * pushed.  Second, if we're interrupted; in that case, either it's the
	 * first loop and drc->drc_rrd was never allocated, or it's later, and
	 * drc->drc_rrd has been handed off to the writer thread who will free
	 * it.  Finally, if receive_read_record fails or we're at the end of the
	 * stream, then we free drc->drc_rrd and exit.
	 */
	while (rwa->err == 0) {
		if (issig(JUSTLOOKING) && issig(FORREAL)) {
			err = SET_ERROR(EINTR);
			break;
		}

		ASSERT3P(drc->drc_rrd, ==, NULL);
		drc->drc_rrd = drc->drc_next_rrd;
		drc->drc_next_rrd = NULL;
		/* Allocates and loads header into drc->drc_next_rrd */
		err = receive_read_record(drc);

		if (drc->drc_rrd->header.drr_type == DRR_END || err != 0) {
			kmem_free(drc->drc_rrd, sizeof (*drc->drc_rrd));
			drc->drc_rrd = NULL;
			break;
		}

		bqueue_enqueue(&rwa->q, drc->drc_rrd,
		    sizeof (struct receive_record_arg) +
		    drc->drc_rrd->payload_size);
		drc->drc_rrd = NULL;
	}

	ASSERT3P(drc->drc_rrd, ==, NULL);
	drc->drc_rrd = kmem_zalloc(sizeof (*drc->drc_rrd), KM_SLEEP);
	drc->drc_rrd->eos_marker = B_TRUE;
	bqueue_enqueue_flush(&rwa->q, drc->drc_rrd, 1);

	mutex_enter(&rwa->mutex);
	while (!rwa->done) {
		/*
		 * We need to use cv_wait_sig() so that any process that may
		 * be sleeping here can still fork.
		 */
		(void) cv_wait_sig(&rwa->cv, &rwa->mutex);
	}
	mutex_exit(&rwa->mutex);

	/*
	 * If we are receiving a full stream as a clone, all object IDs which
	 * are greater than the maximum ID referenced in the stream are
	 * by definition unused and must be freed.
	 */
	if (drc->drc_clone && drc->drc_drrb->drr_fromguid == 0) {
		uint64_t obj = rwa->max_object + 1;
		int free_err = 0;
		int next_err = 0;

		while (next_err == 0) {
			free_err = dmu_free_long_object(rwa->os, obj);
			if (free_err != 0 && free_err != ENOENT)
				break;

			next_err = dmu_object_next(rwa->os, &obj, FALSE, 0);
		}

		if (err == 0) {
			if (free_err != 0 && free_err != ENOENT)
				err = free_err;
			else if (next_err != ESRCH)
				err = next_err;
		}
	}

	cv_destroy(&rwa->cv);
	mutex_destroy(&rwa->mutex);
	bqueue_destroy(&rwa->q);
	list_destroy(&rwa->write_batch);
	if (err == 0)
		err = rwa->err;

out:
	/*
	 * If we hit an error before we started the receive_writer_thread
	 * we need to clean up the next_rrd we create by processing the
	 * DRR_BEGIN record.
	 */
	if (drc->drc_next_rrd != NULL)
		kmem_free(drc->drc_next_rrd, sizeof (*drc->drc_next_rrd));

	/*
	 * The objset will be invalidated by dmu_recv_end() when we do
	 * dsl_dataset_clone_swap_sync_impl().
	 */
	drc->drc_os = NULL;

	kmem_free(rwa, sizeof (*rwa));
	nvlist_free(drc->drc_begin_nvl);

	if (err != 0) {
		/*
		 * Clean up references. If receive is not resumable,
		 * destroy what we created, so we don't leave it in
		 * the inconsistent state.
		 */
		dmu_recv_cleanup_ds(drc);
		nvlist_free(drc->drc_keynvl);
	}

	objlist_destroy(drc->drc_ignore_objlist);
	drc->drc_ignore_objlist = NULL;
	*voffp = drc->drc_voff;
	return (err);
}
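/*
 * Shutdown protocol: the reader always enqueues a final eos_marker
 * record, even on error, so the writer thread has a definite signal to
 * drain the queue and exit; the reader then waits on rwa->cv until the
 * writer sets rwa->done.
 */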
static int
dmu_recv_end_check(void *arg, dmu_tx_t *tx)
{
	dmu_recv_cookie_t *drc = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	int error;

	ASSERT3P(drc->drc_ds->ds_owner, ==, dmu_recv_tag);

	if (!drc->drc_newfs) {
		dsl_dataset_t *origin_head;

		error = dsl_dataset_hold(dp, drc->drc_tofs, FTAG,
		    &origin_head);
		if (error != 0)
			return (error);
		if (drc->drc_force) {
			/*
			 * We will destroy any snapshots in tofs (i.e. before
			 * origin_head) that are after the origin (which is
			 * the snap before drc_ds, because drc_ds can not
			 * have any snaps of its own).
			 */
			uint64_t obj;

			obj = dsl_dataset_phys(origin_head)->ds_prev_snap_obj;
			while (obj !=
			    dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj) {
				dsl_dataset_t *snap;
				error = dsl_dataset_hold_obj(dp, obj, FTAG,
				    &snap);
				if (error != 0)
					break;
				if (snap->ds_dir != origin_head->ds_dir)
					error = SET_ERROR(EINVAL);
				if (error == 0) {
					error = dsl_destroy_snapshot_check_impl(
					    snap, B_FALSE);
				}
				obj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
				dsl_dataset_rele(snap, FTAG);
				if (error != 0)
					break;
			}
			if (error != 0) {
				dsl_dataset_rele(origin_head, FTAG);
				return (error);
			}
		}
		if (drc->drc_keynvl != NULL) {
			error = dsl_crypto_recv_raw_key_check(drc->drc_ds,
			    drc->drc_keynvl, tx);
			if (error != 0) {
				dsl_dataset_rele(origin_head, FTAG);
				return (error);
			}
		}

		error = dsl_dataset_clone_swap_check_impl(drc->drc_ds,
		    origin_head, drc->drc_force, drc->drc_owner, tx);
		if (error != 0) {
			dsl_dataset_rele(origin_head, FTAG);
			return (error);
		}
		error = dsl_dataset_snapshot_check_impl(origin_head,
		    drc->drc_tosnap, tx, B_TRUE, 1,
		    drc->drc_cred, drc->drc_proc);
		dsl_dataset_rele(origin_head, FTAG);
		if (error != 0)
			return (error);

		error = dsl_destroy_head_check_impl(drc->drc_ds, 1);
	} else {
		error = dsl_dataset_snapshot_check_impl(drc->drc_ds,
		    drc->drc_tosnap, tx, B_TRUE, 1,
		    drc->drc_cred, drc->drc_proc);
	}
	return (error);
}
static void
dmu_recv_end_sync(void *arg, dmu_tx_t *tx)
{
	dmu_recv_cookie_t *drc = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	boolean_t encrypted = drc->drc_ds->ds_dir->dd_crypto_obj != 0;
	uint64_t newsnapobj;

	spa_history_log_internal_ds(drc->drc_ds, "finish receiving",
	    tx, "snap=%s", drc->drc_tosnap);
	drc->drc_ds->ds_objset->os_raw_receive = B_FALSE;

	if (!drc->drc_newfs) {
		dsl_dataset_t *origin_head;

		VERIFY0(dsl_dataset_hold(dp, drc->drc_tofs, FTAG,
		    &origin_head));

		if (drc->drc_force) {
			/*
			 * Destroy any snapshots of drc_tofs (origin_head)
			 * after the origin (the snap before drc_ds).
			 */
			uint64_t obj;

			obj = dsl_dataset_phys(origin_head)->ds_prev_snap_obj;
			while (obj !=
			    dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj) {
				dsl_dataset_t *snap;
				VERIFY0(dsl_dataset_hold_obj(dp, obj, FTAG,
				    &snap));
				ASSERT3P(snap->ds_dir, ==,
				    origin_head->ds_dir);
				obj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
				dsl_destroy_snapshot_sync_impl(snap,
				    B_FALSE, tx);
				dsl_dataset_rele(snap, FTAG);
			}
		}
		if (drc->drc_keynvl != NULL) {
			dsl_crypto_recv_raw_key_sync(drc->drc_ds,
			    drc->drc_keynvl, tx);
			nvlist_free(drc->drc_keynvl);
			drc->drc_keynvl = NULL;
		}

		VERIFY3P(drc->drc_ds->ds_prev, ==,
		    origin_head->ds_prev);

		dsl_dataset_clone_swap_sync_impl(drc->drc_ds,
		    origin_head, tx);
		/*
		 * The objset was evicted by dsl_dataset_clone_swap_sync_impl,
		 * so drc_os is no longer valid.
		 */
		drc->drc_os = NULL;

		dsl_dataset_snapshot_sync_impl(origin_head,
		    drc->drc_tosnap, tx);

		/* set snapshot's creation time and guid */
		dmu_buf_will_dirty(origin_head->ds_prev->ds_dbuf, tx);
		dsl_dataset_phys(origin_head->ds_prev)->ds_creation_time =
		    drc->drc_drrb->drr_creation_time;
		dsl_dataset_phys(origin_head->ds_prev)->ds_guid =
		    drc->drc_drrb->drr_toguid;
		dsl_dataset_phys(origin_head->ds_prev)->ds_flags &=
		    ~DS_FLAG_INCONSISTENT;

		dmu_buf_will_dirty(origin_head->ds_dbuf, tx);
		dsl_dataset_phys(origin_head)->ds_flags &=
		    ~DS_FLAG_INCONSISTENT;

		newsnapobj =
		    dsl_dataset_phys(origin_head)->ds_prev_snap_obj;

		dsl_dataset_rele(origin_head, FTAG);
		dsl_destroy_head_sync_impl(drc->drc_ds, tx);

		if (drc->drc_owner != NULL)
			VERIFY3P(origin_head->ds_owner, ==, drc->drc_owner);
	} else {
		dsl_dataset_t *ds = drc->drc_ds;

		dsl_dataset_snapshot_sync_impl(ds, drc->drc_tosnap, tx);

		/* set snapshot's creation time and guid */
		dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
		dsl_dataset_phys(ds->ds_prev)->ds_creation_time =
		    drc->drc_drrb->drr_creation_time;
		dsl_dataset_phys(ds->ds_prev)->ds_guid =
		    drc->drc_drrb->drr_toguid;
		dsl_dataset_phys(ds->ds_prev)->ds_flags &=
		    ~DS_FLAG_INCONSISTENT;

		dmu_buf_will_dirty(ds->ds_dbuf, tx);
		dsl_dataset_phys(ds)->ds_flags &= ~DS_FLAG_INCONSISTENT;
		if (dsl_dataset_has_resume_receive_state(ds)) {
			(void) zap_remove(dp->dp_meta_objset, ds->ds_object,
			    DS_FIELD_RESUME_FROMGUID, tx);
			(void) zap_remove(dp->dp_meta_objset, ds->ds_object,
			    DS_FIELD_RESUME_OBJECT, tx);
			(void) zap_remove(dp->dp_meta_objset, ds->ds_object,
			    DS_FIELD_RESUME_OFFSET, tx);
			(void) zap_remove(dp->dp_meta_objset, ds->ds_object,
			    DS_FIELD_RESUME_BYTES, tx);
			(void) zap_remove(dp->dp_meta_objset, ds->ds_object,
			    DS_FIELD_RESUME_TOGUID, tx);
			(void) zap_remove(dp->dp_meta_objset, ds->ds_object,
			    DS_FIELD_RESUME_TONAME, tx);
			(void) zap_remove(dp->dp_meta_objset, ds->ds_object,
			    DS_FIELD_RESUME_REDACT_BOOKMARK_SNAPS, tx);
		}
		newsnapobj =
		    dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj;
	}

	/*
	 * If this is a raw receive, the crypt_keydata nvlist will include
	 * a to_ivset_guid for us to set on the new snapshot. This value
	 * will override the value generated by the snapshot code. However,
	 * this value may not be present, because older implementations of
	 * the raw send code did not include this value, and we are still
	 * allowed to receive them if the zfs_disable_ivset_guid_check
	 * tunable is set, in which case we will leave the newly-generated
	 * value.
	 */
	if (drc->drc_raw && drc->drc_ivset_guid != 0) {
		dmu_object_zapify(dp->dp_meta_objset, newsnapobj,
		    DMU_OT_DSL_DATASET, tx);
		VERIFY0(zap_update(dp->dp_meta_objset, newsnapobj,
		    DS_FIELD_IVSET_GUID, sizeof (uint64_t), 1,
		    &drc->drc_ivset_guid, tx));
	}

	/*
	 * Release the hold from dmu_recv_begin.  This must be done before
	 * we return to open context, so that when we free the dataset's dnode
	 * we can evict its bonus buffer. Since the dataset may be destroyed
	 * at this point (and therefore won't have a valid pointer to the spa)
	 * we release the key mapping manually here while we do have a valid
	 * pointer, if it exists.
	 */
	if (!drc->drc_raw && encrypted) {
		(void) spa_keystore_remove_mapping(dmu_tx_pool(tx)->dp_spa,
		    drc->drc_ds->ds_object, drc->drc_ds);
	}
	dsl_dataset_disown(drc->drc_ds, 0, dmu_recv_tag);
	drc->drc_ds = NULL;
}
static int dmu_recv_end_modified_blocks = 3;

static int
dmu_recv_existing_end(dmu_recv_cookie_t *drc)
{
#ifdef _KERNEL
	/*
	 * We will be destroying the ds; make sure its origin is unmounted if
	 * necessary.
	 */
	char name[ZFS_MAX_DATASET_NAME_LEN];
	dsl_dataset_name(drc->drc_ds, name);
	zfs_destroy_unmount_origin(name);
#endif

	return (dsl_sync_task(drc->drc_tofs,
	    dmu_recv_end_check, dmu_recv_end_sync, drc,
	    dmu_recv_end_modified_blocks, ZFS_SPACE_CHECK_NORMAL));
}

static int
dmu_recv_new_end(dmu_recv_cookie_t *drc)
{
	return (dsl_sync_task(drc->drc_tofs,
	    dmu_recv_end_check, dmu_recv_end_sync, drc,
	    dmu_recv_end_modified_blocks, ZFS_SPACE_CHECK_NORMAL));
}

int
dmu_recv_end(dmu_recv_cookie_t *drc, void *owner)
{
	int error;

	drc->drc_owner = owner;

	if (drc->drc_newfs)
		error = dmu_recv_new_end(drc);
	else
		error = dmu_recv_existing_end(drc);

	if (error != 0) {
		dmu_recv_cleanup_ds(drc);
		nvlist_free(drc->drc_keynvl);
	} else {
		if (drc->drc_newfs) {
			zvol_create_minor(drc->drc_tofs);
		}
		char *snapname = kmem_asprintf("%s@%s",
		    drc->drc_tofs, drc->drc_tosnap);
		zvol_create_minor(snapname);
		kmem_strfree(snapname);
	}
	return (error);
}
/*
 * Return TRUE if this objset is currently being received into.
 */
boolean_t
dmu_objset_is_receiving(objset_t *os)
{
	return (os->os_dsl_dataset != NULL &&
	    os->os_dsl_dataset->ds_owner == dmu_recv_tag);
}
ZFS_MODULE_PARAM(zfs_recv, zfs_recv_, queue_length, INT, ZMOD_RW,
	"Maximum receive queue length");

ZFS_MODULE_PARAM(zfs_recv, zfs_recv_, queue_ff, INT, ZMOD_RW,
	"Receive queue fill fraction");

ZFS_MODULE_PARAM(zfs_recv, zfs_recv_, write_batch_size, INT, ZMOD_RW,
	"Maximum amount of writes to batch into one transaction");