/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2011, 2015 by Delphix. All rights reserved.
 * Copyright (c) 2014, Joyent, Inc. All rights reserved.
 * Copyright 2014 HybridCluster. All rights reserved.
 * Copyright 2016 RackTop Systems.
 * Copyright (c) 2016 Actifio, Inc. All rights reserved.
 * Copyright (c) 2018, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
 */
#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/spa_impl.h>
#include <sys/zfs_ioctl.h>
#include <sys/zap.h>
#include <sys/zio_checksum.h>
#include <sys/zfs_znode.h>
#include <zfs_fletcher.h>
#include <sys/avl.h>
#include <sys/ddt.h>
#include <sys/zfs_onexit.h>
#include <sys/dmu_recv.h>
#include <sys/dsl_destroy.h>
#include <sys/blkptr.h>
#include <sys/dsl_bookmark.h>
#include <sys/zfeature.h>
#include <sys/bqueue.h>
#include <sys/policy.h>
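
/*
 * Tunable bound on how much receive data may be queued between the
 * stream-reading thread and the receive worker (the queue itself is
 * created in dmu_recv_stream(), outside this excerpt).
 */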
int zfs_recv_queue_length = SPA_MAXBLOCKSIZE;

static char *dmu_recv_tag = "dmu_recv_tag";
const char *recv_clone_name = "%recv";

static void byteswap_record(dmu_replay_record_t *drr);
typedef struct dmu_recv_begin_arg {
	const char *drba_origin;
	dmu_recv_cookie_t *drba_cookie;
	cred_t *drba_cred;
	dsl_crypto_params_t *drba_dcp;
} dmu_recv_begin_arg_t;
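
/*
 * Validate a receive into an existing filesystem: the temporary %recv
 * clone and the target snapshot name must not already exist, a ZVOL
 * receive must not have child datasets, snapshot limits must allow one
 * more snapshot, and for incrementals the stream's fromguid must match
 * one of this dataset's snapshots.
 */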
static int
recv_begin_check_existing_impl(dmu_recv_begin_arg_t *drba, dsl_dataset_t *ds,
    uint64_t fromguid, uint64_t featureflags)
{
	uint64_t val;
	uint64_t children;
	int error;
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	boolean_t encrypted = ds->ds_dir->dd_crypto_obj != 0;
	boolean_t raw = (featureflags & DMU_BACKUP_FEATURE_RAW) != 0;
	boolean_t embed = (featureflags & DMU_BACKUP_FEATURE_EMBED_DATA) != 0;

	/* temporary clone name must not exist */
	error = zap_lookup(dp->dp_meta_objset,
	    dsl_dir_phys(ds->ds_dir)->dd_child_dir_zapobj, recv_clone_name,
	    8, 1, &val);
	if (error != ENOENT)
		return (error == 0 ? EBUSY : error);

	/* new snapshot name must not exist */
	error = zap_lookup(dp->dp_meta_objset,
	    dsl_dataset_phys(ds)->ds_snapnames_zapobj,
	    drba->drba_cookie->drc_tosnap, 8, 1, &val);
	if (error != ENOENT)
		return (error == 0 ? EEXIST : error);

	/* must not have children if receiving a ZVOL */
	error = zap_count(dp->dp_meta_objset,
	    dsl_dir_phys(ds->ds_dir)->dd_child_dir_zapobj, &children);
	if (error != 0)
		return (error);
	if (drba->drba_cookie->drc_drrb->drr_type != DMU_OST_ZFS &&
	    children > 0)
		return (SET_ERROR(ZFS_ERR_WRONG_PARENT));

	/*
	 * Check snapshot limit before receiving. We'll recheck again at the
	 * end, but might as well abort before receiving if we're already over
	 * the limit.
	 *
	 * Note that we do not check the file system limit with
	 * dsl_dir_fscount_check because the temporary %clones don't count
	 * against that limit.
	 */
	error = dsl_fs_ss_limit_check(ds->ds_dir, 1, ZFS_PROP_SNAPSHOT_LIMIT,
	    NULL, drba->drba_cred);
	if (error != 0)
		return (error);

	if (fromguid != 0) {
		dsl_dataset_t *snap;
		uint64_t obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;

		/* Can't raw receive on top of an unencrypted dataset */
		if (!encrypted && raw)
			return (SET_ERROR(EINVAL));

		/* Encryption is incompatible with embedded data */
		if (encrypted && embed)
			return (SET_ERROR(EINVAL));

		/* Find snapshot in this dir that matches fromguid. */
		while (obj != 0) {
			error = dsl_dataset_hold_obj(dp, obj, FTAG,
			    &snap);
			if (error != 0)
				return (SET_ERROR(ENODEV));
			if (snap->ds_dir != ds->ds_dir) {
				dsl_dataset_rele(snap, FTAG);
				return (SET_ERROR(ENODEV));
			}
			if (dsl_dataset_phys(snap)->ds_guid == fromguid)
				break;
			obj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
			dsl_dataset_rele(snap, FTAG);
		}
		if (obj == 0)
			return (SET_ERROR(ENODEV));

		if (drba->drba_cookie->drc_force) {
			drba->drba_cookie->drc_fromsnapobj = obj;
		} else {
			/*
			 * If we are not forcing, there must be no
			 * changes since fromsnap.
			 */
			if (dsl_dataset_modified_since_snap(ds, snap)) {
				dsl_dataset_rele(snap, FTAG);
				return (SET_ERROR(ETXTBSY));
			}
			drba->drba_cookie->drc_fromsnapobj =
			    ds->ds_prev->ds_object;
		}

		dsl_dataset_rele(snap, FTAG);
	} else {
		/* if full, then must be forced */
		if (!drba->drba_cookie->drc_force)
			return (SET_ERROR(EEXIST));

		/*
		 * We don't support using zfs recv -F to blow away
		 * encrypted filesystems. This would require the
		 * dsl dir to point to the old encryption key and
		 * the new one at the same time during the receive.
		 */
		if ((!encrypted && raw) || encrypted)
			return (SET_ERROR(EINVAL));

		/*
		 * Perform the same encryption checks we would if
		 * we were creating a new dataset from scratch.
		 */
		if (!raw) {
			boolean_t will_encrypt;

			error = dmu_objset_create_crypt_check(
			    ds->ds_dir->dd_parent, drba->drba_dcp,
			    &will_encrypt);
			if (error != 0)
				return (error);

			if (will_encrypt && embed)
				return (SET_ERROR(EINVAL));
		}

		drba->drba_cookie->drc_fromsnapobj = 0;
	}

	return (0);
}
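
/*
 * "Check" half of the begin dsl sync task: validate the DRR_BEGIN
 * record and its feature flags against the destination pool before
 * any stream data is accepted.
 */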
static int
dmu_recv_begin_check(void *arg, dmu_tx_t *tx)
{
	dmu_recv_begin_arg_t *drba = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
	uint64_t fromguid = drrb->drr_fromguid;
	int flags = drrb->drr_flags;
	ds_hold_flags_t dsflags = 0;
	int error;
	uint64_t featureflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo);
	dsl_dataset_t *ds;
	const char *tofs = drba->drba_cookie->drc_tofs;

	/* already checked */
	ASSERT3U(drrb->drr_magic, ==, DMU_BACKUP_MAGIC);
	ASSERT(!(featureflags & DMU_BACKUP_FEATURE_RESUMING));

	if (DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) ==
	    DMU_COMPOUNDSTREAM ||
	    drrb->drr_type >= DMU_OST_NUMTYPES ||
	    ((flags & DRR_FLAG_CLONE) && drba->drba_origin == NULL))
		return (SET_ERROR(EINVAL));

	/* Verify pool version supports SA if SA_SPILL feature set */
	if ((featureflags & DMU_BACKUP_FEATURE_SA_SPILL) &&
	    spa_version(dp->dp_spa) < SPA_VERSION_SA)
		return (SET_ERROR(ENOTSUP));

	if (drba->drba_cookie->drc_resumable &&
	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_EXTENSIBLE_DATASET))
		return (SET_ERROR(ENOTSUP));

	/*
	 * The receiving code doesn't know how to translate a WRITE_EMBEDDED
	 * record to a plain WRITE record, so the pool must have the
	 * EMBEDDED_DATA feature enabled if the stream has WRITE_EMBEDDED
	 * records. Same with WRITE_EMBEDDED records that use LZ4 compression.
	 */
	if ((featureflags & DMU_BACKUP_FEATURE_EMBED_DATA) &&
	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_EMBEDDED_DATA))
		return (SET_ERROR(ENOTSUP));
	if ((featureflags & DMU_BACKUP_FEATURE_LZ4) &&
	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LZ4_COMPRESS))
		return (SET_ERROR(ENOTSUP));

	/*
	 * The receiving code doesn't know how to translate large blocks
	 * to smaller ones, so the pool must have the LARGE_BLOCKS
	 * feature enabled if the stream has LARGE_BLOCKS. Same with
	 * large dnodes.
	 */
	if ((featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) &&
	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LARGE_BLOCKS))
		return (SET_ERROR(ENOTSUP));
	if ((featureflags & DMU_BACKUP_FEATURE_LARGE_DNODE) &&
	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LARGE_DNODE))
		return (SET_ERROR(ENOTSUP));

	if (featureflags & DMU_BACKUP_FEATURE_RAW) {
		/* raw receives require the encryption feature */
		if (!spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_ENCRYPTION))
			return (SET_ERROR(ENOTSUP));

		/* embedded data is incompatible with encryption and raw recv */
		if (featureflags & DMU_BACKUP_FEATURE_EMBED_DATA)
			return (SET_ERROR(EINVAL));

		/* raw receives require spill block allocation flag */
		if (!(flags & DRR_FLAG_SPILL_BLOCK))
			return (SET_ERROR(ZFS_ERR_SPILL_BLOCK_FLAG_MISSING));
	} else {
		dsflags |= DS_HOLD_FLAG_DECRYPT;
	}

	error = dsl_dataset_hold_flags(dp, tofs, dsflags, FTAG, &ds);
	if (error == 0) {
		/* target fs already exists; recv into temp clone */

		/* Can't recv a clone into an existing fs */
		if (flags & DRR_FLAG_CLONE || drba->drba_origin) {
			dsl_dataset_rele_flags(ds, dsflags, FTAG);
			return (SET_ERROR(EINVAL));
		}

		error = recv_begin_check_existing_impl(drba, ds, fromguid,
		    featureflags);
		dsl_dataset_rele_flags(ds, dsflags, FTAG);
	} else if (error == ENOENT) {
		/* target fs does not exist; must be a full backup or clone */
		char buf[ZFS_MAX_DATASET_NAME_LEN];
		objset_t *os;

		/*
		 * If it's a non-clone incremental, we are missing the
		 * target fs, so fail the recv.
		 */
		if (fromguid != 0 && !(flags & DRR_FLAG_CLONE ||
		    drba->drba_origin))
			return (SET_ERROR(ENOENT));

		/*
		 * If we're receiving a full send as a clone, and it doesn't
		 * contain all the necessary free records and freeobject
		 * records, reject it.
		 */
		if (fromguid == 0 && drba->drba_origin &&
		    !(flags & DRR_FLAG_FREERECORDS))
			return (SET_ERROR(EINVAL));

		/* Open the parent of tofs */
		ASSERT3U(strlen(tofs), <, sizeof (buf));
		(void) strlcpy(buf, tofs, strrchr(tofs, '/') - tofs + 1);
		error = dsl_dataset_hold_flags(dp, buf, dsflags, FTAG, &ds);
		if (error != 0)
			return (error);

		if ((featureflags & DMU_BACKUP_FEATURE_RAW) == 0 &&
		    drba->drba_origin == NULL) {
			boolean_t will_encrypt;

			/*
			 * Check that we aren't breaking any encryption rules
			 * and that we have all the parameters we need to
			 * create an encrypted dataset if necessary. If we are
			 * making an encrypted dataset the stream can't have
			 * embedded data.
			 */
			error = dmu_objset_create_crypt_check(ds->ds_dir,
			    drba->drba_dcp, &will_encrypt);
			if (error != 0) {
				dsl_dataset_rele_flags(ds, dsflags, FTAG);
				return (error);
			}

			if (will_encrypt &&
			    (featureflags & DMU_BACKUP_FEATURE_EMBED_DATA)) {
				dsl_dataset_rele_flags(ds, dsflags, FTAG);
				return (SET_ERROR(EINVAL));
			}
		}

		/*
		 * Check filesystem and snapshot limits before receiving. We'll
		 * recheck snapshot limits again at the end (we create the
		 * filesystems and increment those counts during begin_sync).
		 */
		error = dsl_fs_ss_limit_check(ds->ds_dir, 1,
		    ZFS_PROP_FILESYSTEM_LIMIT, NULL, drba->drba_cred);
		if (error != 0) {
			dsl_dataset_rele_flags(ds, dsflags, FTAG);
			return (error);
		}

		error = dsl_fs_ss_limit_check(ds->ds_dir, 1,
		    ZFS_PROP_SNAPSHOT_LIMIT, NULL, drba->drba_cred);
		if (error != 0) {
			dsl_dataset_rele_flags(ds, dsflags, FTAG);
			return (error);
		}

		/* can't recv below anything but filesystems (eg. no ZVOLs) */
		error = dmu_objset_from_ds(ds, &os);
		if (error != 0) {
			dsl_dataset_rele_flags(ds, dsflags, FTAG);
			return (error);
		}
		if (dmu_objset_type(os) != DMU_OST_ZFS) {
			dsl_dataset_rele_flags(ds, dsflags, FTAG);
			return (SET_ERROR(ZFS_ERR_WRONG_PARENT));
		}

		if (drba->drba_origin != NULL) {
			dsl_dataset_t *origin;

			error = dsl_dataset_hold_flags(dp, drba->drba_origin,
			    dsflags, FTAG, &origin);
			if (error != 0) {
				dsl_dataset_rele_flags(ds, dsflags, FTAG);
				return (error);
			}
			if (!origin->ds_is_snapshot) {
				dsl_dataset_rele_flags(origin, dsflags, FTAG);
				dsl_dataset_rele_flags(ds, dsflags, FTAG);
				return (SET_ERROR(EINVAL));
			}
			if (dsl_dataset_phys(origin)->ds_guid != fromguid &&
			    fromguid != 0) {
				dsl_dataset_rele_flags(origin, dsflags, FTAG);
				dsl_dataset_rele_flags(ds, dsflags, FTAG);
				return (SET_ERROR(ENODEV));
			}
			if (origin->ds_dir->dd_crypto_obj != 0 &&
			    (featureflags & DMU_BACKUP_FEATURE_EMBED_DATA)) {
				dsl_dataset_rele_flags(origin, dsflags, FTAG);
				dsl_dataset_rele_flags(ds, dsflags, FTAG);
				return (SET_ERROR(EINVAL));
			}
			dsl_dataset_rele_flags(origin, dsflags, FTAG);
		}

		dsl_dataset_rele_flags(ds, dsflags, FTAG);
		error = 0;
	}
	return (error);
}
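
/*
 * "Sync" half of the begin task: create the dataset that will receive
 * the stream (a temporary %recv clone when the target already exists,
 * otherwise a brand-new filesystem or clone), mark it inconsistent,
 * and record resume state if the receive is resumable.
 */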
static void
dmu_recv_begin_sync(void *arg, dmu_tx_t *tx)
{
	dmu_recv_begin_arg_t *drba = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	objset_t *mos = dp->dp_meta_objset;
	struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
	const char *tofs = drba->drba_cookie->drc_tofs;
	uint64_t featureflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo);
	dsl_dataset_t *ds, *newds;
	objset_t *os;
	uint64_t dsobj;
	int error;
	ds_hold_flags_t dsflags = 0;
	uint64_t crflags = 0;
	dsl_crypto_params_t dummy_dcp = { 0 };
	dsl_crypto_params_t *dcp = drba->drba_dcp;

	if (drrb->drr_flags & DRR_FLAG_CI_DATA)
		crflags |= DS_FLAG_CI_DATASET;

	if ((featureflags & DMU_BACKUP_FEATURE_RAW) == 0)
		dsflags |= DS_HOLD_FLAG_DECRYPT;

	/*
	 * Raw, non-incremental recvs always use a dummy dcp with
	 * the raw cmd set. Raw incremental recvs do not use a dcp
	 * since the encryption parameters are already set in stone.
	 */
	if (dcp == NULL && drba->drba_cookie->drc_fromsnapobj == 0 &&
	    drba->drba_origin == NULL) {
		ASSERT3P(dcp, ==, NULL);
		dcp = &dummy_dcp;

		if (featureflags & DMU_BACKUP_FEATURE_RAW)
			dcp->cp_cmd = DCP_CMD_RAW_RECV;
	}

	error = dsl_dataset_hold_flags(dp, tofs, dsflags, FTAG, &ds);
	if (error == 0) {
		/* create temporary clone */
		dsl_dataset_t *snap = NULL;

		if (drba->drba_cookie->drc_fromsnapobj != 0) {
			VERIFY0(dsl_dataset_hold_obj(dp,
			    drba->drba_cookie->drc_fromsnapobj, FTAG, &snap));
			ASSERT3P(dcp, ==, NULL);
		}

		dsobj = dsl_dataset_create_sync(ds->ds_dir, recv_clone_name,
		    snap, crflags, drba->drba_cred, dcp, tx);
		if (drba->drba_cookie->drc_fromsnapobj != 0)
			dsl_dataset_rele(snap, FTAG);
		dsl_dataset_rele_flags(ds, dsflags, FTAG);
	} else {
		dsl_dir_t *dd;
		const char *tail;
		dsl_dataset_t *origin = NULL;

		VERIFY0(dsl_dir_hold(dp, tofs, FTAG, &dd, &tail));

		if (drba->drba_origin != NULL) {
			VERIFY0(dsl_dataset_hold(dp, drba->drba_origin,
			    FTAG, &origin));
			ASSERT3P(dcp, ==, NULL);
		}

		/* Create new dataset. */
		dsobj = dsl_dataset_create_sync(dd, strrchr(tofs, '/') + 1,
		    origin, crflags, drba->drba_cred, dcp, tx);
		if (origin != NULL)
			dsl_dataset_rele(origin, FTAG);
		dsl_dir_rele(dd, FTAG);
		drba->drba_cookie->drc_newfs = B_TRUE;
	}

	VERIFY0(dsl_dataset_own_obj(dp, dsobj, dsflags, dmu_recv_tag, &newds));
	VERIFY0(dmu_objset_from_ds(newds, &os));

	if (drba->drba_cookie->drc_resumable) {
		dsl_dataset_zapify(newds, tx);
		if (drrb->drr_fromguid != 0) {
			VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_FROMGUID,
			    8, 1, &drrb->drr_fromguid, tx));
		}
		VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_TOGUID,
		    8, 1, &drrb->drr_toguid, tx));
		VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_TONAME,
		    1, strlen(drrb->drr_toname) + 1, drrb->drr_toname, tx));
		uint64_t one = 1;
		uint64_t zero = 0;
		VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_OBJECT,
		    8, 1, &one, tx));
		VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_OFFSET,
		    8, 1, &zero, tx));
		VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_BYTES,
		    8, 1, &zero, tx));
		if (featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) {
			VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_LARGEBLOCK,
			    8, 1, &one, tx));
		}
		if (featureflags & DMU_BACKUP_FEATURE_EMBED_DATA) {
			VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_EMBEDOK,
			    8, 1, &one, tx));
		}
		if (featureflags & DMU_BACKUP_FEATURE_COMPRESSED) {
			VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_COMPRESSOK,
			    8, 1, &one, tx));
		}
		if (featureflags & DMU_BACKUP_FEATURE_RAW) {
			VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_RAWOK,
			    8, 1, &one, tx));
		}
	}

	/*
	 * Usually the os->os_encrypted value is tied to the presence of a
	 * DSL Crypto Key object in the dd. However, that will not be received
	 * until dmu_recv_stream(), so we set the value manually for now.
	 */
	if (featureflags & DMU_BACKUP_FEATURE_RAW) {
		os->os_encrypted = B_TRUE;
		drba->drba_cookie->drc_raw = B_TRUE;
	}

	dmu_buf_will_dirty(newds->ds_dbuf, tx);
	dsl_dataset_phys(newds)->ds_flags |= DS_FLAG_INCONSISTENT;

	/*
	 * If we actually created a non-clone, we need to create the objset
	 * in our new dataset. If this is a raw send we postpone this until
	 * dmu_recv_stream() so that we can allocate the metadnode with the
	 * properties from the DRR_BEGIN payload.
	 */
	rrw_enter(&newds->ds_bp_rwlock, RW_READER, FTAG);
	if (BP_IS_HOLE(dsl_dataset_get_blkptr(newds)) &&
	    (featureflags & DMU_BACKUP_FEATURE_RAW) == 0) {
		(void) dmu_objset_create_impl(dp->dp_spa,
		    newds, dsl_dataset_get_blkptr(newds), drrb->drr_type, tx);
	}
	rrw_exit(&newds->ds_bp_rwlock, FTAG);

	drba->drba_cookie->drc_ds = newds;

	spa_history_log_internal_ds(newds, "receive", tx, "");
}
static int
dmu_recv_resume_begin_check(void *arg, dmu_tx_t *tx)
{
	dmu_recv_begin_arg_t *drba = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
	int error;
	ds_hold_flags_t dsflags = 0;
	uint64_t featureflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo);
	dsl_dataset_t *ds;
	const char *tofs = drba->drba_cookie->drc_tofs;

	/* already checked */
	ASSERT3U(drrb->drr_magic, ==, DMU_BACKUP_MAGIC);
	ASSERT(featureflags & DMU_BACKUP_FEATURE_RESUMING);

	if (DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) ==
	    DMU_COMPOUNDSTREAM ||
	    drrb->drr_type >= DMU_OST_NUMTYPES)
		return (SET_ERROR(EINVAL));

	/* Verify pool version supports SA if SA_SPILL feature set */
	if ((featureflags & DMU_BACKUP_FEATURE_SA_SPILL) &&
	    spa_version(dp->dp_spa) < SPA_VERSION_SA)
		return (SET_ERROR(ENOTSUP));

	/*
	 * The receiving code doesn't know how to translate a WRITE_EMBEDDED
	 * record to a plain WRITE record, so the pool must have the
	 * EMBEDDED_DATA feature enabled if the stream has WRITE_EMBEDDED
	 * records. Same with WRITE_EMBEDDED records that use LZ4 compression.
	 */
	if ((featureflags & DMU_BACKUP_FEATURE_EMBED_DATA) &&
	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_EMBEDDED_DATA))
		return (SET_ERROR(ENOTSUP));
	if ((featureflags & DMU_BACKUP_FEATURE_LZ4) &&
	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LZ4_COMPRESS))
		return (SET_ERROR(ENOTSUP));

	/*
	 * The receiving code doesn't know how to translate large blocks
	 * to smaller ones, so the pool must have the LARGE_BLOCKS
	 * feature enabled if the stream has LARGE_BLOCKS. Same with
	 * large dnodes.
	 */
	if ((featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) &&
	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LARGE_BLOCKS))
		return (SET_ERROR(ENOTSUP));
	if ((featureflags & DMU_BACKUP_FEATURE_LARGE_DNODE) &&
	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LARGE_DNODE))
		return (SET_ERROR(ENOTSUP));

	/* 6 extra bytes for /%recv */
	char recvname[ZFS_MAX_DATASET_NAME_LEN + 6];
	(void) snprintf(recvname, sizeof (recvname), "%s/%s",
	    tofs, recv_clone_name);

	if (featureflags & DMU_BACKUP_FEATURE_RAW) {
		/* raw receives require spill block allocation flag */
		if (!(drrb->drr_flags & DRR_FLAG_SPILL_BLOCK))
			return (SET_ERROR(ZFS_ERR_SPILL_BLOCK_FLAG_MISSING));
	} else {
		dsflags |= DS_HOLD_FLAG_DECRYPT;
	}

	if (dsl_dataset_hold_flags(dp, recvname, dsflags, FTAG, &ds) != 0) {
		/* %recv does not exist; continue in tofs */
		error = dsl_dataset_hold_flags(dp, tofs, dsflags, FTAG, &ds);
		if (error != 0)
			return (error);
	}

	/* check that ds is marked inconsistent */
	if (!DS_IS_INCONSISTENT(ds)) {
		dsl_dataset_rele_flags(ds, dsflags, FTAG);
		return (SET_ERROR(EINVAL));
	}

	/* check that there is resuming data, and that the toguid matches */
	if (!dsl_dataset_is_zapified(ds)) {
		dsl_dataset_rele_flags(ds, dsflags, FTAG);
		return (SET_ERROR(EINVAL));
	}
	uint64_t val;
	error = zap_lookup(dp->dp_meta_objset, ds->ds_object,
	    DS_FIELD_RESUME_TOGUID, sizeof (val), 1, &val);
	if (error != 0 || drrb->drr_toguid != val) {
		dsl_dataset_rele_flags(ds, dsflags, FTAG);
		return (SET_ERROR(EINVAL));
	}

	/*
	 * Check if the receive is still running. If so, it will be owned.
	 * Note that nothing else can own the dataset (e.g. after the receive
	 * fails) because it will be marked inconsistent.
	 */
	if (dsl_dataset_has_owner(ds)) {
		dsl_dataset_rele_flags(ds, dsflags, FTAG);
		return (SET_ERROR(EBUSY));
	}

	/* There should not be any snapshots of this fs yet. */
	if (ds->ds_prev != NULL && ds->ds_prev->ds_dir == ds->ds_dir) {
		dsl_dataset_rele_flags(ds, dsflags, FTAG);
		return (SET_ERROR(EINVAL));
	}

	/*
	 * Note: resume point will be checked when we process the first WRITE
	 * record.
	 */

	/* check that the origin matches */
	val = 0;
	(void) zap_lookup(dp->dp_meta_objset, ds->ds_object,
	    DS_FIELD_RESUME_FROMGUID, sizeof (val), 1, &val);
	if (drrb->drr_fromguid != val) {
		dsl_dataset_rele_flags(ds, dsflags, FTAG);
		return (SET_ERROR(EINVAL));
	}

	dsl_dataset_rele_flags(ds, dsflags, FTAG);
	return (0);
}
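
/*
 * "Sync" half for resumed receives: re-own the partially received
 * dataset (temporarily clearing DS_FLAG_INCONSISTENT so ownership can
 * be taken) and hand it back to the caller through the cookie.
 */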
static void
dmu_recv_resume_begin_sync(void *arg, dmu_tx_t *tx)
{
	dmu_recv_begin_arg_t *drba = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	const char *tofs = drba->drba_cookie->drc_tofs;
	struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
	uint64_t featureflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo);
	dsl_dataset_t *ds;
	objset_t *os;
	ds_hold_flags_t dsflags = 0;
	uint64_t dsobj;
	/* 6 extra bytes for /%recv */
	char recvname[ZFS_MAX_DATASET_NAME_LEN + 6];

	(void) snprintf(recvname, sizeof (recvname), "%s/%s",
	    tofs, recv_clone_name);

	if (featureflags & DMU_BACKUP_FEATURE_RAW) {
		drba->drba_cookie->drc_raw = B_TRUE;
	} else {
		dsflags |= DS_HOLD_FLAG_DECRYPT;
	}

	if (dsl_dataset_hold_flags(dp, recvname, dsflags, FTAG, &ds) != 0) {
		/* %recv does not exist; continue in tofs */
		VERIFY0(dsl_dataset_hold_flags(dp, tofs, dsflags, FTAG, &ds));
		drba->drba_cookie->drc_newfs = B_TRUE;
	}

	/* clear the inconsistent flag so that we can own it */
	ASSERT(DS_IS_INCONSISTENT(ds));
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	dsl_dataset_phys(ds)->ds_flags &= ~DS_FLAG_INCONSISTENT;
	dsobj = ds->ds_object;
	dsl_dataset_rele_flags(ds, dsflags, FTAG);

	VERIFY0(dsl_dataset_own_obj(dp, dsobj, dsflags, dmu_recv_tag, &ds));
	VERIFY0(dmu_objset_from_ds(ds, &os));

	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	dsl_dataset_phys(ds)->ds_flags |= DS_FLAG_INCONSISTENT;

	rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
	ASSERT(!BP_IS_HOLE(dsl_dataset_get_blkptr(ds)) ||
	    drba->drba_cookie->drc_raw);
	rrw_exit(&ds->ds_bp_rwlock, FTAG);

	drba->drba_cookie->drc_ds = ds;

	spa_history_log_internal_ds(ds, "resume receive", tx, "");
}
/*
 * NB: callers *MUST* call dmu_recv_stream() if dmu_recv_begin()
 * succeeds; otherwise we will leak the holds on the datasets.
 */
int
dmu_recv_begin(char *tofs, char *tosnap, dmu_replay_record_t *drr_begin,
    boolean_t force, boolean_t resumable, nvlist_t *localprops,
    nvlist_t *hidden_args, char *origin, dmu_recv_cookie_t *drc)
{
	dmu_recv_begin_arg_t drba = { 0 };
	int err;

	bzero(drc, sizeof (dmu_recv_cookie_t));
	drc->drc_drr_begin = drr_begin;
	drc->drc_drrb = &drr_begin->drr_u.drr_begin;
	drc->drc_tosnap = tosnap;
	drc->drc_tofs = tofs;
	drc->drc_force = force;
	drc->drc_resumable = resumable;
	drc->drc_cred = CRED();
	drc->drc_clone = (origin != NULL);

	if (drc->drc_drrb->drr_magic == BSWAP_64(DMU_BACKUP_MAGIC)) {
		drc->drc_byteswap = B_TRUE;
		(void) fletcher_4_incremental_byteswap(drr_begin,
		    sizeof (dmu_replay_record_t), &drc->drc_cksum);
		byteswap_record(drr_begin);
	} else if (drc->drc_drrb->drr_magic == DMU_BACKUP_MAGIC) {
		(void) fletcher_4_incremental_native(drr_begin,
		    sizeof (dmu_replay_record_t), &drc->drc_cksum);
	} else {
		return (SET_ERROR(EINVAL));
	}

	if (drc->drc_drrb->drr_flags & DRR_FLAG_SPILL_BLOCK)
		drc->drc_spill = B_TRUE;

	drba.drba_origin = origin;
	drba.drba_cookie = drc;
	drba.drba_cred = CRED();

	if (DMU_GET_FEATUREFLAGS(drc->drc_drrb->drr_versioninfo) &
	    DMU_BACKUP_FEATURE_RESUMING) {
		return (dsl_sync_task(tofs,
		    dmu_recv_resume_begin_check, dmu_recv_resume_begin_sync,
		    &drba, 5, ZFS_SPACE_CHECK_NORMAL));
	}

	/*
	 * For non-raw, non-incremental, non-resuming receives the
	 * user can specify encryption parameters on the command line
	 * with "zfs recv -o". For these receives we create a dcp and
	 * pass it to the sync task. Creating the dcp will implicitly
	 * remove the encryption params from the localprops nvlist,
	 * which avoids errors when trying to set these normally
	 * read-only properties. Any other kind of receive that
	 * attempts to set these properties will fail as a result.
	 */
	if ((DMU_GET_FEATUREFLAGS(drc->drc_drrb->drr_versioninfo) &
	    DMU_BACKUP_FEATURE_RAW) == 0 &&
	    origin == NULL && drc->drc_drrb->drr_fromguid == 0) {
		err = dsl_crypto_params_create_nvlist(DCP_CMD_NONE,
		    localprops, hidden_args, &drba.drba_dcp);
		if (err != 0)
			return (err);
	}

	err = dsl_sync_task(tofs,
	    dmu_recv_begin_check, dmu_recv_begin_sync,
	    &drba, 5, ZFS_SPACE_CHECK_NORMAL);
	dsl_crypto_params_free(drba.drba_dcp, !!err);

	return (err);
}
{
820 dmu_replay_record_t header
;
821 void *payload
; /* Pointer to a buffer containing the payload */
823 * If the record is a write, pointer to the arc_buf_t containing the
828 uint64_t bytes_read
; /* bytes read from stream when record created */
829 boolean_t eos_marker
; /* Marks the end of the stream */
833 struct receive_writer_arg
{
839 * These three args are used to signal to the main thread that we're
847 /* A map from guid to dataset to help handle dedup'd streams. */
848 avl_tree_t
*guid_to_ds_map
;
850 boolean_t raw
; /* DMU_BACKUP_FEATURE_RAW set */
851 boolean_t spill
; /* DRR_FLAG_SPILL_BLOCK set */
852 uint64_t last_object
;
853 uint64_t last_offset
;
854 uint64_t max_object
; /* highest object ID referenced in stream */
855 uint64_t bytes_read
; /* bytes read when current record created */
857 /* Encryption parameters for the last received DRR_OBJECT_RANGE */
858 boolean_t or_crypt_params_present
;
859 uint64_t or_firstobj
;
860 uint64_t or_numslots
;
861 uint8_t or_salt
[ZIO_DATA_SALT_LEN
];
862 uint8_t or_iv
[ZIO_DATA_IV_LEN
];
863 uint8_t or_mac
[ZIO_DATA_MAC_LEN
];
864 boolean_t or_byteorder
;
868 list_t list
; /* List of struct receive_objnode. */
870 * Last object looked up. Used to assert that objects are being looked
871 * up in ascending order.
873 uint64_t last_lookup
;
876 struct receive_objnode
{
883 vnode_t
*vp
; /* The vnode to read the stream from */
884 uint64_t voff
; /* The current offset in the stream */
887 * A record that has had its payload read in, but hasn't yet been handed
888 * off to the worker thread.
890 struct receive_record_arg
*rrd
;
891 /* A record that has had its header read in, but not its payload. */
892 struct receive_record_arg
*next_rrd
;
894 zio_cksum_t prev_cksum
;
898 uint64_t featureflags
;
899 /* Sorted list of objects not to issue prefetches for. */
900 struct objlist ignore_objlist
;
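
/*
 * Map from dataset GUID to an owned dataset, used to resolve
 * DRR_WRITE_BYREF records in dedup'ed streams.
 */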
typedef struct guid_map_entry {
	uint64_t guid;
	boolean_t raw;
	dsl_dataset_t *gme_ds;
	avl_node_t avlnode;
} guid_map_entry_t;

static int
guid_compare(const void *arg1, const void *arg2)
{
	const guid_map_entry_t *gmep1 = (const guid_map_entry_t *)arg1;
	const guid_map_entry_t *gmep2 = (const guid_map_entry_t *)arg2;

	return (AVL_CMP(gmep1->guid, gmep2->guid));
}
static void
free_guid_map_onexit(void *arg)
{
	avl_tree_t *ca = arg;
	void *cookie = NULL;
	guid_map_entry_t *gmep;

	while ((gmep = avl_destroy_nodes(ca, &cookie)) != NULL) {
		ds_hold_flags_t dsflags = DS_HOLD_FLAG_DECRYPT;

		if (gmep->raw) {
			gmep->gme_ds->ds_objset->os_raw_receive = B_FALSE;
			dsflags &= ~DS_HOLD_FLAG_DECRYPT;
		}

		dsl_dataset_disown(gmep->gme_ds, dsflags, gmep);
		kmem_free(gmep, sizeof (guid_map_entry_t));
	}
	avl_destroy(ca);
	kmem_free(ca, sizeof (avl_tree_t));
}
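
/*
 * Read len bytes from the stream into buf, looping over vn_rdwr()
 * until the request is satisfied or an error occurs.
 */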
static int
receive_read(struct receive_arg *ra, int len, void *buf)
{
	int done = 0;

	/*
	 * The code doesn't rely on this (lengths being multiples of 8). See
	 * comment in dump_bytes.
	 */
	ASSERT(len % 8 == 0 ||
	    (ra->featureflags & DMU_BACKUP_FEATURE_RAW) != 0);

	while (done < len) {
		ssize_t resid;

		ra->err = vn_rdwr(UIO_READ, ra->vp,
		    (char *)buf + done, len - done,
		    ra->voff, UIO_SYSSPACE, FAPPEND,
		    RLIM64_INFINITY, CRED(), &resid);

		if (resid == len - done) {
			/*
			 * Note: ECKSUM indicates that the receive
			 * was interrupted and can potentially be resumed.
			 */
			ra->err = SET_ERROR(ECKSUM);
		}
		ra->voff += len - done - resid;
		done = len - resid;
		if (ra->err != 0)
			return (ra->err);
	}

	ra->bytes_read += len;

	ASSERT3U(done, ==, len);
	return (0);
}
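
/*
 * Byteswap a replay record in place; called when the stream's magic
 * number indicates the opposite byte order from the receiving host.
 */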
static void
byteswap_record(dmu_replay_record_t *drr)
{
#define	DO64(X) (drr->drr_u.X = BSWAP_64(drr->drr_u.X))
#define	DO32(X) (drr->drr_u.X = BSWAP_32(drr->drr_u.X))
	drr->drr_type = BSWAP_32(drr->drr_type);
	drr->drr_payloadlen = BSWAP_32(drr->drr_payloadlen);

	switch (drr->drr_type) {
	case DRR_BEGIN:
		DO64(drr_begin.drr_magic);
		DO64(drr_begin.drr_versioninfo);
		DO64(drr_begin.drr_creation_time);
		DO32(drr_begin.drr_type);
		DO32(drr_begin.drr_flags);
		DO64(drr_begin.drr_toguid);
		DO64(drr_begin.drr_fromguid);
		break;
	case DRR_OBJECT:
		DO64(drr_object.drr_object);
		DO32(drr_object.drr_type);
		DO32(drr_object.drr_bonustype);
		DO32(drr_object.drr_blksz);
		DO32(drr_object.drr_bonuslen);
		DO32(drr_object.drr_raw_bonuslen);
		DO64(drr_object.drr_toguid);
		DO64(drr_object.drr_maxblkid);
		break;
	case DRR_FREEOBJECTS:
		DO64(drr_freeobjects.drr_firstobj);
		DO64(drr_freeobjects.drr_numobjs);
		DO64(drr_freeobjects.drr_toguid);
		break;
	case DRR_WRITE:
		DO64(drr_write.drr_object);
		DO32(drr_write.drr_type);
		DO64(drr_write.drr_offset);
		DO64(drr_write.drr_logical_size);
		DO64(drr_write.drr_toguid);
		ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_write.drr_key.ddk_cksum);
		DO64(drr_write.drr_key.ddk_prop);
		DO64(drr_write.drr_compressed_size);
		break;
	case DRR_WRITE_BYREF:
		DO64(drr_write_byref.drr_object);
		DO64(drr_write_byref.drr_offset);
		DO64(drr_write_byref.drr_length);
		DO64(drr_write_byref.drr_toguid);
		DO64(drr_write_byref.drr_refguid);
		DO64(drr_write_byref.drr_refobject);
		DO64(drr_write_byref.drr_refoffset);
		ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_write_byref.
		    drr_key.ddk_cksum);
		DO64(drr_write_byref.drr_key.ddk_prop);
		break;
	case DRR_WRITE_EMBEDDED:
		DO64(drr_write_embedded.drr_object);
		DO64(drr_write_embedded.drr_offset);
		DO64(drr_write_embedded.drr_length);
		DO64(drr_write_embedded.drr_toguid);
		DO32(drr_write_embedded.drr_lsize);
		DO32(drr_write_embedded.drr_psize);
		break;
	case DRR_FREE:
		DO64(drr_free.drr_object);
		DO64(drr_free.drr_offset);
		DO64(drr_free.drr_length);
		DO64(drr_free.drr_toguid);
		break;
	case DRR_SPILL:
		DO64(drr_spill.drr_object);
		DO64(drr_spill.drr_length);
		DO64(drr_spill.drr_toguid);
		DO64(drr_spill.drr_compressed_size);
		DO32(drr_spill.drr_type);
		break;
	case DRR_OBJECT_RANGE:
		DO64(drr_object_range.drr_firstobj);
		DO64(drr_object_range.drr_numslots);
		DO64(drr_object_range.drr_toguid);
		break;
	case DRR_END:
		DO64(drr_end.drr_toguid);
		ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_end.drr_checksum);
		break;
	default:
		break;
	}

	if (drr->drr_type != DRR_BEGIN) {
		ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_checksum.drr_checksum);
	}

#undef DO64
#undef DO32
}
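
/*
 * Deduce the number of block pointers from the bonus buffer: an SA
 * bonus buffer implies a single blkptr, otherwise unused bonus space
 * is given over to block pointers.
 */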
static inline uint8_t
deduce_nblkptr(dmu_object_type_t bonus_type, uint64_t bonus_size)
{
	if (bonus_type == DMU_OT_SA) {
		return (1);
	} else {
		return (1 +
		    ((DN_OLD_MAX_BONUSLEN -
		    MIN(DN_OLD_MAX_BONUSLEN, bonus_size)) >> SPA_BLKPTRSHIFT));
	}
}
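
/*
 * Record the resume point (object, offset, bytes read) in the
 * dataset's in-core state for this txg, so an interrupted resumable
 * receive can be restarted from here.
 */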
static void
save_resume_state(struct receive_writer_arg *rwa,
    uint64_t object, uint64_t offset, dmu_tx_t *tx)
{
	int txgoff = dmu_tx_get_txg(tx) & TXG_MASK;

	if (!rwa->resumable)
		return;

	/*
	 * We use ds_resume_bytes[] != 0 to indicate that we need to
	 * update this on disk, so it must not be 0.
	 */
	ASSERT(rwa->bytes_read != 0);

	/*
	 * We only resume from write records, which have a valid
	 * (non-meta-dnode) object number.
	 */
	ASSERT(object != 0);

	/*
	 * For resuming to work correctly, we must receive records in order,
	 * sorted by object,offset. This is checked by the callers, but
	 * assert it here for good measure.
	 */
	ASSERT3U(object, >=, rwa->os->os_dsl_dataset->ds_resume_object[txgoff]);
	ASSERT(object != rwa->os->os_dsl_dataset->ds_resume_object[txgoff] ||
	    offset >= rwa->os->os_dsl_dataset->ds_resume_offset[txgoff]);
	ASSERT3U(rwa->bytes_read, >=,
	    rwa->os->os_dsl_dataset->ds_resume_bytes[txgoff]);

	rwa->os->os_dsl_dataset->ds_resume_object[txgoff] = object;
	rwa->os->os_dsl_dataset->ds_resume_offset[txgoff] = offset;
	rwa->os->os_dsl_dataset->ds_resume_bytes[txgoff] = rwa->bytes_read;
}
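
/*
 * Apply a DRR_OBJECT record: validate it, free or reclaim a
 * conflicting existing object where necessary, allocate the dnode,
 * and copy in the bonus buffer payload.
 */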
static int
receive_object(struct receive_writer_arg *rwa, struct drr_object *drro,
    void *data)
{
	dmu_object_info_t doi;
	dmu_tx_t *tx;
	uint64_t object;
	int err;
	uint8_t dn_slots = drro->drr_dn_slots != 0 ?
	    drro->drr_dn_slots : DNODE_MIN_SLOTS;

	if (drro->drr_type == DMU_OT_NONE ||
	    !DMU_OT_IS_VALID(drro->drr_type) ||
	    !DMU_OT_IS_VALID(drro->drr_bonustype) ||
	    drro->drr_checksumtype >= ZIO_CHECKSUM_FUNCTIONS ||
	    drro->drr_compress >= ZIO_COMPRESS_FUNCTIONS ||
	    P2PHASE(drro->drr_blksz, SPA_MINBLOCKSIZE) ||
	    drro->drr_blksz < SPA_MINBLOCKSIZE ||
	    drro->drr_blksz > spa_maxblocksize(dmu_objset_spa(rwa->os)) ||
	    drro->drr_bonuslen >
	    DN_BONUS_SIZE(spa_maxdnodesize(dmu_objset_spa(rwa->os))) ||
	    dn_slots >
	    (spa_maxdnodesize(dmu_objset_spa(rwa->os)) >> DNODE_SHIFT)) {
		return (SET_ERROR(EINVAL));
	}

	if (rwa->raw) {
		/*
		 * We should have received a DRR_OBJECT_RANGE record
		 * containing this block and stored it in rwa.
		 */
		if (drro->drr_object < rwa->or_firstobj ||
		    drro->drr_object >= rwa->or_firstobj + rwa->or_numslots ||
		    drro->drr_raw_bonuslen < drro->drr_bonuslen ||
		    drro->drr_indblkshift > SPA_MAXBLOCKSHIFT ||
		    drro->drr_nlevels > DN_MAX_LEVELS ||
		    drro->drr_nblkptr > DN_MAX_NBLKPTR ||
		    DN_SLOTS_TO_BONUSLEN(dn_slots) <
		    drro->drr_raw_bonuslen)
			return (SET_ERROR(EINVAL));
	} else {
		/*
		 * The DRR_OBJECT_SPILL flag is valid when the DRR_BEGIN
		 * record indicates this by setting DRR_FLAG_SPILL_BLOCK.
		 */
		if (((drro->drr_flags & ~(DRR_OBJECT_SPILL))) ||
		    (!rwa->spill && DRR_OBJECT_HAS_SPILL(drro->drr_flags))) {
			return (SET_ERROR(EINVAL));
		}

		if (drro->drr_raw_bonuslen != 0 || drro->drr_nblkptr != 0 ||
		    drro->drr_indblkshift != 0 || drro->drr_nlevels != 0) {
			return (SET_ERROR(EINVAL));
		}
	}

	err = dmu_object_info(rwa->os, drro->drr_object, &doi);
	if (err != 0 && err != ENOENT && err != EEXIST)
		return (SET_ERROR(EINVAL));

	if (drro->drr_object > rwa->max_object)
		rwa->max_object = drro->drr_object;

	/*
	 * If we are losing blkptrs or changing the block size this must
	 * be a new file instance. We must clear out the previous file
	 * contents before we can change this type of metadata in the dnode.
	 * Raw receives will also check that the indirect structure of the
	 * dnode hasn't changed.
	 */
	if (err == 0) {
		uint32_t indblksz = drro->drr_indblkshift ?
		    1ULL << drro->drr_indblkshift : 0;
		int nblkptr = deduce_nblkptr(drro->drr_bonustype,
		    drro->drr_bonuslen);
		boolean_t did_free = B_FALSE;

		object = drro->drr_object;

		/* nblkptr should be bounded by the bonus size and type */
		if (rwa->raw && nblkptr != drro->drr_nblkptr)
			return (SET_ERROR(EINVAL));

		/*
		 * Check for indicators that the object was freed and
		 * reallocated. For all sends, these indicators are:
		 *     - A changed block size
		 *     - A smaller nblkptr
		 *     - A changed dnode size
		 * For raw sends we also check a few other fields to
		 * ensure we are preserving the objset structure exactly
		 * as it was on the receive side:
		 *     - A changed indirect block size
		 *     - A smaller nlevels
		 */
		if (drro->drr_blksz != doi.doi_data_block_size ||
		    nblkptr < doi.doi_nblkptr ||
		    dn_slots != doi.doi_dnodesize >> DNODE_SHIFT ||
		    (rwa->raw &&
		    (indblksz != doi.doi_metadata_block_size ||
		    drro->drr_nlevels < doi.doi_indirection))) {
			err = dmu_free_long_range(rwa->os,
			    drro->drr_object, 0, DMU_OBJECT_END);
			if (err != 0)
				return (SET_ERROR(EINVAL));
			else
				did_free = B_TRUE;
		}

		/*
		 * The dmu does not currently support decreasing nlevels
		 * or changing the number of dnode slots on an object. For
		 * non-raw sends, this does not matter and the new object
		 * can just use the previous one's nlevels. For raw sends,
		 * however, the structure of the received dnode (including
		 * nlevels and dnode slots) must match that of the send
		 * side. Therefore, instead of using dmu_object_reclaim(),
		 * we must free the object completely and call
		 * dmu_object_claim_dnsize() instead.
		 */
		if ((rwa->raw && drro->drr_nlevels < doi.doi_indirection) ||
		    dn_slots != doi.doi_dnodesize >> DNODE_SHIFT) {
			err = dmu_free_long_object(rwa->os, drro->drr_object);
			if (err != 0)
				return (SET_ERROR(EINVAL));

			txg_wait_synced(dmu_objset_pool(rwa->os), 0);
			object = DMU_NEW_OBJECT;
		}

		/*
		 * For raw receives, free everything beyond the new incoming
		 * maxblkid. Normally this would be done with a DRR_FREE
		 * record that would come after this DRR_OBJECT record is
		 * processed. However, for raw receives we manually set the
		 * maxblkid from the drr_maxblkid and so we must first free
		 * everything above that blkid to ensure the DMU is always
		 * consistent with itself. We will never free the first block
		 * of the object here because a maxblkid of 0 could indicate
		 * an object with a single block or one with no blocks. This
		 * free may be skipped when dmu_free_long_range() was called
		 * above since it covers the entire object's contents.
		 */
		if (rwa->raw && object != DMU_NEW_OBJECT && !did_free) {
			err = dmu_free_long_range(rwa->os, drro->drr_object,
			    (drro->drr_maxblkid + 1) * doi.doi_data_block_size,
			    DMU_OBJECT_END);
			if (err != 0)
				return (SET_ERROR(EINVAL));
		}
	} else if (err == EEXIST) {
		/*
		 * The object requested is currently an interior slot of a
		 * multi-slot dnode. This will be resolved when the next txg
		 * is synced out, since the send stream will have told us
		 * to free this slot when we freed the associated dnode
		 * earlier in the stream.
		 */
		txg_wait_synced(dmu_objset_pool(rwa->os), 0);

		if (dmu_object_info(rwa->os, drro->drr_object, NULL) != ENOENT)
			return (SET_ERROR(EINVAL));

		/* object was freed and we are about to allocate a new one */
		object = DMU_NEW_OBJECT;
	} else {
		/* object is free and we are about to allocate a new one */
		object = DMU_NEW_OBJECT;
	}

	/*
	 * If this is a multi-slot dnode there is a chance that this
	 * object will expand into a slot that is already used by
	 * another object from the previous snapshot. We must free
	 * these objects before we attempt to allocate the new dnode.
	 */
	if (dn_slots > 1) {
		boolean_t need_sync = B_FALSE;

		for (uint64_t slot = drro->drr_object + 1;
		    slot < drro->drr_object + dn_slots;
		    slot++) {
			dmu_object_info_t slot_doi;

			err = dmu_object_info(rwa->os, slot, &slot_doi);
			if (err == ENOENT || err == EEXIST)
				continue;
			else if (err != 0)
				return (err);

			err = dmu_free_long_object(rwa->os, slot);
			if (err != 0)
				return (err);

			need_sync = B_TRUE;
		}

		if (need_sync)
			txg_wait_synced(dmu_objset_pool(rwa->os), 0);
	}

	tx = dmu_tx_create(rwa->os);
	dmu_tx_hold_bonus(tx, object);
	dmu_tx_hold_write(tx, object, 0, 0);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err != 0) {
		dmu_tx_abort(tx);
		return (err);
	}

	if (object == DMU_NEW_OBJECT) {
		/* Currently free, wants to be allocated */
		err = dmu_object_claim_dnsize(rwa->os, drro->drr_object,
		    drro->drr_type, drro->drr_blksz,
		    drro->drr_bonustype, drro->drr_bonuslen,
		    dn_slots << DNODE_SHIFT, tx);
	} else if (drro->drr_type != doi.doi_type ||
	    drro->drr_blksz != doi.doi_data_block_size ||
	    drro->drr_bonustype != doi.doi_bonus_type ||
	    drro->drr_bonuslen != doi.doi_bonus_size) {
		/* Currently allocated, but with different properties */
		err = dmu_object_reclaim_dnsize(rwa->os, drro->drr_object,
		    drro->drr_type, drro->drr_blksz,
		    drro->drr_bonustype, drro->drr_bonuslen,
		    dn_slots << DNODE_SHIFT, rwa->spill ?
		    DRR_OBJECT_HAS_SPILL(drro->drr_flags) : B_FALSE, tx);
	} else if (rwa->spill && !DRR_OBJECT_HAS_SPILL(drro->drr_flags)) {
		/*
		 * Currently allocated, the existing version of this object
		 * may reference a spill block that is no longer allocated
		 * at the source and needs to be freed.
		 */
		err = dmu_object_rm_spill(rwa->os, drro->drr_object, tx);
	}

	if (err != 0) {
		dmu_tx_commit(tx);
		return (SET_ERROR(EINVAL));
	}

	if (rwa->or_crypt_params_present) {
		/*
		 * Set the crypt params for the buffer associated with this
		 * range of dnodes. This causes the blkptr_t to have the
		 * same crypt params (byteorder, salt, iv, mac) as on the
		 * sending side.
		 *
		 * Since we are committing this tx now, it is possible for
		 * the dnode block to end up on-disk with the incorrect MAC,
		 * if subsequent objects in this block are received in a
		 * different txg. However, since the dataset is marked as
		 * inconsistent, no code paths will do a non-raw read (or
		 * decrypt the block / verify the MAC). The receive code and
		 * scrub code can safely do raw reads and verify the
		 * checksum. They don't need to verify the MAC.
		 */
		dmu_buf_t *db = NULL;
		uint64_t offset = rwa->or_firstobj * DNODE_MIN_SIZE;

		err = dmu_buf_hold_by_dnode(DMU_META_DNODE(rwa->os),
		    offset, FTAG, &db, DMU_READ_PREFETCH | DMU_READ_NO_DECRYPT);
		if (err != 0) {
			dmu_tx_commit(tx);
			return (SET_ERROR(EINVAL));
		}

		dmu_buf_set_crypt_params(db, rwa->or_byteorder,
		    rwa->or_salt, rwa->or_iv, rwa->or_mac, tx);

		dmu_buf_rele(db, FTAG);

		rwa->or_crypt_params_present = B_FALSE;
	}

	dmu_object_set_checksum(rwa->os, drro->drr_object,
	    drro->drr_checksumtype, tx);
	dmu_object_set_compress(rwa->os, drro->drr_object,
	    drro->drr_compress, tx);

	/* handle more restrictive dnode structuring for raw recvs */
	if (rwa->raw) {
		/*
		 * Set the indirect block size, block shift, nlevels.
		 * This will not fail because we ensured all of the
		 * blocks were freed earlier if this is a new object.
		 * For non-new objects block size and indirect block
		 * shift cannot change and nlevels can only increase.
		 */
		VERIFY0(dmu_object_set_blocksize(rwa->os, drro->drr_object,
		    drro->drr_blksz, drro->drr_indblkshift, tx));
		VERIFY0(dmu_object_set_nlevels(rwa->os, drro->drr_object,
		    drro->drr_nlevels, tx));

		/*
		 * Set the maxblkid. This will always succeed because
		 * we freed all blocks beyond the new maxblkid above.
		 */
		VERIFY0(dmu_object_set_maxblkid(rwa->os, drro->drr_object,
		    drro->drr_maxblkid, tx));
	}

	if (data != NULL) {
		dmu_buf_t *db;
		dnode_t *dn;
		uint32_t flags = DMU_READ_NO_PREFETCH;

		if (rwa->raw)
			flags |= DMU_READ_NO_DECRYPT;

		VERIFY0(dnode_hold(rwa->os, drro->drr_object, FTAG, &dn));
		VERIFY0(dmu_bonus_hold_by_dnode(dn, FTAG, &db, flags));

		dmu_buf_will_dirty(db, tx);

		ASSERT3U(db->db_size, >=, drro->drr_bonuslen);
		bcopy(data, db->db_data, DRR_OBJECT_PAYLOAD_SIZE(drro));

		/*
		 * Raw bonus buffers have their byteorder determined by the
		 * DRR_OBJECT_RANGE record.
		 */
		if (rwa->byteswap && !rwa->raw) {
			dmu_object_byteswap_t byteswap =
			    DMU_OT_BYTESWAP(drro->drr_bonustype);
			dmu_ot_byteswap[byteswap].ob_func(db->db_data,
			    DRR_OBJECT_PAYLOAD_SIZE(drro));
		}
		dmu_buf_rele(db, FTAG);
		dnode_rele(dn, FTAG);
	}
	dmu_tx_commit(tx);

	return (0);
}
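
/*
 * Apply a DRR_FREEOBJECTS record by freeing every allocated object in
 * the range [drr_firstobj, drr_firstobj + drr_numobjs).
 */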
static int
receive_freeobjects(struct receive_writer_arg *rwa,
    struct drr_freeobjects *drrfo)
{
	uint64_t obj;
	int next_err = 0;

	if (drrfo->drr_firstobj + drrfo->drr_numobjs < drrfo->drr_firstobj)
		return (SET_ERROR(EINVAL));

	for (obj = drrfo->drr_firstobj == 0 ? 1 : drrfo->drr_firstobj;
	    obj < drrfo->drr_firstobj + drrfo->drr_numobjs && next_err == 0;
	    next_err = dmu_object_next(rwa->os, &obj, FALSE, 0)) {
		dmu_object_info_t doi;
		int err;

		err = dmu_object_info(rwa->os, obj, &doi);
		if (err == ENOENT)
			continue;
		else if (err != 0)
			return (err);

		err = dmu_free_long_object(rwa->os, obj);

		if (err != 0)
			return (err);

		if (obj > rwa->max_object)
			rwa->max_object = obj;
	}
	if (next_err != ESRCH)
		return (next_err);
	return (0);
}
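
/*
 * Apply a DRR_WRITE record: enforce (object, offset) ordering so
 * resume works, byteswap the payload if required, and assign the arc
 * buffer directly to the destination dnode.
 */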
static int
receive_write(struct receive_writer_arg *rwa, struct drr_write *drrw,
    arc_buf_t *abuf)
{
	int err;
	dmu_tx_t *tx;
	dnode_t *dn;

	if (drrw->drr_offset + drrw->drr_logical_size < drrw->drr_offset ||
	    !DMU_OT_IS_VALID(drrw->drr_type))
		return (SET_ERROR(EINVAL));

	/*
	 * For resuming to work, records must be in increasing order
	 * by (object, offset).
	 */
	if (drrw->drr_object < rwa->last_object ||
	    (drrw->drr_object == rwa->last_object &&
	    drrw->drr_offset < rwa->last_offset)) {
		return (SET_ERROR(EINVAL));
	}
	rwa->last_object = drrw->drr_object;
	rwa->last_offset = drrw->drr_offset;

	if (rwa->last_object > rwa->max_object)
		rwa->max_object = rwa->last_object;

	if (dmu_object_info(rwa->os, drrw->drr_object, NULL) != 0)
		return (SET_ERROR(EINVAL));

	tx = dmu_tx_create(rwa->os);
	dmu_tx_hold_write(tx, drrw->drr_object,
	    drrw->drr_offset, drrw->drr_logical_size);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err != 0) {
		dmu_tx_abort(tx);
		return (err);
	}

	if (rwa->byteswap && !arc_is_encrypted(abuf) &&
	    arc_get_compression(abuf) == ZIO_COMPRESS_OFF) {
		dmu_object_byteswap_t byteswap =
		    DMU_OT_BYTESWAP(drrw->drr_type);
		dmu_ot_byteswap[byteswap].ob_func(abuf->b_data,
		    DRR_WRITE_PAYLOAD_SIZE(drrw));
	}

	VERIFY0(dnode_hold(rwa->os, drrw->drr_object, FTAG, &dn));
	err = dmu_assign_arcbuf_by_dnode(dn, drrw->drr_offset, abuf, tx);
	if (err != 0) {
		dnode_rele(dn, FTAG);
		dmu_tx_commit(tx);
		return (err);
	}
	dnode_rele(dn, FTAG);

	/*
	 * Note: If the receive fails, we want the resume stream to start
	 * with the same record that we last successfully received (as opposed
	 * to the next record), so that we can verify that we are
	 * resuming from the correct location.
	 */
	save_resume_state(rwa, drrw->drr_object, drrw->drr_offset, tx);
	dmu_tx_commit(tx);

	return (0);
}
/*
 * Handle a DRR_WRITE_BYREF record. This record is used in dedup'ed
 * streams to refer to a copy of the data that is already on the
 * system because it came in earlier in the stream. This function
 * finds the earlier copy of the data, and uses that copy instead of
 * data from the stream to fulfill this write.
 */
static int
receive_write_byref(struct receive_writer_arg *rwa,
    struct drr_write_byref *drrwbr)
{
	dmu_tx_t *tx;
	int err;
	guid_map_entry_t gmesrch;
	guid_map_entry_t *gmep;
	avl_index_t where;
	objset_t *ref_os = NULL;
	int flags = DMU_READ_PREFETCH;
	dmu_buf_t *dbp;

	if (drrwbr->drr_offset + drrwbr->drr_length < drrwbr->drr_offset)
		return (SET_ERROR(EINVAL));

	/*
	 * If the GUID of the referenced dataset is different from the
	 * GUID of the target dataset, find the referenced dataset.
	 */
	if (drrwbr->drr_toguid != drrwbr->drr_refguid) {
		gmesrch.guid = drrwbr->drr_refguid;
		if ((gmep = avl_find(rwa->guid_to_ds_map, &gmesrch,
		    &where)) == NULL) {
			return (SET_ERROR(EINVAL));
		}
		if (dmu_objset_from_ds(gmep->gme_ds, &ref_os))
			return (SET_ERROR(EINVAL));
	} else {
		ref_os = rwa->os;
	}

	if (drrwbr->drr_object > rwa->max_object)
		rwa->max_object = drrwbr->drr_object;

	if (rwa->raw)
		flags |= DMU_READ_NO_DECRYPT;

	/* may return either a regular db or an encrypted one */
	err = dmu_buf_hold(ref_os, drrwbr->drr_refobject,
	    drrwbr->drr_refoffset, FTAG, &dbp, flags);
	if (err != 0)
		return (err);

	tx = dmu_tx_create(rwa->os);

	dmu_tx_hold_write(tx, drrwbr->drr_object,
	    drrwbr->drr_offset, drrwbr->drr_length);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err != 0) {
		dmu_tx_abort(tx);
		return (err);
	}

	if (rwa->raw) {
		dmu_copy_from_buf(rwa->os, drrwbr->drr_object,
		    drrwbr->drr_offset, dbp, tx);
	} else {
		dmu_write(rwa->os, drrwbr->drr_object,
		    drrwbr->drr_offset, drrwbr->drr_length, dbp->db_data, tx);
	}
	dmu_buf_rele(dbp, FTAG);

	/* See comment in restore_write. */
	save_resume_state(rwa, drrwbr->drr_object, drrwbr->drr_offset, tx);
	dmu_tx_commit(tx);
	return (0);
}
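
/*
 * Apply a DRR_WRITE_EMBEDDED record; the payload lives entirely in
 * the block pointer, so this record type is rejected for raw
 * (encrypted) receives.
 */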
static int
receive_write_embedded(struct receive_writer_arg *rwa,
    struct drr_write_embedded *drrwe, void *data)
{
	dmu_tx_t *tx;
	int err;

	if (drrwe->drr_offset + drrwe->drr_length < drrwe->drr_offset)
		return (SET_ERROR(EINVAL));

	if (drrwe->drr_psize > BPE_PAYLOAD_SIZE)
		return (SET_ERROR(EINVAL));

	if (drrwe->drr_etype >= NUM_BP_EMBEDDED_TYPES)
		return (SET_ERROR(EINVAL));
	if (drrwe->drr_compression >= ZIO_COMPRESS_FUNCTIONS)
		return (SET_ERROR(EINVAL));
	if (rwa->raw)
		return (SET_ERROR(EINVAL));

	if (drrwe->drr_object > rwa->max_object)
		rwa->max_object = drrwe->drr_object;

	tx = dmu_tx_create(rwa->os);

	dmu_tx_hold_write(tx, drrwe->drr_object,
	    drrwe->drr_offset, drrwe->drr_length);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err != 0) {
		dmu_tx_abort(tx);
		return (err);
	}

	dmu_write_embedded(rwa->os, drrwe->drr_object,
	    drrwe->drr_offset, data, drrwe->drr_etype,
	    drrwe->drr_compression, drrwe->drr_lsize, drrwe->drr_psize,
	    rwa->byteswap ^ ZFS_HOST_BYTEORDER, tx);

	/* See comment in restore_write. */
	save_resume_state(rwa, drrwe->drr_object, drrwe->drr_offset, tx);
	dmu_tx_commit(tx);
	return (0);
}
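
/*
 * Apply a DRR_SPILL record, growing or shrinking the existing spill
 * dbuf to match the incoming block before assigning the arc buffer.
 */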
static int
receive_spill(struct receive_writer_arg *rwa, struct drr_spill *drrs,
    arc_buf_t *abuf)
{
	dmu_tx_t *tx;
	dmu_buf_t *db, *db_spill;
	int err;
	uint32_t flags = 0;

	if (drrs->drr_length < SPA_MINBLOCKSIZE ||
	    drrs->drr_length > spa_maxblocksize(dmu_objset_spa(rwa->os)))
		return (SET_ERROR(EINVAL));

	/*
	 * This is an unmodified spill block which was added to the stream
	 * to resolve an issue with incorrectly removing spill blocks. It
	 * should be ignored by current versions of the code which support
	 * the DRR_FLAG_SPILL_BLOCK flag.
	 */
	if (rwa->spill && DRR_SPILL_IS_UNMODIFIED(drrs->drr_flags)) {
		dmu_return_arcbuf(abuf);
		return (0);
	}

	if (rwa->raw) {
		if (!DMU_OT_IS_VALID(drrs->drr_type) ||
		    drrs->drr_compressiontype >= ZIO_COMPRESS_FUNCTIONS ||
		    drrs->drr_compressed_size == 0)
			return (SET_ERROR(EINVAL));

		flags |= DMU_READ_NO_DECRYPT;
	}

	if (dmu_object_info(rwa->os, drrs->drr_object, NULL) != 0)
		return (SET_ERROR(EINVAL));

	if (drrs->drr_object > rwa->max_object)
		rwa->max_object = drrs->drr_object;

	VERIFY0(dmu_bonus_hold(rwa->os, drrs->drr_object, FTAG, &db));
	if ((err = dmu_spill_hold_by_bonus(db, DMU_READ_NO_DECRYPT, FTAG,
	    &db_spill)) != 0) {
		dmu_buf_rele(db, FTAG);
		return (err);
	}

	tx = dmu_tx_create(rwa->os);

	dmu_tx_hold_spill(tx, db->db_object);

	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err != 0) {
		dmu_buf_rele(db, FTAG);
		dmu_buf_rele(db_spill, FTAG);
		dmu_tx_abort(tx);
		return (err);
	}

	/*
	 * Spill blocks may both grow and shrink. When a change in size
	 * occurs any existing dbuf must be updated to match the logical
	 * size of the provided arc_buf_t.
	 */
	if (db_spill->db_size != drrs->drr_length) {
		dmu_buf_will_fill(db_spill, tx);
		VERIFY(0 == dbuf_spill_set_blksz(db_spill,
		    drrs->drr_length, tx));
	}

	if (rwa->byteswap && !arc_is_encrypted(abuf) &&
	    arc_get_compression(abuf) == ZIO_COMPRESS_OFF) {
		dmu_object_byteswap_t byteswap =
		    DMU_OT_BYTESWAP(drrs->drr_type);
		dmu_ot_byteswap[byteswap].ob_func(abuf->b_data,
		    DRR_SPILL_PAYLOAD_SIZE(drrs));
	}

	dbuf_assign_arcbuf((dmu_buf_impl_t *)db_spill, abuf, tx);

	dmu_buf_rele(db, FTAG);
	dmu_buf_rele(db_spill, FTAG);

	dmu_tx_commit(tx);
	return (0);
}
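
/*
 * Apply a DRR_FREE record by punching a hole in the object at the
 * given offset and length.
 */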
static int
receive_free(struct receive_writer_arg *rwa, struct drr_free *drrf)
{
	int err;

	if (drrf->drr_length != DMU_OBJECT_END &&
	    drrf->drr_offset + drrf->drr_length < drrf->drr_offset)
		return (SET_ERROR(EINVAL));

	if (dmu_object_info(rwa->os, drrf->drr_object, NULL) != 0)
		return (SET_ERROR(EINVAL));

	if (drrf->drr_object > rwa->max_object)
		rwa->max_object = drrf->drr_object;

	err = dmu_free_long_range(rwa->os, drrf->drr_object,
	    drrf->drr_offset, drrf->drr_length);

	return (err);
}
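
/*
 * Record the encryption parameters (byteorder, salt, IV, MAC) from a
 * DRR_OBJECT_RANGE record; they are applied later in receive_object().
 */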
static int
receive_object_range(struct receive_writer_arg *rwa,
    struct drr_object_range *drror)
{
	/*
	 * By default, we assume this block is in our native format
	 * (ZFS_HOST_BYTEORDER). We then take into account whether
	 * the send stream is byteswapped (rwa->byteswap). Finally,
	 * we need to byteswap again if this particular block was
	 * in non-native format on the send side.
	 */
	boolean_t byteorder = ZFS_HOST_BYTEORDER ^ rwa->byteswap ^
	    !!DRR_IS_RAW_BYTESWAPPED(drror->drr_flags);

	/*
	 * Since dnode block sizes are constant, we should not need to worry
	 * about making sure that the dnode block size is the same on the
	 * sending and receiving sides for the time being. For non-raw sends,
	 * this does not matter (and in fact we do not send a DRR_OBJECT_RANGE
	 * record at all). Raw sends require this record type because the
	 * encryption parameters are used to protect an entire block of bonus
	 * buffers. If the size of dnode blocks ever becomes variable,
	 * handling will need to be added to ensure that dnode block sizes
	 * match on the sending and receiving side.
	 */
	if (drror->drr_numslots != DNODES_PER_BLOCK ||
	    P2PHASE(drror->drr_firstobj, DNODES_PER_BLOCK) != 0 ||
	    !rwa->raw)
		return (SET_ERROR(EINVAL));

	if (drror->drr_firstobj > rwa->max_object)
		rwa->max_object = drror->drr_firstobj;

	/*
	 * The DRR_OBJECT_RANGE handling must be deferred to receive_object()
	 * so that the block of dnodes is not written out when it's empty,
	 * and converted to a HOLE BP.
	 */
	rwa->or_crypt_params_present = B_TRUE;
	rwa->or_firstobj = drror->drr_firstobj;
	rwa->or_numslots = drror->drr_numslots;
	bcopy(drror->drr_salt, rwa->or_salt, ZIO_DATA_SALT_LEN);
	bcopy(drror->drr_iv, rwa->or_iv, ZIO_DATA_IV_LEN);
	bcopy(drror->drr_mac, rwa->or_mac, ZIO_DATA_MAC_LEN);
	rwa->or_byteorder = byteorder;

	return (0);
}
/* used to destroy the drc_ds on error */
static void
dmu_recv_cleanup_ds(dmu_recv_cookie_t *drc)
{
	dsl_dataset_t *ds = drc->drc_ds;
	ds_hold_flags_t dsflags = (drc->drc_raw) ? 0 : DS_HOLD_FLAG_DECRYPT;

	/*
	 * Wait for the txg sync before cleaning up the receive. For
	 * resumable receives, this ensures that our resume state has
	 * been written out to disk. For raw receives, this ensures
	 * that the user accounting code will not attempt to do anything
	 * after we stopped receiving the dataset.
	 */
	txg_wait_synced(ds->ds_dir->dd_pool, 0);
	ds->ds_objset->os_raw_receive = B_FALSE;

	rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
	if (drc->drc_resumable && !BP_IS_HOLE(dsl_dataset_get_blkptr(ds))) {
		rrw_exit(&ds->ds_bp_rwlock, FTAG);
		dsl_dataset_disown(ds, dsflags, dmu_recv_tag);
	} else {
		char name[ZFS_MAX_DATASET_NAME_LEN];
		rrw_exit(&ds->ds_bp_rwlock, FTAG);
		dsl_dataset_name(ds, name);
		dsl_dataset_disown(ds, dsflags, dmu_recv_tag);
		(void) dsl_destroy_head(name);
	}
}
static void
receive_cksum(struct receive_arg *ra, int len, void *buf)
{
	if (ra->byteswap) {
		(void) fletcher_4_incremental_byteswap(buf, len, &ra->cksum);
	} else {
		(void) fletcher_4_incremental_native(buf, len, &ra->cksum);
	}
}
1883 * Read the payload into a buffer of size len, and update the current record's
1885 * Allocate ra->next_rrd and read the next record's header into
1886 * ra->next_rrd->header.
1887 * Verify checksum of payload and next record.
1890 receive_read_payload_and_next_header(struct receive_arg
*ra
, int len
, void *buf
)
	int err;
	zio_cksum_t cksum_orig;
	zio_cksum_t *cksump;

	if (len != 0) {
		ASSERT3U(len, <=, SPA_MAXBLOCKSIZE);
		err = receive_read(ra, len, buf);
		if (err != 0)
			return (err);
		receive_cksum(ra, len, buf);

		/* note: rrd is NULL when reading the begin record's payload */
		if (ra->rrd != NULL) {
			ra->rrd->payload = buf;
			ra->rrd->payload_size = len;
			ra->rrd->bytes_read = ra->bytes_read;
		}
	} else {
		ASSERT3P(buf, ==, NULL);
	}

	ra->prev_cksum = ra->cksum;

	ra->next_rrd = kmem_zalloc(sizeof (*ra->next_rrd), KM_SLEEP);
	err = receive_read(ra, sizeof (ra->next_rrd->header),
	    &ra->next_rrd->header);
	ra->next_rrd->bytes_read = ra->bytes_read;

	if (err != 0) {
		kmem_free(ra->next_rrd, sizeof (*ra->next_rrd));
		ra->next_rrd = NULL;
		return (err);
	}
	if (ra->next_rrd->header.drr_type == DRR_BEGIN) {
		kmem_free(ra->next_rrd, sizeof (*ra->next_rrd));
		ra->next_rrd = NULL;
		return (SET_ERROR(EINVAL));
	}

	/*
	 * Note: checksum is of everything up to but not including the
	 * checksum itself.
	 */
	ASSERT3U(offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
	    ==, sizeof (dmu_replay_record_t) - sizeof (zio_cksum_t));
	receive_cksum(ra,
	    offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
	    &ra->next_rrd->header);

	cksum_orig = ra->next_rrd->header.drr_u.drr_checksum.drr_checksum;
	cksump = &ra->next_rrd->header.drr_u.drr_checksum.drr_checksum;

	if (ra->byteswap)
		byteswap_record(&ra->next_rrd->header);

	if ((!ZIO_CHECKSUM_IS_ZERO(cksump)) &&
	    !ZIO_CHECKSUM_EQUAL(ra->cksum, *cksump)) {
		kmem_free(ra->next_rrd, sizeof (*ra->next_rrd));
		ra->next_rrd = NULL;
		return (SET_ERROR(ECKSUM));
	}

	receive_cksum(ra, sizeof (cksum_orig), &cksum_orig);

	return (0);
}
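
/*
 * Sketch of the framing handled above (illustrative, not normative):
 * each on-the-wire record is a fixed-size dmu_replay_record_t header,
 * whose trailing drr_u.drr_checksum.drr_checksum field holds the
 * running fletcher-4 of every stream byte that precedes the field
 * itself, optionally followed by a payload:
 *
 *	... | header N | payload N | header N+1 | payload N+1 | ...
 *	                            the cksum field in header N+1 covers
 *	                            everything up to (not including) it
 *
 * A zeroed checksum field means "not checksummed" and is accepted.
 */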
static void
objlist_create(struct objlist *list)
{
	list_create(&list->list, sizeof (struct receive_objnode),
	    offsetof(struct receive_objnode, node));
	list->last_lookup = 0;
}

static void
objlist_destroy(struct objlist *list)
{
	for (struct receive_objnode *n = list_remove_head(&list->list);
	    n != NULL; n = list_remove_head(&list->list)) {
		kmem_free(n, sizeof (*n));
	}
	list_destroy(&list->list);
}
/*
 * This function looks through the objlist to see if the specified object number
 * is contained in the objlist.  In the process, it will remove all object
 * numbers in the list that are smaller than the specified object number.  Thus,
 * any lookup of an object number smaller than a previously looked up object
 * number will always return false; therefore, all lookups should be done in
 * ascending order.
 */
static boolean_t
objlist_exists(struct objlist *list, uint64_t object)
{
	struct receive_objnode *node = list_head(&list->list);
	ASSERT3U(object, >=, list->last_lookup);
	list->last_lookup = object;
	while (node != NULL && node->object < object) {
		VERIFY3P(node, ==, list_remove_head(&list->list));
		kmem_free(node, sizeof (*node));
		node = list_head(&list->list);
	}
	return (node != NULL && node->object == object);
}
/*
 * The objlist is a list of object numbers stored in ascending order. However,
 * the insertion of new object numbers does not seek out the correct location to
 * store a new object number; instead, it appends it to the list for simplicity.
 * Thus, any users must take care to only insert new object numbers in ascending
 * order.
 */
static void
objlist_insert(struct objlist *list, uint64_t object)
{
	struct receive_objnode *node = kmem_zalloc(sizeof (*node), KM_SLEEP);
	node->object = object;
#ifdef ZFS_DEBUG
	{
	struct receive_objnode *last_object = list_tail(&list->list);
	uint64_t last_objnum = (last_object != NULL ? last_object->object : 0);
	ASSERT3U(node->object, >, last_objnum);
	}
#endif
	list_insert_tail(&list->list, node);
}
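
/*
 * Usage sketch (illustrative only): callers feed object numbers in
 * ascending order and query them the same way, which lets the list
 * self-trim:
 *
 *	struct objlist ol;
 *	objlist_create(&ol);
 *	objlist_insert(&ol, 5);
 *	objlist_insert(&ol, 93);		(must be greater than 5)
 *	(void) objlist_exists(&ol, 5);		(B_TRUE; node 5 is kept)
 *	(void) objlist_exists(&ol, 7);		(B_FALSE; node 5 is freed)
 *	(void) objlist_exists(&ol, 93);		(B_TRUE)
 *	objlist_destroy(&ol);
 */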
/*
 * Issue the prefetch reads for any necessary indirect blocks.
 *
 * We use the object ignore list to tell us whether or not to issue prefetches
 * for a given object.  We do this for both correctness (in case the blocksize
 * of an object has changed) and performance (if the object doesn't exist, don't
 * needlessly try to issue prefetches).  We also trim the list as we go through
 * the stream to prevent it from growing to an unbounded size.
 *
 * The object numbers within will always be in sorted order, and any write
 * records we see will also be in sorted order, but they're not sorted with
 * respect to each other (i.e. we can get several object records before
 * receiving each object's write records).  As a result, once we've reached a
 * given object number, we can safely remove any reference to lower object
 * numbers in the ignore list.  In practice, we receive up to 32 object records
 * before receiving write records, so the list can have up to 32 nodes in it.
 */
static void
receive_read_prefetch(struct receive_arg *ra,
    uint64_t object, uint64_t offset, uint64_t length)
{
	if (!objlist_exists(&ra->ignore_objlist, object)) {
		dmu_prefetch(ra->os, object, 1, offset, length,
		    ZIO_PRIORITY_SYNC_READ);
	}
}
/*
 * Read records off the stream, issuing any necessary prefetches.
 */
static int
receive_read_record(struct receive_arg *ra)
{
	int err;

	switch (ra->rrd->header.drr_type) {
	case DRR_OBJECT:
	{
		struct drr_object *drro = &ra->rrd->header.drr_u.drr_object;
		uint32_t size = DRR_OBJECT_PAYLOAD_SIZE(drro);
		void *buf = NULL;
		dmu_object_info_t doi;

		if (size != 0)
			buf = kmem_zalloc(size, KM_SLEEP);

		err = receive_read_payload_and_next_header(ra, size, buf);
		if (err != 0) {
			kmem_free(buf, size);
			return (err);
		}
		err = dmu_object_info(ra->os, drro->drr_object, &doi);
		/*
		 * See receive_read_prefetch for an explanation why we're
		 * storing this object in the ignore_obj_list.
		 */
		if (err == ENOENT || err == EEXIST ||
		    (err == 0 && doi.doi_data_block_size != drro->drr_blksz)) {
			objlist_insert(&ra->ignore_objlist, drro->drr_object);
			err = 0;
		}
		return (err);
	}
	case DRR_FREEOBJECTS:
	{
		err = receive_read_payload_and_next_header(ra, 0, NULL);
		return (err);
	}
	case DRR_WRITE:
	{
		struct drr_write *drrw = &ra->rrd->header.drr_u.drr_write;
		arc_buf_t *abuf;
		boolean_t is_meta = DMU_OT_IS_METADATA(drrw->drr_type);

		if (ra->raw) {
			boolean_t byteorder = ZFS_HOST_BYTEORDER ^
			    !!DRR_IS_RAW_BYTESWAPPED(drrw->drr_flags) ^
			    ra->byteswap;

			abuf = arc_loan_raw_buf(dmu_objset_spa(ra->os),
			    drrw->drr_object, byteorder, drrw->drr_salt,
			    drrw->drr_iv, drrw->drr_mac, drrw->drr_type,
			    drrw->drr_compressed_size, drrw->drr_logical_size,
			    drrw->drr_compressiontype);
		} else if (DRR_WRITE_COMPRESSED(drrw)) {
			ASSERT3U(drrw->drr_compressed_size, >, 0);
			ASSERT3U(drrw->drr_logical_size, >=,
			    drrw->drr_compressed_size);
			abuf = arc_loan_compressed_buf(
			    dmu_objset_spa(ra->os),
			    drrw->drr_compressed_size, drrw->drr_logical_size,
			    drrw->drr_compressiontype);
		} else {
			abuf = arc_loan_buf(dmu_objset_spa(ra->os),
			    is_meta, drrw->drr_logical_size);
		}

		err = receive_read_payload_and_next_header(ra,
		    DRR_WRITE_PAYLOAD_SIZE(drrw), abuf->b_data);
		if (err != 0) {
			dmu_return_arcbuf(abuf);
			return (err);
		}
		ra->rrd->arc_buf = abuf;
		receive_read_prefetch(ra, drrw->drr_object, drrw->drr_offset,
		    drrw->drr_logical_size);
		return (err);
	}
	case DRR_WRITE_BYREF:
	{
		struct drr_write_byref *drrwb =
		    &ra->rrd->header.drr_u.drr_write_byref;
		err = receive_read_payload_and_next_header(ra, 0, NULL);
		receive_read_prefetch(ra, drrwb->drr_object, drrwb->drr_offset,
		    drrwb->drr_length);
		return (err);
	}
	case DRR_WRITE_EMBEDDED:
	{
		struct drr_write_embedded *drrwe =
		    &ra->rrd->header.drr_u.drr_write_embedded;
		uint32_t size = P2ROUNDUP(drrwe->drr_psize, 8);
		void *buf = kmem_zalloc(size, KM_SLEEP);

		err = receive_read_payload_and_next_header(ra, size, buf);
		if (err != 0) {
			kmem_free(buf, size);
			return (err);
		}

		receive_read_prefetch(ra, drrwe->drr_object, drrwe->drr_offset,
		    drrwe->drr_length);
		return (err);
	}
	case DRR_FREE:
	{
		/*
		 * It might be beneficial to prefetch indirect blocks here, but
		 * we don't really have the data to decide for sure.
		 */
		err = receive_read_payload_and_next_header(ra, 0, NULL);
		return (err);
	}
	case DRR_END:
	{
		struct drr_end *drre = &ra->rrd->header.drr_u.drr_end;
		if (!ZIO_CHECKSUM_EQUAL(ra->prev_cksum, drre->drr_checksum))
			return (SET_ERROR(ECKSUM));
		return (0);
	}
	case DRR_SPILL:
	{
		struct drr_spill *drrs = &ra->rrd->header.drr_u.drr_spill;
		arc_buf_t *abuf;
		int len = DRR_SPILL_PAYLOAD_SIZE(drrs);

		/* DRR_SPILL records are either raw or uncompressed */
		if (ra->raw) {
			boolean_t byteorder = ZFS_HOST_BYTEORDER ^
			    !!DRR_IS_RAW_BYTESWAPPED(drrs->drr_flags) ^
			    ra->byteswap;

			abuf = arc_loan_raw_buf(dmu_objset_spa(ra->os),
			    dmu_objset_id(ra->os), byteorder, drrs->drr_salt,
			    drrs->drr_iv, drrs->drr_mac, drrs->drr_type,
			    drrs->drr_compressed_size, drrs->drr_length,
			    drrs->drr_compressiontype);
		} else {
			abuf = arc_loan_buf(dmu_objset_spa(ra->os),
			    DMU_OT_IS_METADATA(drrs->drr_type),
			    drrs->drr_length);
		}

		err = receive_read_payload_and_next_header(ra, len,
		    abuf->b_data);
		if (err != 0) {
			dmu_return_arcbuf(abuf);
			return (err);
		}
		ra->rrd->arc_buf = abuf;
		return (err);
	}
	case DRR_OBJECT_RANGE:
	{
		err = receive_read_payload_and_next_header(ra, 0, NULL);
		return (err);
	}
	default:
		return (SET_ERROR(EINVAL));
	}
}
static void
dprintf_drr(struct receive_record_arg *rrd, int err)
{
#ifdef ZFS_DEBUG
	switch (rrd->header.drr_type) {
	case DRR_OBJECT:
	{
		struct drr_object *drro = &rrd->header.drr_u.drr_object;
		dprintf("drr_type = OBJECT obj = %llu type = %u "
		    "bonustype = %u blksz = %u bonuslen = %u cksumtype = %u "
		    "compress = %u dn_slots = %u err = %d\n",
		    drro->drr_object, drro->drr_type, drro->drr_bonustype,
		    drro->drr_blksz, drro->drr_bonuslen,
		    drro->drr_checksumtype, drro->drr_compress,
		    drro->drr_dn_slots, err);
		break;
	}
	case DRR_FREEOBJECTS:
	{
		struct drr_freeobjects *drrfo =
		    &rrd->header.drr_u.drr_freeobjects;
		dprintf("drr_type = FREEOBJECTS firstobj = %llu "
		    "numobjs = %llu err = %d\n",
		    drrfo->drr_firstobj, drrfo->drr_numobjs, err);
		break;
	}
	case DRR_WRITE:
	{
		struct drr_write *drrw = &rrd->header.drr_u.drr_write;
		dprintf("drr_type = WRITE obj = %llu type = %u offset = %llu "
		    "lsize = %llu cksumtype = %u flags = %u "
		    "compress = %u psize = %llu err = %d\n",
		    drrw->drr_object, drrw->drr_type, drrw->drr_offset,
		    drrw->drr_logical_size, drrw->drr_checksumtype,
		    drrw->drr_flags, drrw->drr_compressiontype,
		    drrw->drr_compressed_size, err);
		break;
	}
	case DRR_WRITE_BYREF:
	{
		struct drr_write_byref *drrwbr =
		    &rrd->header.drr_u.drr_write_byref;
		dprintf("drr_type = WRITE_BYREF obj = %llu offset = %llu "
		    "length = %llu toguid = %llx refguid = %llx "
		    "refobject = %llu refoffset = %llu cksumtype = %u "
		    "flags = %u err = %d\n",
		    drrwbr->drr_object, drrwbr->drr_offset,
		    drrwbr->drr_length, drrwbr->drr_toguid,
		    drrwbr->drr_refguid, drrwbr->drr_refobject,
		    drrwbr->drr_refoffset, drrwbr->drr_checksumtype,
		    drrwbr->drr_flags, err);
		break;
	}
	case DRR_WRITE_EMBEDDED:
	{
		struct drr_write_embedded *drrwe =
		    &rrd->header.drr_u.drr_write_embedded;
		dprintf("drr_type = WRITE_EMBEDDED obj = %llu offset = %llu "
		    "length = %llu compress = %u etype = %u lsize = %u "
		    "psize = %u err = %d\n",
		    drrwe->drr_object, drrwe->drr_offset, drrwe->drr_length,
		    drrwe->drr_compression, drrwe->drr_etype,
		    drrwe->drr_lsize, drrwe->drr_psize, err);
		break;
	}
	case DRR_FREE:
	{
		struct drr_free *drrf = &rrd->header.drr_u.drr_free;
		dprintf("drr_type = FREE obj = %llu offset = %llu "
		    "length = %lld err = %d\n",
		    drrf->drr_object, drrf->drr_offset, drrf->drr_length,
		    err);
		break;
	}
	case DRR_SPILL:
	{
		struct drr_spill *drrs = &rrd->header.drr_u.drr_spill;
		dprintf("drr_type = SPILL obj = %llu length = %llu "
		    "err = %d\n", drrs->drr_object, drrs->drr_length, err);
		break;
	}
	case DRR_OBJECT_RANGE:
	{
		struct drr_object_range *drror =
		    &rrd->header.drr_u.drr_object_range;
		dprintf("drr_type = OBJECT_RANGE firstobj = %llu "
		    "numslots = %llu flags = %u err = %d\n",
		    drror->drr_firstobj, drror->drr_numslots,
		    drror->drr_flags, err);
		break;
	}
	default:
		return;
	}
#endif
}
/*
 * Commit the records to the pool.
 */
static int
receive_process_record(struct receive_writer_arg *rwa,
    struct receive_record_arg *rrd)
{
	int err;

	/* Processing in order, therefore bytes_read should be increasing. */
	ASSERT3U(rrd->bytes_read, >=, rwa->bytes_read);
	rwa->bytes_read = rrd->bytes_read;

	switch (rrd->header.drr_type) {
	case DRR_OBJECT:
	{
		struct drr_object *drro = &rrd->header.drr_u.drr_object;
		err = receive_object(rwa, drro, rrd->payload);
		kmem_free(rrd->payload, rrd->payload_size);
		rrd->payload = NULL;
		break;
	}
	case DRR_FREEOBJECTS:
	{
		struct drr_freeobjects *drrfo =
		    &rrd->header.drr_u.drr_freeobjects;
		err = receive_freeobjects(rwa, drrfo);
		break;
	}
	case DRR_WRITE:
	{
		struct drr_write *drrw = &rrd->header.drr_u.drr_write;
		err = receive_write(rwa, drrw, rrd->arc_buf);
		/* if receive_write() is successful, it consumes the arc_buf */
		if (err != 0)
			dmu_return_arcbuf(rrd->arc_buf);
		rrd->arc_buf = NULL;
		rrd->payload = NULL;
		break;
	}
	case DRR_WRITE_BYREF:
	{
		struct drr_write_byref *drrwbr =
		    &rrd->header.drr_u.drr_write_byref;
		err = receive_write_byref(rwa, drrwbr);
		break;
	}
	case DRR_WRITE_EMBEDDED:
	{
		struct drr_write_embedded *drrwe =
		    &rrd->header.drr_u.drr_write_embedded;
		err = receive_write_embedded(rwa, drrwe, rrd->payload);
		kmem_free(rrd->payload, rrd->payload_size);
		rrd->payload = NULL;
		break;
	}
	case DRR_FREE:
	{
		struct drr_free *drrf = &rrd->header.drr_u.drr_free;
		err = receive_free(rwa, drrf);
		break;
	}
	case DRR_SPILL:
	{
		struct drr_spill *drrs = &rrd->header.drr_u.drr_spill;
		err = receive_spill(rwa, drrs, rrd->arc_buf);
		/* if receive_spill() is successful, it consumes the arc_buf */
		if (err != 0)
			dmu_return_arcbuf(rrd->arc_buf);
		rrd->arc_buf = NULL;
		rrd->payload = NULL;
		break;
	}
	case DRR_OBJECT_RANGE:
	{
		struct drr_object_range *drror =
		    &rrd->header.drr_u.drr_object_range;
		err = receive_object_range(rwa, drror);
		break;
	}
	default:
		err = (SET_ERROR(EINVAL));
	}

	if (err != 0)
		dprintf_drr(rrd, err);

	return (err);
}
/*
 * dmu_recv_stream's worker thread; pull records off the queue, and then call
 * receive_process_record.  When we're done, signal the main thread and exit.
 */
static void
receive_writer_thread(void *arg)
{
	struct receive_writer_arg *rwa = arg;
	struct receive_record_arg *rrd;
	fstrans_cookie_t cookie = spl_fstrans_mark();

	for (rrd = bqueue_dequeue(&rwa->q); !rrd->eos_marker;
	    rrd = bqueue_dequeue(&rwa->q)) {
		/*
		 * If there's an error, the main thread will stop putting things
		 * on the queue, but we need to clear everything in it before we
		 * can exit.
		 */
		if (rwa->err == 0) {
			rwa->err = receive_process_record(rwa, rrd);
		} else if (rrd->arc_buf != NULL) {
			dmu_return_arcbuf(rrd->arc_buf);
			rrd->arc_buf = NULL;
			rrd->payload = NULL;
		} else if (rrd->payload != NULL) {
			kmem_free(rrd->payload, rrd->payload_size);
			rrd->payload = NULL;
		}
		kmem_free(rrd, sizeof (*rrd));
	}
	kmem_free(rrd, sizeof (*rrd));
	mutex_enter(&rwa->mutex);
	rwa->done = B_TRUE;
	cv_signal(&rwa->cv);
	mutex_exit(&rwa->mutex);
	spl_fstrans_unmark(cookie);
	thread_exit();
}
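
/*
 * Handoff sketch (illustrative): the reader in dmu_recv_stream() is the
 * only producer on rwa->q and this thread the only consumer, so the
 * pairing looks like:
 *
 *	reader thread				writer thread (above)
 *	-------------				---------------------
 *	bqueue_enqueue(&rwa->q, rrd, weight)	rrd = bqueue_dequeue(&rwa->q)
 *	...					receive_process_record()
 *	enqueue rrd with eos_marker set		see eos_marker, set rwa->done,
 *						cv_signal(&rwa->cv), exit
 */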
static int
resume_check(struct receive_arg *ra, nvlist_t *begin_nvl)
{
	uint64_t val;
	objset_t *mos = dmu_objset_pool(ra->os)->dp_meta_objset;
	uint64_t dsobj = dmu_objset_id(ra->os);
	uint64_t resume_obj, resume_off;

	if (nvlist_lookup_uint64(begin_nvl,
	    "resume_object", &resume_obj) != 0 ||
	    nvlist_lookup_uint64(begin_nvl,
	    "resume_offset", &resume_off) != 0) {
		return (SET_ERROR(EINVAL));
	}
	VERIFY0(zap_lookup(mos, dsobj,
	    DS_FIELD_RESUME_OBJECT, sizeof (val), 1, &val));
	if (resume_obj != val)
		return (SET_ERROR(EINVAL));
	VERIFY0(zap_lookup(mos, dsobj,
	    DS_FIELD_RESUME_OFFSET, sizeof (val), 1, &val));
	if (resume_off != val)
		return (SET_ERROR(EINVAL));

	return (0);
}
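
/*
 * For illustration (assumed shape, mirroring the lookups above): the
 * BEGIN payload of a resuming stream carries the resume cursor as an
 * nvlist that the sender could have built as:
 *
 *	nvlist_t *nvl = fnvlist_alloc();
 *	fnvlist_add_uint64(nvl, "resume_object", resume_obj);
 *	fnvlist_add_uint64(nvl, "resume_offset", resume_off);
 *
 * resume_check() only accepts the stream if these match the
 * DS_FIELD_RESUME_OBJECT/OFFSET values that the previous, interrupted
 * receive recorded in the dataset's ZAP.
 */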
/*
 * Read in the stream's records, one by one, and apply them to the pool.  There
 * are two threads involved; the thread that calls this function will spin up a
 * worker thread, read the records off the stream one by one, and issue
 * prefetches for any necessary indirect blocks.  It will then push the records
 * onto an internal blocking queue.  The worker thread will pull the records off
 * the queue, and actually write the data into the DMU.  This way, the worker
 * thread doesn't have to wait for reads to complete, since everything it needs
 * (the indirect blocks) will be prefetched.
 *
 * NB: callers *must* call dmu_recv_end() if this succeeds.
 */
int
dmu_recv_stream(dmu_recv_cookie_t *drc, vnode_t *vp, offset_t *voffp,
    int cleanup_fd, uint64_t *action_handlep)
{
	int err = 0;
	struct receive_arg *ra;
	struct receive_writer_arg *rwa;
	int featureflags;
	uint32_t payloadlen;
	void *payload;
	nvlist_t *begin_nvl = NULL;

	ra = kmem_zalloc(sizeof (*ra), KM_SLEEP);
	rwa = kmem_zalloc(sizeof (*rwa), KM_SLEEP);

	ra->byteswap = drc->drc_byteswap;
	ra->raw = drc->drc_raw;
	ra->cksum = drc->drc_cksum;
	ra->vp = vp;
	ra->voff = *voffp;

	if (dsl_dataset_is_zapified(drc->drc_ds)) {
		(void) zap_lookup(drc->drc_ds->ds_dir->dd_pool->dp_meta_objset,
		    drc->drc_ds->ds_object, DS_FIELD_RESUME_BYTES,
		    sizeof (ra->bytes_read), 1, &ra->bytes_read);
	}

	objlist_create(&ra->ignore_objlist);

	/* these were verified in dmu_recv_begin */
	ASSERT3U(DMU_GET_STREAM_HDRTYPE(drc->drc_drrb->drr_versioninfo), ==,
	    DMU_SUBSTREAM);
	ASSERT3U(drc->drc_drrb->drr_type, <, DMU_OST_NUMTYPES);

	/*
	 * Open the objset we are modifying.
	 */
	VERIFY0(dmu_objset_from_ds(drc->drc_ds, &ra->os));

	ASSERT(dsl_dataset_phys(drc->drc_ds)->ds_flags & DS_FLAG_INCONSISTENT);

	featureflags = DMU_GET_FEATUREFLAGS(drc->drc_drrb->drr_versioninfo);
	ra->featureflags = featureflags;

	ASSERT0(ra->os->os_encrypted &&
	    (featureflags & DMU_BACKUP_FEATURE_EMBED_DATA));

	/* if this stream is dedup'ed, set up the avl tree for guid mapping */
	if (featureflags & DMU_BACKUP_FEATURE_DEDUP) {
		minor_t minor;

		if (cleanup_fd == -1) {
			err = SET_ERROR(EBADF);
			goto out;
		}
		err = zfs_onexit_fd_hold(cleanup_fd, &minor);
		if (err != 0) {
			cleanup_fd = -1;
			goto out;
		}

		if (*action_handlep == 0) {
			rwa->guid_to_ds_map =
			    kmem_alloc(sizeof (avl_tree_t), KM_SLEEP);
			avl_create(rwa->guid_to_ds_map, guid_compare,
			    sizeof (guid_map_entry_t),
			    offsetof(guid_map_entry_t, avlnode));
			err = zfs_onexit_add_cb(minor,
			    free_guid_map_onexit, rwa->guid_to_ds_map,
			    action_handlep);
			if (err != 0)
				goto out;
		} else {
			err = zfs_onexit_cb_data(minor, *action_handlep,
			    (void **)&rwa->guid_to_ds_map);
			if (err != 0)
				goto out;
		}

		drc->drc_guid_to_ds_map = rwa->guid_to_ds_map;
	}

	payloadlen = drc->drc_drr_begin->drr_payloadlen;
	payload = NULL;
	if (payloadlen != 0)
		payload = kmem_alloc(payloadlen, KM_SLEEP);

	err = receive_read_payload_and_next_header(ra, payloadlen, payload);
	if (err != 0) {
		if (payloadlen != 0)
			kmem_free(payload, payloadlen);
		goto out;
	}
	if (payloadlen != 0) {
		err = nvlist_unpack(payload, payloadlen, &begin_nvl, KM_SLEEP);
		kmem_free(payload, payloadlen);
		if (err != 0)
			goto out;
	}

	/* handle DSL encryption key payload */
	if (featureflags & DMU_BACKUP_FEATURE_RAW) {
		nvlist_t *keynvl = NULL;

		ASSERT(ra->os->os_encrypted);
		ASSERT(drc->drc_raw);

		err = nvlist_lookup_nvlist(begin_nvl, "crypt_keydata", &keynvl);
		if (err != 0)
			goto out;

		/*
		 * If this is a new dataset we set the key immediately.
		 * Otherwise we don't want to change the key until we
		 * are sure the rest of the receive succeeded so we stash
		 * the keynvl away until then.
		 */
		err = dsl_crypto_recv_raw(spa_name(ra->os->os_spa),
		    drc->drc_ds->ds_object, drc->drc_fromsnapobj,
		    drc->drc_drrb->drr_type, keynvl, drc->drc_newfs);
		if (err != 0)
			goto out;

		/* see comment in dmu_recv_end_sync() */
		drc->drc_ivset_guid = 0;
		(void) nvlist_lookup_uint64(keynvl, "to_ivset_guid",
		    &drc->drc_ivset_guid);

		if (!drc->drc_newfs)
			drc->drc_keynvl = fnvlist_dup(keynvl);
	}

	if (featureflags & DMU_BACKUP_FEATURE_RESUMING) {
		err = resume_check(ra, begin_nvl);
		if (err != 0)
			goto out;
	}

	(void) bqueue_init(&rwa->q,
	    MAX(zfs_recv_queue_length, 2 * zfs_max_recordsize),
	    offsetof(struct receive_record_arg, node));
	cv_init(&rwa->cv, NULL, CV_DEFAULT, NULL);
	mutex_init(&rwa->mutex, NULL, MUTEX_DEFAULT, NULL);
	rwa->os = ra->os;
	rwa->byteswap = drc->drc_byteswap;
	rwa->resumable = drc->drc_resumable;
	rwa->raw = drc->drc_raw;
	rwa->spill = drc->drc_spill;
	rwa->os->os_raw_receive = drc->drc_raw;

	(void) thread_create(NULL, 0, receive_writer_thread, rwa, 0, curproc,
	    TS_RUN, minclsyspri);
	/*
	 * We're reading rwa->err without locks, which is safe since we are the
	 * only reader, and the worker thread is the only writer.  It's ok if we
	 * miss a write for an iteration or two of the loop, since the writer
	 * thread will keep freeing records we send it until we send it an eos
	 * marker.
	 *
	 * We can leave this loop in 3 ways:  First, if rwa->err is
	 * non-zero.  In that case, the writer thread will free the rrd we just
	 * pushed.  Second, if we're interrupted; in that case, either it's the
	 * first loop and ra->rrd was never allocated, or it's later and ra->rrd
	 * has been handed off to the writer thread who will free it.  Finally,
	 * if receive_read_record fails or we're at the end of the stream, then
	 * we free ra->rrd and exit.
	 */
	while (rwa->err == 0) {
		if (issig(JUSTLOOKING) && issig(FORREAL)) {
			err = SET_ERROR(EINTR);
			break;
		}

		ASSERT3P(ra->rrd, ==, NULL);
		ra->rrd = ra->next_rrd;
		ra->next_rrd = NULL;
		/* Allocates and loads header into ra->next_rrd */
		err = receive_read_record(ra);

		if (ra->rrd->header.drr_type == DRR_END || err != 0) {
			kmem_free(ra->rrd, sizeof (*ra->rrd));
			ra->rrd = NULL;
			break;
		}

		bqueue_enqueue(&rwa->q, ra->rrd,
		    sizeof (struct receive_record_arg) + ra->rrd->payload_size);
		ra->rrd = NULL;
	}

	ASSERT3P(ra->rrd, ==, NULL);
	ra->rrd = kmem_zalloc(sizeof (*ra->rrd), KM_SLEEP);
	ra->rrd->eos_marker = B_TRUE;
	bqueue_enqueue(&rwa->q, ra->rrd, 1);

	mutex_enter(&rwa->mutex);
	while (!rwa->done) {
		cv_wait(&rwa->cv, &rwa->mutex);
	}
	mutex_exit(&rwa->mutex);

	/*
	 * If we are receiving a full stream as a clone, all object IDs which
	 * are greater than the maximum ID referenced in the stream are
	 * by definition unused and must be freed.
	 */
	if (drc->drc_clone && drc->drc_drrb->drr_fromguid == 0) {
		uint64_t obj = rwa->max_object + 1;
		int free_err = 0;
		int next_err = 0;

		while (next_err == 0) {
			free_err = dmu_free_long_object(rwa->os, obj);
			if (free_err != 0 && free_err != ENOENT)
				break;

			next_err = dmu_object_next(rwa->os, &obj, FALSE, 0);
		}

		if (err == 0) {
			if (free_err != 0 && free_err != ENOENT)
				err = free_err;
			else if (next_err != ESRCH)
				err = next_err;
		}
	}

	cv_destroy(&rwa->cv);
	mutex_destroy(&rwa->mutex);
	bqueue_destroy(&rwa->q);
	if (err == 0)
		err = rwa->err;

out:
	/*
	 * If we hit an error before we started the receive_writer_thread
	 * we need to clean up the next_rrd we create by processing the
	 * DRR_BEGIN record.
	 */
	if (ra->next_rrd != NULL)
		kmem_free(ra->next_rrd, sizeof (*ra->next_rrd));

	nvlist_free(begin_nvl);
	if ((featureflags & DMU_BACKUP_FEATURE_DEDUP) && (cleanup_fd != -1))
		zfs_onexit_fd_rele(cleanup_fd);

	if (err != 0) {
		/*
		 * Clean up references.  If receive is not resumable,
		 * destroy what we created, so we don't leave it in
		 * the inconsistent state.
		 */
		dmu_recv_cleanup_ds(drc);
		nvlist_free(drc->drc_keynvl);
	}

	*voffp = ra->voff;
	objlist_destroy(&ra->ignore_objlist);
	kmem_free(ra, sizeof (*ra));
	kmem_free(rwa, sizeof (*rwa));
	return (err);
}
static int
dmu_recv_end_check(void *arg, dmu_tx_t *tx)
{
	dmu_recv_cookie_t *drc = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	int error;

	ASSERT3P(drc->drc_ds->ds_owner, ==, dmu_recv_tag);

	if (!drc->drc_newfs) {
		dsl_dataset_t *origin_head;

		error = dsl_dataset_hold(dp, drc->drc_tofs, FTAG, &origin_head);
		if (error != 0)
			return (error);
		if (drc->drc_force) {
			/*
			 * We will destroy any snapshots in tofs (i.e. before
			 * origin_head) that are after the origin (which is
			 * the snap before drc_ds, because drc_ds can not
			 * have any snaps of its own).
			 */
			uint64_t obj;

			obj = dsl_dataset_phys(origin_head)->ds_prev_snap_obj;
			while (obj !=
			    dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj) {
				dsl_dataset_t *snap;
				error = dsl_dataset_hold_obj(dp, obj, FTAG,
				    &snap);
				if (error != 0)
					break;
				if (snap->ds_dir != origin_head->ds_dir)
					error = SET_ERROR(EINVAL);
				if (error == 0) {
					error = dsl_destroy_snapshot_check_impl(
					    snap, tx);
				}
				obj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
				dsl_dataset_rele(snap, FTAG);
				if (error != 0)
					break;
			}
			if (error != 0) {
				dsl_dataset_rele(origin_head, FTAG);
				return (error);
			}
		}
		if (drc->drc_keynvl != NULL) {
			error = dsl_crypto_recv_raw_key_check(drc->drc_ds,
			    drc->drc_keynvl, tx);
			if (error != 0) {
				dsl_dataset_rele(origin_head, FTAG);
				return (error);
			}
		}

		error = dsl_dataset_clone_swap_check_impl(drc->drc_ds,
		    origin_head, drc->drc_force, drc->drc_owner, tx);
		if (error != 0) {
			dsl_dataset_rele(origin_head, FTAG);
			return (error);
		}
		error = dsl_dataset_snapshot_check_impl(origin_head,
		    drc->drc_tosnap, tx, B_TRUE, 1, drc->drc_cred);
		dsl_dataset_rele(origin_head, FTAG);
		if (error != 0)
			return (error);

		error = dsl_destroy_head_check_impl(drc->drc_ds, 1);
	} else {
		error = dsl_dataset_snapshot_check_impl(drc->drc_ds,
		    drc->drc_tosnap, tx, B_TRUE, 1, drc->drc_cred);
	}
	return (error);
}
static void
dmu_recv_end_sync(void *arg, dmu_tx_t *tx)
{
	dmu_recv_cookie_t *drc = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	boolean_t encrypted = drc->drc_ds->ds_dir->dd_crypto_obj != 0;

	spa_history_log_internal_ds(drc->drc_ds, "finish receiving",
	    tx, "snap=%s", drc->drc_tosnap);
	drc->drc_ds->ds_objset->os_raw_receive = B_FALSE;

	if (!drc->drc_newfs) {
		dsl_dataset_t *origin_head;

		VERIFY0(dsl_dataset_hold(dp, drc->drc_tofs, FTAG,
		    &origin_head));

		if (drc->drc_force) {
			/*
			 * Destroy any snapshots of drc_tofs (origin_head)
			 * after the origin (the snap before drc_ds).
			 */
			uint64_t obj;

			obj = dsl_dataset_phys(origin_head)->ds_prev_snap_obj;
			while (obj !=
			    dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj) {
				dsl_dataset_t *snap;
				VERIFY0(dsl_dataset_hold_obj(dp, obj, FTAG,
				    &snap));
				ASSERT3P(snap->ds_dir, ==, origin_head->ds_dir);
				obj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
				dsl_destroy_snapshot_sync_impl(snap,
				    B_FALSE, tx);
				dsl_dataset_rele(snap, FTAG);
			}
		}
		if (drc->drc_keynvl != NULL) {
			dsl_crypto_recv_raw_key_sync(drc->drc_ds,
			    drc->drc_keynvl, tx);
			nvlist_free(drc->drc_keynvl);
			drc->drc_keynvl = NULL;
		}

		VERIFY3P(drc->drc_ds->ds_prev, ==, origin_head->ds_prev);

		dsl_dataset_clone_swap_sync_impl(drc->drc_ds,
		    origin_head, tx);
		dsl_dataset_snapshot_sync_impl(origin_head,
		    drc->drc_tosnap, tx);

		/* set snapshot's creation time and guid */
		dmu_buf_will_dirty(origin_head->ds_prev->ds_dbuf, tx);
		dsl_dataset_phys(origin_head->ds_prev)->ds_creation_time =
		    drc->drc_drrb->drr_creation_time;
		dsl_dataset_phys(origin_head->ds_prev)->ds_guid =
		    drc->drc_drrb->drr_toguid;
		dsl_dataset_phys(origin_head->ds_prev)->ds_flags &=
		    ~DS_FLAG_INCONSISTENT;

		dmu_buf_will_dirty(origin_head->ds_dbuf, tx);
		dsl_dataset_phys(origin_head)->ds_flags &=
		    ~DS_FLAG_INCONSISTENT;

		drc->drc_newsnapobj =
		    dsl_dataset_phys(origin_head)->ds_prev_snap_obj;

		dsl_dataset_rele(origin_head, FTAG);
		dsl_destroy_head_sync_impl(drc->drc_ds, tx);

		if (drc->drc_owner != NULL)
			VERIFY3P(origin_head->ds_owner, ==, drc->drc_owner);
	} else {
		dsl_dataset_t *ds = drc->drc_ds;

		dsl_dataset_snapshot_sync_impl(ds, drc->drc_tosnap, tx);

		/* set snapshot's creation time and guid */
		dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
		dsl_dataset_phys(ds->ds_prev)->ds_creation_time =
		    drc->drc_drrb->drr_creation_time;
		dsl_dataset_phys(ds->ds_prev)->ds_guid =
		    drc->drc_drrb->drr_toguid;
		dsl_dataset_phys(ds->ds_prev)->ds_flags &=
		    ~DS_FLAG_INCONSISTENT;

		dmu_buf_will_dirty(ds->ds_dbuf, tx);
		dsl_dataset_phys(ds)->ds_flags &= ~DS_FLAG_INCONSISTENT;
		if (dsl_dataset_has_resume_receive_state(ds)) {
			(void) zap_remove(dp->dp_meta_objset, ds->ds_object,
			    DS_FIELD_RESUME_FROMGUID, tx);
			(void) zap_remove(dp->dp_meta_objset, ds->ds_object,
			    DS_FIELD_RESUME_OBJECT, tx);
			(void) zap_remove(dp->dp_meta_objset, ds->ds_object,
			    DS_FIELD_RESUME_OFFSET, tx);
			(void) zap_remove(dp->dp_meta_objset, ds->ds_object,
			    DS_FIELD_RESUME_BYTES, tx);
			(void) zap_remove(dp->dp_meta_objset, ds->ds_object,
			    DS_FIELD_RESUME_TOGUID, tx);
			(void) zap_remove(dp->dp_meta_objset, ds->ds_object,
			    DS_FIELD_RESUME_TONAME, tx);
		}
		drc->drc_newsnapobj =
		    dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj;
	}

	/*
	 * If this is a raw receive, the crypt_keydata nvlist will include
	 * a to_ivset_guid for us to set on the new snapshot. This value
	 * will override the value generated by the snapshot code. However,
	 * this value may not be present, because older implementations of
	 * the raw send code did not include this value, and we are still
	 * allowed to receive them if the zfs_disable_ivset_guid_check
	 * tunable is set, in which case we will leave the newly-generated
	 * value.
	 */
	if (drc->drc_raw && drc->drc_ivset_guid != 0) {
		dmu_object_zapify(dp->dp_meta_objset, drc->drc_newsnapobj,
		    DMU_OT_DSL_DATASET, tx);
		VERIFY0(zap_update(dp->dp_meta_objset, drc->drc_newsnapobj,
		    DS_FIELD_IVSET_GUID, sizeof (uint64_t), 1,
		    &drc->drc_ivset_guid, tx));
	}

	zvol_create_minors(dp->dp_spa, drc->drc_tofs, B_TRUE);

	/*
	 * Release the hold from dmu_recv_begin.  This must be done before
	 * we return to open context, so that when we free the dataset's dnode
	 * we can evict its bonus buffer. Since the dataset may be destroyed
	 * at this point (and therefore won't have a valid pointer to the spa)
	 * we release the key mapping manually here while we do have a valid
	 * pointer, if it exists.
	 */
	if (!drc->drc_raw && encrypted) {
		(void) spa_keystore_remove_mapping(dmu_tx_pool(tx)->dp_spa,
		    drc->drc_ds->ds_object, drc->drc_ds);
	}
	dsl_dataset_disown(drc->drc_ds, 0, dmu_recv_tag);
	drc->drc_ds = NULL;
}
static int
add_ds_to_guidmap(const char *name, avl_tree_t *guid_map, uint64_t snapobj,
    boolean_t raw)
{
	dsl_pool_t *dp;
	dsl_dataset_t *snapds;
	guid_map_entry_t *gmep;
	objset_t *os;
	ds_hold_flags_t dsflags = (raw) ? 0 : DS_HOLD_FLAG_DECRYPT;
	int err;

	ASSERT(guid_map != NULL);

	err = dsl_pool_hold(name, FTAG, &dp);
	if (err != 0)
		return (err);
	gmep = kmem_alloc(sizeof (*gmep), KM_SLEEP);
	err = dsl_dataset_own_obj(dp, snapobj, dsflags, gmep, &snapds);
	if (err == 0) {
		/*
		 * If this is a deduplicated raw send stream, we need
		 * to make sure that we can still read raw blocks from
		 * earlier datasets in the stream, so we set the
		 * os_raw_receive flag now.
		 */
		if (raw) {
			err = dmu_objset_from_ds(snapds, &os);
			if (err != 0) {
				dsl_dataset_disown(snapds, dsflags, FTAG);
				dsl_pool_rele(dp, FTAG);
				kmem_free(gmep, sizeof (*gmep));
				return (err);
			}
			os->os_raw_receive = B_TRUE;
		}

		gmep->raw = raw;
		gmep->guid = dsl_dataset_phys(snapds)->ds_guid;
		gmep->gme_ds = snapds;
		avl_add(guid_map, gmep);
	} else {
		kmem_free(gmep, sizeof (*gmep));
	}

	dsl_pool_rele(dp, FTAG);
	return (err);
}
static int dmu_recv_end_modified_blocks = 3;

static int
dmu_recv_existing_end(dmu_recv_cookie_t *drc)
{
#ifdef _KERNEL
	/*
	 * We will be destroying the ds; make sure its origin is unmounted if
	 * necessary.
	 */
	char name[ZFS_MAX_DATASET_NAME_LEN];
	dsl_dataset_name(drc->drc_ds, name);
	zfs_destroy_unmount_origin(name);
#endif

	return (dsl_sync_task(drc->drc_tofs,
	    dmu_recv_end_check, dmu_recv_end_sync, drc,
	    dmu_recv_end_modified_blocks, ZFS_SPACE_CHECK_NORMAL));
}

static int
dmu_recv_new_end(dmu_recv_cookie_t *drc)
{
	return (dsl_sync_task(drc->drc_tofs,
	    dmu_recv_end_check, dmu_recv_end_sync, drc,
	    dmu_recv_end_modified_blocks, ZFS_SPACE_CHECK_NORMAL));
}
int
dmu_recv_end(dmu_recv_cookie_t *drc, void *owner)
{
	int error;

	drc->drc_owner = owner;

	if (drc->drc_newfs)
		error = dmu_recv_new_end(drc);
	else
		error = dmu_recv_existing_end(drc);

	if (error != 0) {
		dmu_recv_cleanup_ds(drc);
		nvlist_free(drc->drc_keynvl);
	} else if (drc->drc_guid_to_ds_map != NULL) {
		(void) add_ds_to_guidmap(drc->drc_tofs, drc->drc_guid_to_ds_map,
		    drc->drc_newsnapobj, drc->drc_raw);
	}
	return (error);
}
/*
 * Return TRUE if this objset is currently being received into.
 */
boolean_t
dmu_objset_is_receiving(objset_t *os)
{
	return (os->os_dsl_dataset != NULL &&
	    os->os_dsl_dataset->ds_owner == dmu_recv_tag);
}
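
/*
 * Example use (illustrative only): code that must not operate on an
 * objset mid-receive can guard itself with a check such as
 *
 *	if (dmu_objset_is_receiving(os))
 *		return (SET_ERROR(EBUSY));
 */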
#if defined(_KERNEL)
module_param(zfs_recv_queue_length, int, 0644);
MODULE_PARM_DESC(zfs_recv_queue_length, "Maximum receive queue length");
#endif