/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011 by Delphix. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2012, Joyent, Inc. All rights reserved.
 */
#include <sys/dmu_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dnode.h>
#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/zfs_ioctl.h>
#include <sys/zio_checksum.h>
#include <sys/zfs_znode.h>
#include <zfs_fletcher.h>
#include <sys/zfs_onexit.h>
/* Set this tunable to TRUE to replace corrupt data with 0x2f5baddb10c */
int zfs_send_corrupt_data = B_FALSE;

static char *dmu_recv_tag = "dmu_recv_tag";
static int
dump_bytes(dmu_sendarg_t *dsp, void *buf, int len)
{
	dsl_dataset_t *ds = dsp->dsa_os->os_dsl_dataset;
	ssize_t resid; /* have to get resid to get detailed errno */

	ASSERT3U(len % 8, ==, 0);

	fletcher_4_incremental_native(buf, len, &dsp->dsa_zc);
	dsp->dsa_err = vn_rdwr(UIO_WRITE, dsp->dsa_vp,
	    (caddr_t)buf, len,
	    0, UIO_SYSSPACE, FAPPEND, RLIM64_INFINITY, CRED(), &resid);

	mutex_enter(&ds->ds_sendstream_lock);
	*dsp->dsa_off += len;
	mutex_exit(&ds->ds_sendstream_lock);

	return (dsp->dsa_err);
}
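
/*
 * Note that every record in a send stream goes out through dump_bytes(),
 * so dsp->dsa_zc accumulates a fletcher-4 checksum of everything written
 * so far.  dmu_send() copies that running value into the DRR_END record,
 * e.g.
 *
 *	drr->drr_u.drr_end.drr_checksum = dsp->dsa_zc;
 *
 * which is what the receive path later checks against its own incrementally
 * computed checksum (see the DRR_END case in dmu_recv_stream()).
 */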
static int
dump_free(dmu_sendarg_t *dsp, uint64_t object, uint64_t offset,
    uint64_t length)
{
	struct drr_free *drrf = &(dsp->dsa_drr->drr_u.drr_free);

	if (length != -1ULL && offset + length < offset)
		length = -1ULL;

	/*
	 * If there is a pending op, but it's not PENDING_FREE, push it out,
	 * since free block aggregation can only be done for blocks of the
	 * same type (i.e., DRR_FREE records can only be aggregated with
	 * other DRR_FREE records.  DRR_FREEOBJECTS records can only be
	 * aggregated with other DRR_FREEOBJECTS records).
	 */
	if (dsp->dsa_pending_op != PENDING_NONE &&
	    dsp->dsa_pending_op != PENDING_FREE) {
		if (dump_bytes(dsp, dsp->dsa_drr,
		    sizeof (dmu_replay_record_t)) != 0)
			return (EINTR);
		dsp->dsa_pending_op = PENDING_NONE;
	}

	if (dsp->dsa_pending_op == PENDING_FREE) {
		/*
		 * There should never be a PENDING_FREE if length is -1
		 * (because dump_dnode is the only place where this
		 * function is called with a -1, and only after flushing
		 * any pending record).
		 */
		ASSERT(length != -1ULL);
		/*
		 * Check to see whether this free block can be aggregated
		 * with the pending one.
		 */
		if (drrf->drr_object == object && drrf->drr_offset +
		    drrf->drr_length == offset) {
			drrf->drr_length += length;
			return (0);
		} else {
			/* not a continuation.  Push out pending record */
			if (dump_bytes(dsp, dsp->dsa_drr,
			    sizeof (dmu_replay_record_t)) != 0)
				return (EINTR);
			dsp->dsa_pending_op = PENDING_NONE;
		}
	}
	/* create a FREE record and make it pending */
	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_FREE;
	drrf->drr_object = object;
	drrf->drr_offset = offset;
	drrf->drr_length = length;
	drrf->drr_toguid = dsp->dsa_toguid;
	if (length == -1ULL) {
		if (dump_bytes(dsp, dsp->dsa_drr,
		    sizeof (dmu_replay_record_t)) != 0)
			return (EINTR);
	} else {
		dsp->dsa_pending_op = PENDING_FREE;
	}

	return (0);
}
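
/*
 * Example of the aggregation above: freeing object 5 at offset 0 for 128K
 * and then at offset 128K for another 128K leaves a single pending
 * DRR_FREE record covering { object 5, offset 0, length 256K }.  A free
 * against a different object or a non-adjacent offset pushes the pending
 * record out first.  dump_freeobjects() below applies the same idea to
 * runs of freed object numbers.
 */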
static int
dump_data(dmu_sendarg_t *dsp, dmu_object_type_t type,
    uint64_t object, uint64_t offset, int blksz, const blkptr_t *bp, void *data)
{
	struct drr_write *drrw = &(dsp->dsa_drr->drr_u.drr_write);

	/*
	 * If there is any kind of pending aggregation (currently either
	 * a grouping of free objects or free blocks), push it out to
	 * the stream, since aggregation can't be done across operations
	 * of different types.
	 */
	if (dsp->dsa_pending_op != PENDING_NONE) {
		if (dump_bytes(dsp, dsp->dsa_drr,
		    sizeof (dmu_replay_record_t)) != 0)
			return (EINTR);
		dsp->dsa_pending_op = PENDING_NONE;
	}
	/* write a DATA record */
	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_WRITE;
	drrw->drr_object = object;
	drrw->drr_type = type;
	drrw->drr_offset = offset;
	drrw->drr_length = blksz;
	drrw->drr_toguid = dsp->dsa_toguid;
	drrw->drr_checksumtype = BP_GET_CHECKSUM(bp);
	if (zio_checksum_table[drrw->drr_checksumtype].ci_dedup)
		drrw->drr_checksumflags |= DRR_CHECKSUM_DEDUP;
	DDK_SET_LSIZE(&drrw->drr_key, BP_GET_LSIZE(bp));
	DDK_SET_PSIZE(&drrw->drr_key, BP_GET_PSIZE(bp));
	DDK_SET_COMPRESS(&drrw->drr_key, BP_GET_COMPRESS(bp));
	drrw->drr_key.ddk_cksum = bp->blk_cksum;

	if (dump_bytes(dsp, dsp->dsa_drr, sizeof (dmu_replay_record_t)) != 0)
		return (EINTR);
	if (dump_bytes(dsp, data, blksz) != 0)
		return (EINTR);
	return (0);
}
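
/*
 * The DRR_WRITE record written above is immediately followed in the stream
 * by the blksz bytes of block data, so each block costs one
 * dmu_replay_record_t of header plus its payload.  When the block's
 * checksum type is dedup-capable, DRR_CHECKSUM_DEDUP is set and the
 * checksum carried in drr_key can be used to recognize duplicate blocks
 * when building a dedup'ed stream; the receive side resolves such
 * references in restore_write_byref().
 */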
static int
dump_spill(dmu_sendarg_t *dsp, uint64_t object, int blksz, void *data)
{
	struct drr_spill *drrs = &(dsp->dsa_drr->drr_u.drr_spill);

	if (dsp->dsa_pending_op != PENDING_NONE) {
		if (dump_bytes(dsp, dsp->dsa_drr,
		    sizeof (dmu_replay_record_t)) != 0)
			return (EINTR);
		dsp->dsa_pending_op = PENDING_NONE;
	}

	/* write a SPILL record */
	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_SPILL;
	drrs->drr_object = object;
	drrs->drr_length = blksz;
	drrs->drr_toguid = dsp->dsa_toguid;

	if (dump_bytes(dsp, dsp->dsa_drr, sizeof (dmu_replay_record_t)))
		return (EINTR);
	if (dump_bytes(dsp, data, blksz))
		return (EINTR);
	return (0);
}
static int
dump_freeobjects(dmu_sendarg_t *dsp, uint64_t firstobj, uint64_t numobjs)
{
	struct drr_freeobjects *drrfo = &(dsp->dsa_drr->drr_u.drr_freeobjects);

	/*
	 * If there is a pending op, but it's not PENDING_FREEOBJECTS,
	 * push it out, since free block aggregation can only be done for
	 * blocks of the same type (i.e., DRR_FREE records can only be
	 * aggregated with other DRR_FREE records.  DRR_FREEOBJECTS records
	 * can only be aggregated with other DRR_FREEOBJECTS records).
	 */
	if (dsp->dsa_pending_op != PENDING_NONE &&
	    dsp->dsa_pending_op != PENDING_FREEOBJECTS) {
		if (dump_bytes(dsp, dsp->dsa_drr,
		    sizeof (dmu_replay_record_t)) != 0)
			return (EINTR);
		dsp->dsa_pending_op = PENDING_NONE;
	}
	if (dsp->dsa_pending_op == PENDING_FREEOBJECTS) {
		/*
		 * See whether this free object array can be aggregated
		 * with the pending one.
		 */
		if (drrfo->drr_firstobj + drrfo->drr_numobjs == firstobj) {
			drrfo->drr_numobjs += numobjs;
			return (0);
		} else {
			/* can't be aggregated.  Push out pending record */
			if (dump_bytes(dsp, dsp->dsa_drr,
			    sizeof (dmu_replay_record_t)) != 0)
				return (EINTR);
			dsp->dsa_pending_op = PENDING_NONE;
		}
	}

	/* write a FREEOBJECTS record */
	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_FREEOBJECTS;
	drrfo->drr_firstobj = firstobj;
	drrfo->drr_numobjs = numobjs;
	drrfo->drr_toguid = dsp->dsa_toguid;

	dsp->dsa_pending_op = PENDING_FREEOBJECTS;

	return (0);
}
static int
dump_dnode(dmu_sendarg_t *dsp, uint64_t object, dnode_phys_t *dnp)
{
	struct drr_object *drro = &(dsp->dsa_drr->drr_u.drr_object);

	if (dnp == NULL || dnp->dn_type == DMU_OT_NONE)
		return (dump_freeobjects(dsp, object, 1));

	if (dsp->dsa_pending_op != PENDING_NONE) {
		if (dump_bytes(dsp, dsp->dsa_drr,
		    sizeof (dmu_replay_record_t)) != 0)
			return (EINTR);
		dsp->dsa_pending_op = PENDING_NONE;
	}

	/* write an OBJECT record */
	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_OBJECT;
	drro->drr_object = object;
	drro->drr_type = dnp->dn_type;
	drro->drr_bonustype = dnp->dn_bonustype;
	drro->drr_blksz = dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT;
	drro->drr_bonuslen = dnp->dn_bonuslen;
	drro->drr_checksumtype = dnp->dn_checksum;
	drro->drr_compress = dnp->dn_compress;
	drro->drr_toguid = dsp->dsa_toguid;

	if (dump_bytes(dsp, dsp->dsa_drr, sizeof (dmu_replay_record_t)) != 0)
		return (EINTR);

	if (dump_bytes(dsp, DN_BONUS(dnp), P2ROUNDUP(dnp->dn_bonuslen, 8)) != 0)
		return (EINTR);

	/* free anything past the end of the file */
	if (dump_free(dsp, object, (dnp->dn_maxblkid + 1) *
	    (dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT), -1ULL))
		return (EINTR);
	if (dsp->dsa_err)
		return (EINTR);
	return (0);
}
#define	BP_SPAN(dnp, level) \
	(((uint64_t)dnp->dn_datablkszsec) << (SPA_MINBLOCKSHIFT + \
	(level) * (dnp->dn_indblkshift - SPA_BLKPTRSHIFT)))
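
/*
 * Example, assuming the usual constants (SPA_MINBLOCKSHIFT == 9,
 * SPA_BLKPTRSHIFT == 7): a dnode with 128K data blocks
 * (dn_datablkszsec == 256) and 16K indirect blocks (dn_indblkshift == 14)
 * gives BP_SPAN(dnp, 0) == 128K and BP_SPAN(dnp, 1) == 256 << 16 == 16M,
 * since each 16K indirect block holds 128 block pointers.
 */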
static int
backup_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp, arc_buf_t *pbuf,
    const zbookmark_t *zb, const dnode_phys_t *dnp, void *arg)
{
	dmu_sendarg_t *dsp = arg;
	dmu_object_type_t type = bp ? BP_GET_TYPE(bp) : DMU_OT_NONE;
	int err = 0;

	if (issig(JUSTLOOKING) && issig(FORREAL))
		return (EINTR);

	if (zb->zb_object != DMU_META_DNODE_OBJECT &&
	    DMU_OBJECT_IS_SPECIAL(zb->zb_object)) {
		return (0);
	} else if (bp == NULL && zb->zb_object == DMU_META_DNODE_OBJECT) {
		uint64_t span = BP_SPAN(dnp, zb->zb_level);
		uint64_t dnobj = (zb->zb_blkid * span) >> DNODE_SHIFT;
		err = dump_freeobjects(dsp, dnobj, span >> DNODE_SHIFT);
	} else if (bp == NULL) {
		uint64_t span = BP_SPAN(dnp, zb->zb_level);
		err = dump_free(dsp, zb->zb_object, zb->zb_blkid * span, span);
	} else if (zb->zb_level > 0 || type == DMU_OT_OBJSET) {
		return (0);
	} else if (type == DMU_OT_DNODE) {
		dnode_phys_t *blk;
		int i;
		int blksz = BP_GET_LSIZE(bp);
		uint32_t aflags = ARC_WAIT;
		arc_buf_t *abuf;

		if (dsl_read(NULL, spa, bp, pbuf,
		    arc_getbuf_func, &abuf, ZIO_PRIORITY_ASYNC_READ,
		    ZIO_FLAG_CANFAIL, &aflags, zb) != 0)
			return (EIO);

		blk = abuf->b_data;
		for (i = 0; i < blksz >> DNODE_SHIFT; i++) {
			uint64_t dnobj = (zb->zb_blkid <<
			    (DNODE_BLOCK_SHIFT - DNODE_SHIFT)) + i;
			err = dump_dnode(dsp, dnobj, blk+i);
			if (err)
				break;
		}
		(void) arc_buf_remove_ref(abuf, &abuf);
	} else if (type == DMU_OT_SA) {
		uint32_t aflags = ARC_WAIT;
		arc_buf_t *abuf;
		int blksz = BP_GET_LSIZE(bp);

		if (arc_read_nolock(NULL, spa, bp,
		    arc_getbuf_func, &abuf, ZIO_PRIORITY_ASYNC_READ,
		    ZIO_FLAG_CANFAIL, &aflags, zb) != 0)
			return (EIO);

		err = dump_spill(dsp, zb->zb_object, blksz, abuf->b_data);
		(void) arc_buf_remove_ref(abuf, &abuf);
	} else { /* it's a level-0 block of a regular object */
		uint32_t aflags = ARC_WAIT;
		arc_buf_t *abuf;
		int blksz = BP_GET_LSIZE(bp);

		if (dsl_read(NULL, spa, bp, pbuf,
		    arc_getbuf_func, &abuf, ZIO_PRIORITY_ASYNC_READ,
		    ZIO_FLAG_CANFAIL, &aflags, zb) != 0) {
			if (zfs_send_corrupt_data) {
				uint64_t *ptr;
				/* Send a block filled with 0x"zfs badd bloc" */
				abuf = arc_buf_alloc(spa, blksz, &abuf,
				    ARC_BUFC_DATA);
				for (ptr = abuf->b_data;
				    (char *)ptr < (char *)abuf->b_data + blksz;
				    ptr++)
					*ptr = 0x2f5baddb10c;
			} else {
				return (EIO);
			}
		}

		err = dump_data(dsp, type, zb->zb_object, zb->zb_blkid * blksz,
		    blksz, bp, abuf->b_data);
		(void) arc_buf_remove_ref(abuf, &abuf);
	}

	ASSERT(err == 0 || err == EINTR);
	return (err);
}
int
dmu_send(objset_t *tosnap, objset_t *fromsnap, boolean_t fromorigin,
    int outfd, vnode_t *vp, offset_t *off)
{
	dsl_dataset_t *ds = tosnap->os_dsl_dataset;
	dsl_dataset_t *fromds = fromsnap ? fromsnap->os_dsl_dataset : NULL;
	dmu_replay_record_t *drr;
	dmu_sendarg_t *dsp;
	int err;
	uint64_t fromtxg = 0;

	/* tosnap must be a snapshot */
	if (ds->ds_phys->ds_next_snap_obj == 0)
		return (EINVAL);

	/* fromsnap must be an earlier snapshot from the same fs as tosnap */
	if (fromds && (ds->ds_dir != fromds->ds_dir ||
	    fromds->ds_phys->ds_creation_txg >= ds->ds_phys->ds_creation_txg))
		return (EXDEV);

	if (fromorigin) {
		dsl_pool_t *dp = ds->ds_dir->dd_pool;

		if (fromsnap)
			return (EINVAL);

		if (dsl_dir_is_clone(ds->ds_dir)) {
			rw_enter(&dp->dp_config_rwlock, RW_READER);
			err = dsl_dataset_hold_obj(dp,
			    ds->ds_dir->dd_phys->dd_origin_obj, FTAG, &fromds);
			rw_exit(&dp->dp_config_rwlock);
			if (err)
				return (err);
		} else {
			fromorigin = B_FALSE;
		}
	}

	drr = kmem_zalloc(sizeof (dmu_replay_record_t), KM_SLEEP);
	drr->drr_type = DRR_BEGIN;
	drr->drr_u.drr_begin.drr_magic = DMU_BACKUP_MAGIC;
	DMU_SET_STREAM_HDRTYPE(drr->drr_u.drr_begin.drr_versioninfo,
	    DMU_SUBSTREAM);

	if (dmu_objset_type(tosnap) == DMU_OST_ZFS) {
		uint64_t version;
		if (zfs_get_zplprop(tosnap, ZFS_PROP_VERSION, &version) != 0) {
			kmem_free(drr, sizeof (dmu_replay_record_t));
			return (EINVAL);
		}
		if (version == ZPL_VERSION_SA) {
			DMU_SET_FEATUREFLAGS(
			    drr->drr_u.drr_begin.drr_versioninfo,
			    DMU_BACKUP_FEATURE_SA_SPILL);
		}
	}

	drr->drr_u.drr_begin.drr_creation_time =
	    ds->ds_phys->ds_creation_time;
	drr->drr_u.drr_begin.drr_type = tosnap->os_phys->os_type;
	if (fromorigin)
		drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_CLONE;
	drr->drr_u.drr_begin.drr_toguid = ds->ds_phys->ds_guid;
	if (ds->ds_phys->ds_flags & DS_FLAG_CI_DATASET)
		drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_CI_DATA;

	if (fromds)
		drr->drr_u.drr_begin.drr_fromguid = fromds->ds_phys->ds_guid;
	dsl_dataset_name(ds, drr->drr_u.drr_begin.drr_toname);

	if (fromds)
		fromtxg = fromds->ds_phys->ds_creation_txg;
	if (fromorigin)
		dsl_dataset_rele(fromds, FTAG);

	dsp = kmem_zalloc(sizeof (dmu_sendarg_t), KM_SLEEP);

	dsp->dsa_drr = drr;
	dsp->dsa_vp = vp;
	dsp->dsa_outfd = outfd;
	dsp->dsa_proc = curproc;
	dsp->dsa_os = tosnap;
	dsp->dsa_off = off;
	dsp->dsa_toguid = ds->ds_phys->ds_guid;
	ZIO_SET_CHECKSUM(&dsp->dsa_zc, 0, 0, 0, 0);
	dsp->dsa_pending_op = PENDING_NONE;

	mutex_enter(&ds->ds_sendstream_lock);
	list_insert_head(&ds->ds_sendstreams, dsp);
	mutex_exit(&ds->ds_sendstream_lock);

	if (dump_bytes(dsp, drr, sizeof (dmu_replay_record_t)) != 0) {
		err = dsp->dsa_err;
		goto out;
	}

	err = traverse_dataset(ds, fromtxg, TRAVERSE_PRE | TRAVERSE_PREFETCH,
	    backup_cb, dsp);

	if (dsp->dsa_pending_op != PENDING_NONE)
		if (dump_bytes(dsp, drr, sizeof (dmu_replay_record_t)) != 0)
			err = EINTR;

	if (err != 0) {
		if (err == EINTR && dsp->dsa_err)
			err = dsp->dsa_err;
		goto out;
	}

	bzero(drr, sizeof (dmu_replay_record_t));
	drr->drr_type = DRR_END;
	drr->drr_u.drr_end.drr_checksum = dsp->dsa_zc;
	drr->drr_u.drr_end.drr_toguid = dsp->dsa_toguid;

	if (dump_bytes(dsp, drr, sizeof (dmu_replay_record_t)) != 0) {
		err = dsp->dsa_err;
		goto out;
	}

out:
	mutex_enter(&ds->ds_sendstream_lock);
	list_remove(&ds->ds_sendstreams, dsp);
	mutex_exit(&ds->ds_sendstream_lock);

	kmem_free(drr, sizeof (dmu_replay_record_t));
	kmem_free(dsp, sizeof (dmu_sendarg_t));

	return (err);
}
int
dmu_send_estimate(objset_t *tosnap, objset_t *fromsnap, boolean_t fromorigin,
    uint64_t *sizep)
{
	dsl_dataset_t *ds = tosnap->os_dsl_dataset;
	dsl_dataset_t *fromds = fromsnap ? fromsnap->os_dsl_dataset : NULL;
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	int err;
	uint64_t size, recordsize;

	/* tosnap must be a snapshot */
	if (ds->ds_phys->ds_next_snap_obj == 0)
		return (EINVAL);

	/* fromsnap must be an earlier snapshot from the same fs as tosnap */
	if (fromds && (ds->ds_dir != fromds->ds_dir ||
	    fromds->ds_phys->ds_creation_txg >= ds->ds_phys->ds_creation_txg))
		return (EXDEV);

	if (fromorigin) {
		if (fromsnap)
			return (EINVAL);

		if (dsl_dir_is_clone(ds->ds_dir)) {
			rw_enter(&dp->dp_config_rwlock, RW_READER);
			err = dsl_dataset_hold_obj(dp,
			    ds->ds_dir->dd_phys->dd_origin_obj, FTAG, &fromds);
			rw_exit(&dp->dp_config_rwlock);
			if (err)
				return (err);
		} else {
			fromorigin = B_FALSE;
		}
	}

	/* Get uncompressed size estimate of changed data. */
	if (fromds == NULL) {
		size = ds->ds_phys->ds_uncompressed_bytes;
	} else {
		uint64_t used, comp;
		err = dsl_dataset_space_written(fromds, ds,
		    &used, &comp, &size);
		if (fromorigin)
			dsl_dataset_rele(fromds, FTAG);
		if (err)
			return (err);
	}

	/*
	 * Assume that space (both on-disk and in-stream) is dominated by
	 * data.  We will adjust for indirect blocks and the copies property,
	 * but ignore per-object space used (eg, dnodes and DRR_OBJECT records).
	 */

	/*
	 * Subtract out approximate space used by indirect blocks.
	 * Assume most space is used by data blocks (non-indirect, non-dnode).
	 * Assume all blocks are recordsize.  Assume ditto blocks and
	 * internal fragmentation counter out compression.
	 *
	 * Therefore, space used by indirect blocks is sizeof(blkptr_t) per
	 * block, which we observe in practice.
	 */
	rw_enter(&dp->dp_config_rwlock, RW_READER);
	err = dsl_prop_get_ds(ds, "recordsize",
	    sizeof (recordsize), 1, &recordsize, NULL);
	rw_exit(&dp->dp_config_rwlock);
	if (err)
		return (err);
	size -= size / recordsize * sizeof (blkptr_t);

	/* Add in the space for the record associated with each block. */
	size += size / recordsize * sizeof (dmu_replay_record_t);

	*sizep = size;

	return (0);
}
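
/*
 * Worked example of the adjustment above: 10G of changed data with a 128K
 * recordsize is treated as roughly 81920 blocks, so the estimate loses
 * 81920 * sizeof (blkptr_t) (about 10M) for indirect blocks and gains
 * 81920 * sizeof (dmu_replay_record_t) for the per-block DRR_WRITE
 * headers added back in.
 */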
struct recvbeginsyncarg {
	const char *tofs;
	const char *tosnap;
	dsl_dataset_t *origin;
	cred_t *cr;
	dmu_objset_type_t type;
	void *tag;
	boolean_t force;
	uint64_t dsflags;
	char clonelastname[MAXNAMELEN];
	dsl_dataset_t *ds; /* the ds to recv into; returned from the syncfunc */
	uint64_t fromguid;
};
static int
recv_new_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dir_t *dd = arg1;
	struct recvbeginsyncarg *rbsa = arg2;
	objset_t *mos = dd->dd_pool->dp_meta_objset;
	uint64_t val;
	int err;

	err = zap_lookup(mos, dd->dd_phys->dd_child_dir_zapobj,
	    strrchr(rbsa->tofs, '/') + 1, sizeof (uint64_t), 1, &val);
	if (err != ENOENT)
		return (err ? err : EEXIST);

	if (rbsa->origin) {
		/* make sure it's a snap in the same pool */
		if (rbsa->origin->ds_dir->dd_pool != dd->dd_pool)
			return (EXDEV);
		if (!dsl_dataset_is_snapshot(rbsa->origin))
			return (EINVAL);
		if (rbsa->origin->ds_phys->ds_guid != rbsa->fromguid)
			return (ENODEV);
	}

	return (0);
}
static void
recv_new_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dir_t *dd = arg1;
	struct recvbeginsyncarg *rbsa = arg2;
	uint64_t flags = DS_FLAG_INCONSISTENT | rbsa->dsflags;
	uint64_t dsobj;

	/* Create and open new dataset. */
	dsobj = dsl_dataset_create_sync(dd, strrchr(rbsa->tofs, '/') + 1,
	    rbsa->origin, flags, rbsa->cr, tx);
	VERIFY(0 == dsl_dataset_own_obj(dd->dd_pool, dsobj,
	    B_TRUE, dmu_recv_tag, &rbsa->ds));

	if (rbsa->origin == NULL) {
		(void) dmu_objset_create_impl(dd->dd_pool->dp_spa,
		    rbsa->ds, &rbsa->ds->ds_phys->ds_bp, rbsa->type, tx);
	}

	spa_history_log_internal(LOG_DS_REPLAY_FULL_SYNC,
	    dd->dd_pool->dp_spa, tx, "dataset = %lld", dsobj);
}
static int
recv_existing_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	struct recvbeginsyncarg *rbsa = arg2;
	int err;
	uint64_t val;

	/* must not have any changes since most recent snapshot */
	if (!rbsa->force && dsl_dataset_modified_since_lastsnap(ds))
		return (ETXTBSY);

	/* new snapshot name must not exist */
	err = zap_lookup(ds->ds_dir->dd_pool->dp_meta_objset,
	    ds->ds_phys->ds_snapnames_zapobj, rbsa->tosnap, 8, 1, &val);
	if (err == 0)
		return (EEXIST);
	if (err != ENOENT)
		return (err);

	if (rbsa->fromguid) {
		/* if incremental, most recent snapshot must match fromguid */
		if (ds->ds_prev == NULL)
			return (ENODEV);

		/*
		 * most recent snapshot must match fromguid, or there are no
		 * changes since the fromguid one
		 */
		if (ds->ds_prev->ds_phys->ds_guid != rbsa->fromguid) {
			uint64_t birth = ds->ds_prev->ds_phys->ds_bp.blk_birth;
			uint64_t obj = ds->ds_prev->ds_phys->ds_prev_snap_obj;
			while (obj != 0) {
				dsl_dataset_t *snap;
				err = dsl_dataset_hold_obj(ds->ds_dir->dd_pool,
				    obj, FTAG, &snap);
				if (err)
					return (ENODEV);
				if (snap->ds_phys->ds_creation_txg < birth) {
					dsl_dataset_rele(snap, FTAG);
					return (ENODEV);
				}
				if (snap->ds_phys->ds_guid == rbsa->fromguid) {
					dsl_dataset_rele(snap, FTAG);
					break; /* it's ok */
				}
				obj = snap->ds_phys->ds_prev_snap_obj;
				dsl_dataset_rele(snap, FTAG);
			}
			if (obj == 0)
				return (ENODEV);
		}
	} else {
		/* if full, most recent snapshot must be $ORIGIN */
		if (ds->ds_phys->ds_prev_snap_txg >= TXG_INITIAL)
			return (ENODEV);
	}

	/* temporary clone name must not exist */
	err = zap_lookup(ds->ds_dir->dd_pool->dp_meta_objset,
	    ds->ds_dir->dd_phys->dd_child_dir_zapobj,
	    rbsa->clonelastname, 8, 1, &val);
	if (err == 0)
		return (EEXIST);
	if (err != ENOENT)
		return (err);

	return (0);
}
static void
recv_existing_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ohds = arg1;
	struct recvbeginsyncarg *rbsa = arg2;
	dsl_pool_t *dp = ohds->ds_dir->dd_pool;
	dsl_dataset_t *cds;
	uint64_t flags = DS_FLAG_INCONSISTENT | rbsa->dsflags;
	uint64_t dsobj;

	/* create and open the temporary clone */
	dsobj = dsl_dataset_create_sync(ohds->ds_dir, rbsa->clonelastname,
	    ohds->ds_prev, flags, rbsa->cr, tx);
	VERIFY(0 == dsl_dataset_own_obj(dp, dsobj, B_TRUE, dmu_recv_tag, &cds));

	/*
	 * If we actually created a non-clone, we need to create the
	 * objset in our new dataset.
	 */
	if (BP_IS_HOLE(dsl_dataset_get_blkptr(cds))) {
		(void) dmu_objset_create_impl(dp->dp_spa,
		    cds, dsl_dataset_get_blkptr(cds), rbsa->type, tx);
	}

	rbsa->ds = cds;

	spa_history_log_internal(LOG_DS_REPLAY_INC_SYNC,
	    dp->dp_spa, tx, "dataset = %lld", dsobj);
}
static boolean_t
dmu_recv_verify_features(dsl_dataset_t *ds, struct drr_begin *drrb)
{
	int featureflags;

	featureflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo);

	/* Verify pool version supports SA if SA_SPILL feature set */
	return ((featureflags & DMU_BACKUP_FEATURE_SA_SPILL) &&
	    (spa_version(dsl_dataset_get_spa(ds)) < SPA_VERSION_SA));
}
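
/*
 * For example, a stream whose DRR_BEGIN record carries
 * DMU_BACKUP_FEATURE_SA_SPILL (it may contain DRR_SPILL records for
 * system-attribute spill blocks) is rejected here when the receiving
 * pool is older than SPA_VERSION_SA, since such a pool cannot represent
 * the spill blocks the stream would try to restore.
 */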
/*
 * NB: callers *MUST* call dmu_recv_stream() if dmu_recv_begin()
 * succeeds; otherwise we will leak the holds on the datasets.
 */
int
dmu_recv_begin(char *tofs, char *tosnap, char *top_ds, struct drr_begin *drrb,
    boolean_t force, objset_t *origin, dmu_recv_cookie_t *drc)
{
	int err = 0;
	boolean_t byteswap;
	struct recvbeginsyncarg rbsa = { 0 };
	uint64_t versioninfo;
	int flags;
	dsl_dataset_t *ds;

	if (drrb->drr_magic == DMU_BACKUP_MAGIC)
		byteswap = FALSE;
	else if (drrb->drr_magic == BSWAP_64(DMU_BACKUP_MAGIC))
		byteswap = TRUE;
	else
		return (EINVAL);

	rbsa.tofs = tofs;
	rbsa.tosnap = tosnap;
	rbsa.origin = origin ? origin->os_dsl_dataset : NULL;
	rbsa.fromguid = drrb->drr_fromguid;
	rbsa.type = drrb->drr_type;
	rbsa.tag = FTAG;
	rbsa.dsflags = 0;
	rbsa.cr = CRED();
	versioninfo = drrb->drr_versioninfo;
	flags = drrb->drr_flags;

	if (byteswap) {
		rbsa.type = BSWAP_32(rbsa.type);
		rbsa.fromguid = BSWAP_64(rbsa.fromguid);
		versioninfo = BSWAP_64(versioninfo);
		flags = BSWAP_32(flags);
	}

	if (DMU_GET_STREAM_HDRTYPE(versioninfo) == DMU_COMPOUNDSTREAM ||
	    rbsa.type >= DMU_OST_NUMTYPES ||
	    ((flags & DRR_FLAG_CLONE) && origin == NULL))
		return (EINVAL);

	if (flags & DRR_FLAG_CI_DATA)
		rbsa.dsflags = DS_FLAG_CI_DATASET;

	bzero(drc, sizeof (dmu_recv_cookie_t));
	drc->drc_drrb = drrb;
	drc->drc_tosnap = tosnap;
	drc->drc_top_ds = top_ds;
	drc->drc_force = force;

	/*
	 * Process the begin in syncing context.
	 */

	/* open the dataset we are logically receiving into */
	err = dsl_dataset_hold(tofs, dmu_recv_tag, &ds);
	if (err == 0) {
		if (dmu_recv_verify_features(ds, drrb)) {
			dsl_dataset_rele(ds, dmu_recv_tag);
			return (ENOTSUP);
		}
		/* target fs already exists; recv into temp clone */

		/* Can't recv a clone into an existing fs */
		if (flags & DRR_FLAG_CLONE) {
			dsl_dataset_rele(ds, dmu_recv_tag);
			return (EINVAL);
		}

		/* must not have an incremental recv already in progress */
		if (!mutex_tryenter(&ds->ds_recvlock)) {
			dsl_dataset_rele(ds, dmu_recv_tag);
			return (EBUSY);
		}

		/* tmp clone name is: tofs/%tosnap" */
		(void) snprintf(rbsa.clonelastname, sizeof (rbsa.clonelastname),
		    "%%%s", tosnap);
		rbsa.force = force;
		err = dsl_sync_task_do(ds->ds_dir->dd_pool,
		    recv_existing_check, recv_existing_sync, ds, &rbsa, 5);
		if (err) {
			mutex_exit(&ds->ds_recvlock);
			dsl_dataset_rele(ds, dmu_recv_tag);
			return (err);
		}
		drc->drc_logical_ds = ds;
		drc->drc_real_ds = rbsa.ds;
	} else if (err == ENOENT) {
		/* target fs does not exist; must be a full backup or clone */
		char *cp;

		/*
		 * If it's a non-clone incremental, we are missing the
		 * target fs, so fail the recv.
		 */
		if (rbsa.fromguid && !(flags & DRR_FLAG_CLONE))
			return (ENOENT);

		/* Open the parent of tofs */
		cp = strrchr(tofs, '/');
		*cp = '\0';
		err = dsl_dataset_hold(tofs, FTAG, &ds);
		*cp = '/';
		if (err)
			return (err);

		if (dmu_recv_verify_features(ds, drrb)) {
			dsl_dataset_rele(ds, FTAG);
			return (ENOTSUP);
		}

		err = dsl_sync_task_do(ds->ds_dir->dd_pool,
		    recv_new_check, recv_new_sync, ds->ds_dir, &rbsa, 5);
		dsl_dataset_rele(ds, FTAG);
		if (err)
			return (err);
		drc->drc_logical_ds = drc->drc_real_ds = rbsa.ds;
		drc->drc_newfs = B_TRUE;
	}

	return (err);
}
struct restorearg {
	int err;
	int byteswap;
	vnode_t *vp;
	char *buf;
	uint64_t voff;
	int bufsize; /* amount of memory allocated for buf */
	zio_cksum_t cksum;
	avl_tree_t *guid_to_ds_map;
};

typedef struct guid_map_entry {
	uint64_t	guid;
	dsl_dataset_t	*gme_ds;
	avl_node_t	avlnode;
} guid_map_entry_t;

static int
guid_compare(const void *arg1, const void *arg2)
{
	const guid_map_entry_t *gmep1 = arg1;
	const guid_map_entry_t *gmep2 = arg2;

	if (gmep1->guid < gmep2->guid)
		return (-1);
	else if (gmep1->guid > gmep2->guid)
		return (1);
	return (0);
}

static void
free_guid_map_onexit(void *arg)
{
	avl_tree_t *ca = arg;
	void *cookie = NULL;
	guid_map_entry_t *gmep;

	while ((gmep = avl_destroy_nodes(ca, &cookie)) != NULL) {
		dsl_dataset_rele(gmep->gme_ds, ca);
		kmem_free(gmep, sizeof (guid_map_entry_t));
	}
	avl_destroy(ca);
	kmem_free(ca, sizeof (avl_tree_t));
}
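
/*
 * The tree ordered by guid_compare() is keyed by snapshot guid: each
 * completed receive adds a { guid, dataset } pair via add_ds_to_guidmap(),
 * and restore_write_byref() later looks up the dataset holding the
 * referenced copy of a dedup'ed block, roughly:
 *
 *	gmesrch.guid = drrwbr->drr_refguid;
 *	gmep = avl_find(ra->guid_to_ds_map, &gmesrch, &where);
 *
 * The zfs_onexit callback registered in dmu_recv_stream() runs this
 * function to tear the map down when the caller's cleanup fd is closed.
 */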
static void *
restore_read(struct restorearg *ra, int len)
{
	void *rv;
	int done = 0;

	/* some things will require 8-byte alignment, so everything must */
	ASSERT3U(len % 8, ==, 0);

	while (done < len) {
		ssize_t resid;

		ra->err = vn_rdwr(UIO_READ, ra->vp,
		    (caddr_t)ra->buf + done, len - done,
		    ra->voff, UIO_SYSSPACE, FAPPEND,
		    RLIM64_INFINITY, CRED(), &resid);

		if (resid == len - done)
			ra->err = EINVAL;
		ra->voff += len - done - resid;
		done = len - resid;
		if (ra->err)
			return (NULL);
	}

	ASSERT3U(done, ==, len);
	rv = ra->buf;
	if (ra->byteswap)
		fletcher_4_incremental_byteswap(rv, len, &ra->cksum);
	else
		fletcher_4_incremental_native(rv, len, &ra->cksum);
	return (rv);
}
static void
backup_byteswap(dmu_replay_record_t *drr)
{
#define	DO64(X) (drr->drr_u.X = BSWAP_64(drr->drr_u.X))
#define	DO32(X) (drr->drr_u.X = BSWAP_32(drr->drr_u.X))
	drr->drr_type = BSWAP_32(drr->drr_type);
	drr->drr_payloadlen = BSWAP_32(drr->drr_payloadlen);
	switch (drr->drr_type) {
	case DRR_BEGIN:
		DO64(drr_begin.drr_magic);
		DO64(drr_begin.drr_versioninfo);
		DO64(drr_begin.drr_creation_time);
		DO32(drr_begin.drr_type);
		DO32(drr_begin.drr_flags);
		DO64(drr_begin.drr_toguid);
		DO64(drr_begin.drr_fromguid);
		break;
	case DRR_OBJECT:
		DO64(drr_object.drr_object);
		/* DO64(drr_object.drr_allocation_txg); */
		DO32(drr_object.drr_type);
		DO32(drr_object.drr_bonustype);
		DO32(drr_object.drr_blksz);
		DO32(drr_object.drr_bonuslen);
		DO64(drr_object.drr_toguid);
		break;
	case DRR_FREEOBJECTS:
		DO64(drr_freeobjects.drr_firstobj);
		DO64(drr_freeobjects.drr_numobjs);
		DO64(drr_freeobjects.drr_toguid);
		break;
	case DRR_WRITE:
		DO64(drr_write.drr_object);
		DO32(drr_write.drr_type);
		DO64(drr_write.drr_offset);
		DO64(drr_write.drr_length);
		DO64(drr_write.drr_toguid);
		DO64(drr_write.drr_key.ddk_cksum.zc_word[0]);
		DO64(drr_write.drr_key.ddk_cksum.zc_word[1]);
		DO64(drr_write.drr_key.ddk_cksum.zc_word[2]);
		DO64(drr_write.drr_key.ddk_cksum.zc_word[3]);
		DO64(drr_write.drr_key.ddk_prop);
		break;
	case DRR_WRITE_BYREF:
		DO64(drr_write_byref.drr_object);
		DO64(drr_write_byref.drr_offset);
		DO64(drr_write_byref.drr_length);
		DO64(drr_write_byref.drr_toguid);
		DO64(drr_write_byref.drr_refguid);
		DO64(drr_write_byref.drr_refobject);
		DO64(drr_write_byref.drr_refoffset);
		DO64(drr_write_byref.drr_key.ddk_cksum.zc_word[0]);
		DO64(drr_write_byref.drr_key.ddk_cksum.zc_word[1]);
		DO64(drr_write_byref.drr_key.ddk_cksum.zc_word[2]);
		DO64(drr_write_byref.drr_key.ddk_cksum.zc_word[3]);
		DO64(drr_write_byref.drr_key.ddk_prop);
		break;
	case DRR_FREE:
		DO64(drr_free.drr_object);
		DO64(drr_free.drr_offset);
		DO64(drr_free.drr_length);
		DO64(drr_free.drr_toguid);
		break;
	case DRR_SPILL:
		DO64(drr_spill.drr_object);
		DO64(drr_spill.drr_length);
		DO64(drr_spill.drr_toguid);
		break;
	case DRR_END:
		DO64(drr_end.drr_checksum.zc_word[0]);
		DO64(drr_end.drr_checksum.zc_word[1]);
		DO64(drr_end.drr_checksum.zc_word[2]);
		DO64(drr_end.drr_checksum.zc_word[3]);
		DO64(drr_end.drr_toguid);
		break;
	default:
		break;
	}
#undef DO64
#undef DO32
}
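
/*
 * backup_byteswap() is only called when the stream was generated on a
 * host of the opposite endianness, which the receiver detects because
 * the DRR_BEGIN magic reads as BSWAP_64(DMU_BACKUP_MAGIC).  Record
 * payloads (bonus buffers, block data) are byteswapped separately in
 * restore_object() and restore_write() using the per-type
 * dmu_ot_byteswap table.
 */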
static int
restore_object(struct restorearg *ra, objset_t *os, struct drr_object *drro)
{
	int err;
	dmu_tx_t *tx;
	void *data = NULL;

	if (drro->drr_type == DMU_OT_NONE ||
	    !DMU_OT_IS_VALID(drro->drr_type) ||
	    !DMU_OT_IS_VALID(drro->drr_bonustype) ||
	    drro->drr_checksumtype >= ZIO_CHECKSUM_FUNCTIONS ||
	    drro->drr_compress >= ZIO_COMPRESS_FUNCTIONS ||
	    P2PHASE(drro->drr_blksz, SPA_MINBLOCKSIZE) ||
	    drro->drr_blksz < SPA_MINBLOCKSIZE ||
	    drro->drr_blksz > SPA_MAXBLOCKSIZE ||
	    drro->drr_bonuslen > DN_MAX_BONUSLEN) {
		return (EINVAL);
	}

	err = dmu_object_info(os, drro->drr_object, NULL);

	if (err != 0 && err != ENOENT)
		return (EINVAL);

	if (drro->drr_bonuslen) {
		data = restore_read(ra, P2ROUNDUP(drro->drr_bonuslen, 8));
		if (ra->err)
			return (ra->err);
	}

	if (err == ENOENT) {
		/* currently free, want to be allocated */
		tx = dmu_tx_create(os);
		dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
		err = dmu_tx_assign(tx, TXG_WAIT);
		if (err) {
			dmu_tx_abort(tx);
			return (err);
		}
		err = dmu_object_claim(os, drro->drr_object,
		    drro->drr_type, drro->drr_blksz,
		    drro->drr_bonustype, drro->drr_bonuslen, tx);
		dmu_tx_commit(tx);
	} else {
		/* currently allocated, want to be allocated */
		err = dmu_object_reclaim(os, drro->drr_object,
		    drro->drr_type, drro->drr_blksz,
		    drro->drr_bonustype, drro->drr_bonuslen);
	}
	if (err)
		return (EINVAL);

	tx = dmu_tx_create(os);
	dmu_tx_hold_bonus(tx, drro->drr_object);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err) {
		dmu_tx_abort(tx);
		return (err);
	}

	dmu_object_set_checksum(os, drro->drr_object, drro->drr_checksumtype,
	    tx);
	dmu_object_set_compress(os, drro->drr_object, drro->drr_compress, tx);

	if (data != NULL) {
		dmu_buf_t *db;

		VERIFY(0 == dmu_bonus_hold(os, drro->drr_object, FTAG, &db));
		dmu_buf_will_dirty(db, tx);

		ASSERT3U(db->db_size, >=, drro->drr_bonuslen);
		bcopy(data, db->db_data, drro->drr_bonuslen);
		if (ra->byteswap) {
			dmu_object_byteswap_t byteswap =
			    DMU_OT_BYTESWAP(drro->drr_bonustype);
			dmu_ot_byteswap[byteswap].ob_func(db->db_data,
			    drro->drr_bonuslen);
		}
		dmu_buf_rele(db, FTAG);
	}
	dmu_tx_commit(tx);
	return (0);
}
static int
restore_freeobjects(struct restorearg *ra, objset_t *os,
    struct drr_freeobjects *drrfo)
{
	uint64_t obj;

	if (drrfo->drr_firstobj + drrfo->drr_numobjs < drrfo->drr_firstobj)
		return (EINVAL);

	for (obj = drrfo->drr_firstobj;
	    obj < drrfo->drr_firstobj + drrfo->drr_numobjs;
	    (void) dmu_object_next(os, &obj, FALSE, 0)) {
		int err;

		if (dmu_object_info(os, obj, NULL) != 0)
			continue;

		err = dmu_free_object(os, obj);
		if (err)
			return (err);
	}
	return (0);
}
static int
restore_write(struct restorearg *ra, objset_t *os,
    struct drr_write *drrw)
{
	dmu_tx_t *tx;
	void *data;
	int err;

	if (drrw->drr_offset + drrw->drr_length < drrw->drr_offset ||
	    !DMU_OT_IS_VALID(drrw->drr_type))
		return (EINVAL);

	data = restore_read(ra, drrw->drr_length);
	if (data == NULL)
		return (ra->err);

	if (dmu_object_info(os, drrw->drr_object, NULL) != 0)
		return (EINVAL);

	tx = dmu_tx_create(os);

	dmu_tx_hold_write(tx, drrw->drr_object,
	    drrw->drr_offset, drrw->drr_length);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err) {
		dmu_tx_abort(tx);
		return (err);
	}
	if (ra->byteswap) {
		dmu_object_byteswap_t byteswap =
		    DMU_OT_BYTESWAP(drrw->drr_type);
		dmu_ot_byteswap[byteswap].ob_func(data, drrw->drr_length);
	}
	dmu_write(os, drrw->drr_object,
	    drrw->drr_offset, drrw->drr_length, data, tx);
	dmu_tx_commit(tx);
	return (0);
}
/*
 * Handle a DRR_WRITE_BYREF record.  This record is used in dedup'ed
 * streams to refer to a copy of the data that is already on the
 * system because it came in earlier in the stream.  This function
 * finds the earlier copy of the data, and uses that copy instead of
 * data from the stream to fulfill this write.
 */
static int
restore_write_byref(struct restorearg *ra, objset_t *os,
    struct drr_write_byref *drrwbr)
{
	dmu_tx_t *tx;
	int err;
	guid_map_entry_t gmesrch;
	guid_map_entry_t *gmep;
	avl_index_t where;
	objset_t *ref_os = NULL;
	dmu_buf_t *dbp;

	if (drrwbr->drr_offset + drrwbr->drr_length < drrwbr->drr_offset)
		return (EINVAL);

	/*
	 * If the GUID of the referenced dataset is different from the
	 * GUID of the target dataset, find the referenced dataset.
	 */
	if (drrwbr->drr_toguid != drrwbr->drr_refguid) {
		gmesrch.guid = drrwbr->drr_refguid;
		if ((gmep = avl_find(ra->guid_to_ds_map, &gmesrch,
		    &where)) == NULL) {
			return (EINVAL);
		}
		if (dmu_objset_from_ds(gmep->gme_ds, &ref_os))
			return (EINVAL);
	} else {
		ref_os = os;
	}

	err = dmu_buf_hold(ref_os, drrwbr->drr_refobject,
	    drrwbr->drr_refoffset, FTAG, &dbp, DMU_READ_PREFETCH);
	if (err)
		return (err);

	tx = dmu_tx_create(os);

	dmu_tx_hold_write(tx, drrwbr->drr_object,
	    drrwbr->drr_offset, drrwbr->drr_length);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err) {
		dmu_tx_abort(tx);
		return (err);
	}
	dmu_write(os, drrwbr->drr_object,
	    drrwbr->drr_offset, drrwbr->drr_length, dbp->db_data, tx);
	dmu_buf_rele(dbp, FTAG);
	dmu_tx_commit(tx);
	return (0);
}
static int
restore_spill(struct restorearg *ra, objset_t *os, struct drr_spill *drrs)
{
	dmu_tx_t *tx;
	void *data;
	dmu_buf_t *db, *db_spill;
	int err;

	if (drrs->drr_length < SPA_MINBLOCKSIZE ||
	    drrs->drr_length > SPA_MAXBLOCKSIZE)
		return (EINVAL);

	data = restore_read(ra, drrs->drr_length);
	if (data == NULL)
		return (ra->err);

	if (dmu_object_info(os, drrs->drr_object, NULL) != 0)
		return (EINVAL);

	VERIFY(0 == dmu_bonus_hold(os, drrs->drr_object, FTAG, &db));
	if ((err = dmu_spill_hold_by_bonus(db, FTAG, &db_spill)) != 0) {
		dmu_buf_rele(db, FTAG);
		return (err);
	}

	tx = dmu_tx_create(os);

	dmu_tx_hold_spill(tx, db->db_object);

	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err) {
		dmu_buf_rele(db, FTAG);
		dmu_buf_rele(db_spill, FTAG);
		dmu_tx_abort(tx);
		return (err);
	}
	dmu_buf_will_dirty(db_spill, tx);

	if (db_spill->db_size < drrs->drr_length)
		VERIFY(0 == dbuf_spill_set_blksz(db_spill,
		    drrs->drr_length, tx));
	bcopy(data, db_spill->db_data, drrs->drr_length);

	dmu_buf_rele(db, FTAG);
	dmu_buf_rele(db_spill, FTAG);

	dmu_tx_commit(tx);
	return (0);
}
static int
restore_free(struct restorearg *ra, objset_t *os,
    struct drr_free *drrf)
{
	int err;

	if (drrf->drr_length != -1ULL &&
	    drrf->drr_offset + drrf->drr_length < drrf->drr_offset)
		return (EINVAL);

	if (dmu_object_info(os, drrf->drr_object, NULL) != 0)
		return (EINVAL);

	err = dmu_free_long_range(os, drrf->drr_object,
	    drrf->drr_offset, drrf->drr_length);
	return (err);
}
/*
 * NB: callers *must* call dmu_recv_end() if this succeeds.
 */
int
dmu_recv_stream(dmu_recv_cookie_t *drc, vnode_t *vp, offset_t *voffp,
    int cleanup_fd, uint64_t *action_handlep)
{
	struct restorearg ra = { 0 };
	dmu_replay_record_t *drr;
	objset_t *os;
	zio_cksum_t pcksum;
	int featureflags;

	if (drc->drc_drrb->drr_magic == BSWAP_64(DMU_BACKUP_MAGIC))
		ra.byteswap = TRUE;

	{
		/* compute checksum of drr_begin record */
		dmu_replay_record_t *drr;
		drr = kmem_zalloc(sizeof (dmu_replay_record_t), KM_SLEEP);

		drr->drr_type = DRR_BEGIN;
		drr->drr_u.drr_begin = *drc->drc_drrb;
		if (ra.byteswap) {
			fletcher_4_incremental_byteswap(drr,
			    sizeof (dmu_replay_record_t), &ra.cksum);
		} else {
			fletcher_4_incremental_native(drr,
			    sizeof (dmu_replay_record_t), &ra.cksum);
		}
		kmem_free(drr, sizeof (dmu_replay_record_t));
	}

	if (ra.byteswap) {
		struct drr_begin *drrb = drc->drc_drrb;
		drrb->drr_magic = BSWAP_64(drrb->drr_magic);
		drrb->drr_versioninfo = BSWAP_64(drrb->drr_versioninfo);
		drrb->drr_creation_time = BSWAP_64(drrb->drr_creation_time);
		drrb->drr_type = BSWAP_32(drrb->drr_type);
		drrb->drr_toguid = BSWAP_64(drrb->drr_toguid);
		drrb->drr_fromguid = BSWAP_64(drrb->drr_fromguid);
	}

	ra.vp = vp;
	ra.voff = *voffp;
	ra.bufsize = 1<<20;
	ra.buf = vmem_alloc(ra.bufsize, KM_SLEEP);

	/* these were verified in dmu_recv_begin */
	ASSERT(DMU_GET_STREAM_HDRTYPE(drc->drc_drrb->drr_versioninfo) ==
	    DMU_SUBSTREAM);
	ASSERT(drc->drc_drrb->drr_type < DMU_OST_NUMTYPES);

	/*
	 * Open the objset we are modifying.
	 */
	VERIFY(dmu_objset_from_ds(drc->drc_real_ds, &os) == 0);

	ASSERT(drc->drc_real_ds->ds_phys->ds_flags & DS_FLAG_INCONSISTENT);

	featureflags = DMU_GET_FEATUREFLAGS(drc->drc_drrb->drr_versioninfo);

	/* if this stream is dedup'ed, set up the avl tree for guid mapping */
	if (featureflags & DMU_BACKUP_FEATURE_DEDUP) {
		minor_t minor;

		if (cleanup_fd == -1) {
			ra.err = EBADF;
			goto out;
		}
		ra.err = zfs_onexit_fd_hold(cleanup_fd, &minor);
		if (ra.err) {
			cleanup_fd = -1;
			goto out;
		}

		if (*action_handlep == 0) {
			ra.guid_to_ds_map =
			    kmem_alloc(sizeof (avl_tree_t), KM_SLEEP);
			avl_create(ra.guid_to_ds_map, guid_compare,
			    sizeof (guid_map_entry_t),
			    offsetof(guid_map_entry_t, avlnode));
			ra.err = zfs_onexit_add_cb(minor,
			    free_guid_map_onexit, ra.guid_to_ds_map,
			    action_handlep);
			if (ra.err)
				goto out;
		} else {
			ra.err = zfs_onexit_cb_data(minor, *action_handlep,
			    (void **)&ra.guid_to_ds_map);
			if (ra.err)
				goto out;
		}

		drc->drc_guid_to_ds_map = ra.guid_to_ds_map;
	}

	/*
	 * Read records and process them.
	 */
	pcksum = ra.cksum;
	while (ra.err == 0 &&
	    NULL != (drr = restore_read(&ra, sizeof (*drr)))) {
		if (issig(JUSTLOOKING) && issig(FORREAL)) {
			ra.err = EINTR;
			goto out;
		}

		if (ra.byteswap)
			backup_byteswap(drr);

		switch (drr->drr_type) {
		case DRR_OBJECT:
		{
			/*
			 * We need to make a copy of the record header,
			 * because restore_{object,write} may need to
			 * restore_read(), which will invalidate drr.
			 */
			struct drr_object drro = drr->drr_u.drr_object;
			ra.err = restore_object(&ra, os, &drro);
			break;
		}
		case DRR_FREEOBJECTS:
		{
			struct drr_freeobjects drrfo =
			    drr->drr_u.drr_freeobjects;
			ra.err = restore_freeobjects(&ra, os, &drrfo);
			break;
		}
		case DRR_WRITE:
		{
			struct drr_write drrw = drr->drr_u.drr_write;
			ra.err = restore_write(&ra, os, &drrw);
			break;
		}
		case DRR_WRITE_BYREF:
		{
			struct drr_write_byref drrwbr =
			    drr->drr_u.drr_write_byref;
			ra.err = restore_write_byref(&ra, os, &drrwbr);
			break;
		}
		case DRR_FREE:
		{
			struct drr_free drrf = drr->drr_u.drr_free;
			ra.err = restore_free(&ra, os, &drrf);
			break;
		}
		case DRR_END:
		{
			struct drr_end drre = drr->drr_u.drr_end;
			/*
			 * We compare against the *previous* checksum
			 * value, because the stored checksum is of
			 * everything before the DRR_END record.
			 */
			if (!ZIO_CHECKSUM_EQUAL(drre.drr_checksum, pcksum))
				ra.err = ECKSUM;
			goto out;
		}
		case DRR_SPILL:
		{
			struct drr_spill drrs = drr->drr_u.drr_spill;
			ra.err = restore_spill(&ra, os, &drrs);
			break;
		}
		default:
			ra.err = EINVAL;
			goto out;
		}
		pcksum = ra.cksum;
	}
	ASSERT(ra.err != 0);

out:
	if ((featureflags & DMU_BACKUP_FEATURE_DEDUP) && (cleanup_fd != -1))
		zfs_onexit_fd_rele(cleanup_fd);

	if (ra.err != 0) {
		/*
		 * destroy what we created, so we don't leave it in the
		 * inconsistent restoring state.
		 */
		txg_wait_synced(drc->drc_real_ds->ds_dir->dd_pool, 0);

		(void) dsl_dataset_destroy(drc->drc_real_ds, dmu_recv_tag,
		    B_FALSE);
		if (drc->drc_real_ds != drc->drc_logical_ds) {
			mutex_exit(&drc->drc_logical_ds->ds_recvlock);
			dsl_dataset_rele(drc->drc_logical_ds, dmu_recv_tag);
		}
	}

	vmem_free(ra.buf, ra.bufsize);
	*voffp = ra.voff;
	return (ra.err);
}
struct recvendsyncarg {
	char *tosnap;
	uint64_t creation_time;
	uint64_t toguid;
};

static int
recv_end_check(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	struct recvendsyncarg *resa = arg2;

	return (dsl_dataset_snapshot_check(ds, resa->tosnap, tx));
}

static void
recv_end_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
	dsl_dataset_t *ds = arg1;
	struct recvendsyncarg *resa = arg2;

	dsl_dataset_snapshot_sync(ds, resa->tosnap, tx);

	/* set snapshot's creation time and guid */
	dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
	ds->ds_prev->ds_phys->ds_creation_time = resa->creation_time;
	ds->ds_prev->ds_phys->ds_guid = resa->toguid;
	ds->ds_prev->ds_phys->ds_flags &= ~DS_FLAG_INCONSISTENT;

	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	ds->ds_phys->ds_flags &= ~DS_FLAG_INCONSISTENT;
}
static int
add_ds_to_guidmap(avl_tree_t *guid_map, dsl_dataset_t *ds)
{
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	uint64_t snapobj = ds->ds_phys->ds_prev_snap_obj;
	dsl_dataset_t *snapds;
	guid_map_entry_t *gmep;
	int err;

	ASSERT(guid_map != NULL);

	rw_enter(&dp->dp_config_rwlock, RW_READER);
	err = dsl_dataset_hold_obj(dp, snapobj, guid_map, &snapds);
	if (err == 0) {
		gmep = kmem_alloc(sizeof (guid_map_entry_t), KM_SLEEP);
		gmep->guid = snapds->ds_phys->ds_guid;
		gmep->gme_ds = snapds;
		avl_add(guid_map, gmep);
	}
	rw_exit(&dp->dp_config_rwlock);

	return (err);
}
static int
dmu_recv_existing_end(dmu_recv_cookie_t *drc)
{
	struct recvendsyncarg resa;
	dsl_dataset_t *ds = drc->drc_logical_ds;
	int err, myerr;

	if (dsl_dataset_tryown(ds, FALSE, dmu_recv_tag)) {
		err = dsl_dataset_clone_swap(drc->drc_real_ds, ds,
		    drc->drc_force);
		if (err)
			goto out;
	} else {
		mutex_exit(&ds->ds_recvlock);
		dsl_dataset_rele(ds, dmu_recv_tag);
		(void) dsl_dataset_destroy(drc->drc_real_ds, dmu_recv_tag,
		    B_FALSE);
		return (EBUSY);
	}

	resa.creation_time = drc->drc_drrb->drr_creation_time;
	resa.toguid = drc->drc_drrb->drr_toguid;
	resa.tosnap = drc->drc_tosnap;

	err = dsl_sync_task_do(ds->ds_dir->dd_pool,
	    recv_end_check, recv_end_sync, ds, &resa, 3);
	if (err) {
		/* swap back */
		(void) dsl_dataset_clone_swap(drc->drc_real_ds, ds, B_TRUE);
	}

out:
	mutex_exit(&ds->ds_recvlock);
	if (err == 0 && drc->drc_guid_to_ds_map != NULL)
		(void) add_ds_to_guidmap(drc->drc_guid_to_ds_map, ds);
	dsl_dataset_disown(ds, dmu_recv_tag);
	myerr = dsl_dataset_destroy(drc->drc_real_ds, dmu_recv_tag, B_FALSE);
	ASSERT3U(myerr, ==, 0);
	return (err);
}
static int
dmu_recv_new_end(dmu_recv_cookie_t *drc)
{
	struct recvendsyncarg resa;
	dsl_dataset_t *ds = drc->drc_logical_ds;
	int err;

	/*
	 * XXX hack; seems the ds is still dirty and dsl_pool_zil_clean()
	 * expects it to have a ds_user_ptr (and zil), but clone_swap()
	 * can close it.
	 */
	txg_wait_synced(ds->ds_dir->dd_pool, 0);

	resa.creation_time = drc->drc_drrb->drr_creation_time;
	resa.toguid = drc->drc_drrb->drr_toguid;
	resa.tosnap = drc->drc_tosnap;

	err = dsl_sync_task_do(ds->ds_dir->dd_pool,
	    recv_end_check, recv_end_sync, ds, &resa, 3);
	if (err) {
		/* clean up the fs we just recv'd into */
		(void) dsl_dataset_destroy(ds, dmu_recv_tag, B_FALSE);
		return (err);
	}

	if (drc->drc_guid_to_ds_map != NULL)
		(void) add_ds_to_guidmap(drc->drc_guid_to_ds_map, ds);
	/* release the hold from dmu_recv_begin */
	dsl_dataset_disown(ds, dmu_recv_tag);
	return (err);
}
int
dmu_recv_end(dmu_recv_cookie_t *drc)
{
	if (drc->drc_logical_ds != drc->drc_real_ds)
		return (dmu_recv_existing_end(drc));
	else
		return (dmu_recv_new_end(drc));
}
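
/*
 * Sketch of the receive-side calling sequence these entry points expect
 * (simplified from the zfs_ioc_recv ioctl path; error handling omitted):
 *
 *	dmu_recv_cookie_t drc;
 *
 *	err = dmu_recv_begin(tofs, tosnap, top_ds, &drrb, force, origin, &drc);
 *	if (err == 0) {
 *		err = dmu_recv_stream(&drc, vp, &voff, cleanup_fd, &handle);
 *		if (err == 0)
 *			err = dmu_recv_end(&drc);
 *	}
 *
 * dmu_recv_begin() takes the dataset holds, dmu_recv_stream() consumes
 * the record stream (cleaning up the partial receive on failure), and
 * dmu_recv_end() snapshots the result and releases the holds.
 */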