/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011 by Delphix. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2012, Joyent, Inc. All rights reserved.
 * Copyright (c) 2013 by Delphix. All rights reserved.
 */
#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/spa_impl.h>
#include <sys/zfs_ioctl.h>
#include <sys/zap.h>
#include <sys/zio_checksum.h>
#include <sys/zfs_znode.h>
#include <zfs_fletcher.h>
#include <sys/avl.h>
#include <sys/ddt.h>
#include <sys/zfs_onexit.h>
#include <sys/dmu_send.h>
#include <sys/dsl_destroy.h>
/* Set this tunable to TRUE to replace corrupt data with 0x2f5baddb10c */
int zfs_send_corrupt_data = B_FALSE;

static char *dmu_recv_tag = "dmu_recv_tag";
static const char *recv_clone_name = "%recv";
typedef struct dump_bytes_io {
	dmu_sendarg_t	*dbi_dsp;
	void		*dbi_buf;
	int		dbi_len;
} dump_bytes_io_t;
static void
dump_bytes_strategy(void *arg)
{
	dump_bytes_io_t *dbi = (dump_bytes_io_t *)arg;
	dmu_sendarg_t *dsp = dbi->dbi_dsp;
	dsl_dataset_t *ds = dsp->dsa_os->os_dsl_dataset;
	ssize_t resid; /* have to get resid to get detailed errno */

	ASSERT0(dbi->dbi_len % 8);

	fletcher_4_incremental_native(dbi->dbi_buf, dbi->dbi_len, &dsp->dsa_zc);
	dsp->dsa_err = vn_rdwr(UIO_WRITE, dsp->dsa_vp,
	    (caddr_t)dbi->dbi_buf, dbi->dbi_len,
	    0, UIO_SYSSPACE, FAPPEND, RLIM64_INFINITY, CRED(), &resid);

	mutex_enter(&ds->ds_sendstream_lock);
	*dsp->dsa_off += dbi->dbi_len;
	mutex_exit(&ds->ds_sendstream_lock);
}
static int
dump_bytes(dmu_sendarg_t *dsp, void *buf, int len)
{
	dump_bytes_io_t dbi;

	dbi.dbi_dsp = dsp;
	dbi.dbi_buf = buf;
	dbi.dbi_len = len;

	/*
	 * The vn_rdwr() call is performed in a taskq to ensure that there is
	 * always enough stack space to write safely to the target filesystem.
	 * The ZIO_TYPE_FREE threads are used because there can be a lot of
	 * them and they are used in vdev_file.c for a similar purpose.
	 */
	spa_taskq_dispatch_sync(dmu_objset_spa(dsp->dsa_os), ZIO_TYPE_FREE,
	    ZIO_TASKQ_ISSUE, dump_bytes_strategy, &dbi, TQ_SLEEP);

	return (dsp->dsa_err);
}
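/*
 * Illustrative note (not in the original source): every record writer below
 * funnels through dump_bytes(), emitting a fixed-size header and, for some
 * record types, a payload:
 *
 *	if (dump_bytes(dsp, dsp->dsa_drr, sizeof (dmu_replay_record_t)) != 0)
 *		return (SET_ERROR(EINTR));
 *	if (dump_bytes(dsp, data, blksz) != 0)
 *		return (SET_ERROR(EINTR));
 *
 * Each call updates the running fletcher-4 checksum (dsa_zc) and the output
 * offset (*dsa_off) as side effects, so records must be written in stream
 * order.
 */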
static int
dump_free(dmu_sendarg_t *dsp, uint64_t object, uint64_t offset,
    uint64_t length)
{
	struct drr_free *drrf = &(dsp->dsa_drr->drr_u.drr_free);

	if (length != -1ULL && offset + length < offset)
		return (SET_ERROR(EINVAL));

	/*
	 * If there is a pending op, but it's not PENDING_FREE, push it out,
	 * since free block aggregation can only be done for blocks of the
	 * same type (i.e., DRR_FREE records can only be aggregated with
	 * other DRR_FREE records; DRR_FREEOBJECTS records can only be
	 * aggregated with other DRR_FREEOBJECTS records).
	 */
	if (dsp->dsa_pending_op != PENDING_NONE &&
	    dsp->dsa_pending_op != PENDING_FREE) {
		if (dump_bytes(dsp, dsp->dsa_drr,
		    sizeof (dmu_replay_record_t)) != 0)
			return (SET_ERROR(EINTR));
		dsp->dsa_pending_op = PENDING_NONE;
	}

	if (dsp->dsa_pending_op == PENDING_FREE) {
		/*
		 * There should never be a PENDING_FREE if length is -1
		 * (because dump_dnode is the only place where this
		 * function is called with a -1, and only after flushing
		 * any pending record).
		 */
		ASSERT(length != -1ULL);
		/*
		 * Check to see whether this free block can be aggregated
		 * with the pending one.
		 */
		if (drrf->drr_object == object && drrf->drr_offset +
		    drrf->drr_length == offset) {
			drrf->drr_length += length;
			return (0);
		} else {
			/* not a continuation.  Push out pending record */
			if (dump_bytes(dsp, dsp->dsa_drr,
			    sizeof (dmu_replay_record_t)) != 0)
				return (SET_ERROR(EINTR));
			dsp->dsa_pending_op = PENDING_NONE;
		}
	}
	/* create a FREE record and make it pending */
	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_FREE;
	drrf->drr_object = object;
	drrf->drr_offset = offset;
	drrf->drr_length = length;
	drrf->drr_toguid = dsp->dsa_toguid;
	if (length == -1ULL) {
		if (dump_bytes(dsp, dsp->dsa_drr,
		    sizeof (dmu_replay_record_t)) != 0)
			return (SET_ERROR(EINTR));
	} else {
		dsp->dsa_pending_op = PENDING_FREE;
	}

	return (0);
}
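/*
 * Worked example of the aggregation above (illustrative, not from the
 * original source).  Three frees against the same object:
 *
 *	dump_free(dsp, obj, 0, 4096);		records a pending DRR_FREE
 *	dump_free(dsp, obj, 4096, 4096);	extends it to length 8192
 *	dump_free(dsp, obj, 65536, 4096);	not contiguous: the pending
 *						record is pushed out and a
 *						new one becomes pending
 *
 * so only two DRR_FREE records reach the stream instead of three.
 */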
static int
dump_data(dmu_sendarg_t *dsp, dmu_object_type_t type,
    uint64_t object, uint64_t offset, int blksz, const blkptr_t *bp, void *data)
{
	struct drr_write *drrw = &(dsp->dsa_drr->drr_u.drr_write);

	/*
	 * If there is any kind of pending aggregation (currently either
	 * a grouping of free objects or free blocks), push it out to
	 * the stream, since aggregation can't be done across operations
	 * of different types.
	 */
	if (dsp->dsa_pending_op != PENDING_NONE) {
		if (dump_bytes(dsp, dsp->dsa_drr,
		    sizeof (dmu_replay_record_t)) != 0)
			return (SET_ERROR(EINTR));
		dsp->dsa_pending_op = PENDING_NONE;
	}
	/* write a DATA record */
	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_WRITE;
	drrw->drr_object = object;
	drrw->drr_type = type;
	drrw->drr_offset = offset;
	drrw->drr_length = blksz;
	drrw->drr_toguid = dsp->dsa_toguid;
	drrw->drr_checksumtype = BP_GET_CHECKSUM(bp);
	if (zio_checksum_table[drrw->drr_checksumtype].ci_dedup)
		drrw->drr_checksumflags |= DRR_CHECKSUM_DEDUP;
	DDK_SET_LSIZE(&drrw->drr_key, BP_GET_LSIZE(bp));
	DDK_SET_PSIZE(&drrw->drr_key, BP_GET_PSIZE(bp));
	DDK_SET_COMPRESS(&drrw->drr_key, BP_GET_COMPRESS(bp));
	drrw->drr_key.ddk_cksum = bp->blk_cksum;

	if (dump_bytes(dsp, dsp->dsa_drr, sizeof (dmu_replay_record_t)) != 0)
		return (SET_ERROR(EINTR));
	if (dump_bytes(dsp, data, blksz) != 0)
		return (SET_ERROR(EINTR));
	return (0);
}
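/*
 * Illustrative note (not in the original source): a DRR_WRITE record is
 * emitted as a fixed-size header immediately followed by the raw block
 * contents, so a 128K block costs sizeof (dmu_replay_record_t) + 131072
 * bytes of stream.  The ddk_* key fields copied from the blkptr above are
 * what later lets a dedup-aware stream replace a repeated block with a
 * DRR_WRITE_BYREF record referring back to this one.
 */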
static int
dump_spill(dmu_sendarg_t *dsp, uint64_t object, int blksz, void *data)
{
	struct drr_spill *drrs = &(dsp->dsa_drr->drr_u.drr_spill);

	if (dsp->dsa_pending_op != PENDING_NONE) {
		if (dump_bytes(dsp, dsp->dsa_drr,
		    sizeof (dmu_replay_record_t)) != 0)
			return (SET_ERROR(EINTR));
		dsp->dsa_pending_op = PENDING_NONE;
	}

	/* write a SPILL record */
	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_SPILL;
	drrs->drr_object = object;
	drrs->drr_length = blksz;
	drrs->drr_toguid = dsp->dsa_toguid;

	if (dump_bytes(dsp, dsp->dsa_drr, sizeof (dmu_replay_record_t)))
		return (SET_ERROR(EINTR));
	if (dump_bytes(dsp, data, blksz))
		return (SET_ERROR(EINTR));
	return (0);
}
static int
dump_freeobjects(dmu_sendarg_t *dsp, uint64_t firstobj, uint64_t numobjs)
{
	struct drr_freeobjects *drrfo = &(dsp->dsa_drr->drr_u.drr_freeobjects);

	/*
	 * If there is a pending op, but it's not PENDING_FREEOBJECTS,
	 * push it out, since free block aggregation can only be done for
	 * blocks of the same type (i.e., DRR_FREE records can only be
	 * aggregated with other DRR_FREE records; DRR_FREEOBJECTS records
	 * can only be aggregated with other DRR_FREEOBJECTS records).
	 */
	if (dsp->dsa_pending_op != PENDING_NONE &&
	    dsp->dsa_pending_op != PENDING_FREEOBJECTS) {
		if (dump_bytes(dsp, dsp->dsa_drr,
		    sizeof (dmu_replay_record_t)) != 0)
			return (SET_ERROR(EINTR));
		dsp->dsa_pending_op = PENDING_NONE;
	}
	if (dsp->dsa_pending_op == PENDING_FREEOBJECTS) {
		/*
		 * See whether this free object array can be aggregated
		 * with the pending one.
		 */
		if (drrfo->drr_firstobj + drrfo->drr_numobjs == firstobj) {
			drrfo->drr_numobjs += numobjs;
			return (0);
		} else {
			/* can't be aggregated.  Push out pending record */
			if (dump_bytes(dsp, dsp->dsa_drr,
			    sizeof (dmu_replay_record_t)) != 0)
				return (SET_ERROR(EINTR));
			dsp->dsa_pending_op = PENDING_NONE;
		}
	}

	/* write a FREEOBJECTS record */
	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_FREEOBJECTS;
	drrfo->drr_firstobj = firstobj;
	drrfo->drr_numobjs = numobjs;
	drrfo->drr_toguid = dsp->dsa_toguid;

	dsp->dsa_pending_op = PENDING_FREEOBJECTS;

	return (0);
}
static int
dump_dnode(dmu_sendarg_t *dsp, uint64_t object, dnode_phys_t *dnp)
{
	struct drr_object *drro = &(dsp->dsa_drr->drr_u.drr_object);

	if (dnp == NULL || dnp->dn_type == DMU_OT_NONE)
		return (dump_freeobjects(dsp, object, 1));

	if (dsp->dsa_pending_op != PENDING_NONE) {
		if (dump_bytes(dsp, dsp->dsa_drr,
		    sizeof (dmu_replay_record_t)) != 0)
			return (SET_ERROR(EINTR));
		dsp->dsa_pending_op = PENDING_NONE;
	}

	/* write an OBJECT record */
	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_OBJECT;
	drro->drr_object = object;
	drro->drr_type = dnp->dn_type;
	drro->drr_bonustype = dnp->dn_bonustype;
	drro->drr_blksz = dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT;
	drro->drr_bonuslen = dnp->dn_bonuslen;
	drro->drr_checksumtype = dnp->dn_checksum;
	drro->drr_compress = dnp->dn_compress;
	drro->drr_toguid = dsp->dsa_toguid;

	if (dump_bytes(dsp, dsp->dsa_drr, sizeof (dmu_replay_record_t)) != 0)
		return (SET_ERROR(EINTR));

	if (dump_bytes(dsp, DN_BONUS(dnp), P2ROUNDUP(dnp->dn_bonuslen, 8)) != 0)
		return (SET_ERROR(EINTR));

	/* free anything past the end of the file */
	if (dump_free(dsp, object, (dnp->dn_maxblkid + 1) *
	    (dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT), -1ULL))
		return (SET_ERROR(EINTR));
	if (dsp->dsa_err != 0)
		return (SET_ERROR(EINTR));
	return (0);
}
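/*
 * Illustrative note (not in the original source): the trailing dump_free()
 * with length -1ULL is the "free to end of object" convention.  On the
 * receive side, restore_free() passes the -1ULL straight through to
 * dmu_free_long_range(), truncating the object past its last block.
 */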
#define	BP_SPAN(dnp, level) \
	(((uint64_t)dnp->dn_datablkszsec) << (SPA_MINBLOCKSHIFT + \
	(level) * (dnp->dn_indblkshift - SPA_BLKPTRSHIFT)))
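/*
 * Worked example (assumed typical values, not from the original source):
 * with 128K data blocks (dn_datablkszsec == 256) and 16K indirect blocks
 * (dn_indblkshift == 14), each indirect block holds
 * 2^(14 - SPA_BLKPTRSHIFT) == 2^(14 - 7) == 128 block pointers, so
 *
 *	BP_SPAN(dnp, 0) == 256 << 9        == 128K
 *	BP_SPAN(dnp, 1) == 256 << (9 + 7)  == 16M
 *	BP_SPAN(dnp, 2) == 256 << (9 + 14) == 2G
 *
 * i.e. the file range covered by one block at the given indirection level.
 */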
/* ARGSUSED */
static int
backup_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_t *zb, const dnode_phys_t *dnp, void *arg)
{
	dmu_sendarg_t *dsp = arg;
	dmu_object_type_t type = bp ? BP_GET_TYPE(bp) : DMU_OT_NONE;
	int err = 0;

	if (issig(JUSTLOOKING) && issig(FORREAL))
		return (SET_ERROR(EINTR));

	if (zb->zb_object != DMU_META_DNODE_OBJECT &&
	    DMU_OBJECT_IS_SPECIAL(zb->zb_object)) {
		return (0);
	} else if (bp == NULL && zb->zb_object == DMU_META_DNODE_OBJECT) {
		uint64_t span = BP_SPAN(dnp, zb->zb_level);
		uint64_t dnobj = (zb->zb_blkid * span) >> DNODE_SHIFT;
		err = dump_freeobjects(dsp, dnobj, span >> DNODE_SHIFT);
	} else if (bp == NULL) {
		uint64_t span = BP_SPAN(dnp, zb->zb_level);
		err = dump_free(dsp, zb->zb_object, zb->zb_blkid * span, span);
	} else if (zb->zb_level > 0 || type == DMU_OT_OBJSET) {
		return (0);
	} else if (type == DMU_OT_DNODE) {
		dnode_phys_t *blk;
		int i;
		int blksz = BP_GET_LSIZE(bp);
		uint32_t aflags = ARC_WAIT;
		arc_buf_t *abuf;

		if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL,
		    &aflags, zb) != 0)
			return (SET_ERROR(EIO));

		blk = abuf->b_data;
		for (i = 0; i < blksz >> DNODE_SHIFT; i++) {
			uint64_t dnobj = (zb->zb_blkid <<
			    (DNODE_BLOCK_SHIFT - DNODE_SHIFT)) + i;
			err = dump_dnode(dsp, dnobj, blk+i);
			if (err != 0)
				break;
		}
		(void) arc_buf_remove_ref(abuf, &abuf);
	} else if (type == DMU_OT_SA) {
		uint32_t aflags = ARC_WAIT;
		arc_buf_t *abuf;
		int blksz = BP_GET_LSIZE(bp);

		if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL,
		    &aflags, zb) != 0)
			return (SET_ERROR(EIO));

		err = dump_spill(dsp, zb->zb_object, blksz, abuf->b_data);
		(void) arc_buf_remove_ref(abuf, &abuf);
	} else { /* it's a level-0 block of a regular object */
		uint32_t aflags = ARC_WAIT;
		arc_buf_t *abuf;
		int blksz = BP_GET_LSIZE(bp);

		if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL,
		    &aflags, zb) != 0) {
			if (zfs_send_corrupt_data) {
				uint64_t *ptr;
				/* Send a block filled with 0x"zfs badd bloc" */
				abuf = arc_buf_alloc(spa, blksz, &abuf,
				    ARC_BUFC_DATA);
				for (ptr = abuf->b_data;
				    (char *)ptr < (char *)abuf->b_data + blksz;
				    ptr++)
					*ptr = 0x2f5baddb10cULL;
			} else {
				return (SET_ERROR(EIO));
			}
		}

		err = dump_data(dsp, type, zb->zb_object, zb->zb_blkid * blksz,
		    blksz, bp, abuf->b_data);
		(void) arc_buf_remove_ref(abuf, &abuf);
	}

	ASSERT(err == 0 || err == EINTR);
	return (err);
}
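/*
 * Illustrative note (not in the original source): traverse_dataset() walks
 * the dataset in logical order and hands every block pointer to backup_cb(),
 * which translates it into exactly one kind of stream record: a hole in the
 * meta-dnode becomes DRR_FREEOBJECTS, a hole elsewhere becomes DRR_FREE, a
 * dnode block is decomposed into per-dnode DRR_OBJECT records, a spill block
 * becomes DRR_SPILL, and an ordinary level-0 data block becomes DRR_WRITE.
 */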
/*
 * Releases dp, ds, and fromds, using the specified tag.
 */
static int
dmu_send_impl(void *tag, dsl_pool_t *dp, dsl_dataset_t *ds,
    dsl_dataset_t *fromds, int outfd, vnode_t *vp, offset_t *off)
{
	objset_t *os;
	dmu_replay_record_t *drr;
	dmu_sendarg_t *dsp;
	int err;
	uint64_t fromtxg = 0;

	if (fromds != NULL && !dsl_dataset_is_before(ds, fromds)) {
		dsl_dataset_rele(fromds, tag);
		dsl_dataset_rele(ds, tag);
		dsl_pool_rele(dp, tag);
		return (SET_ERROR(EXDEV));
	}

	err = dmu_objset_from_ds(ds, &os);
	if (err != 0) {
		if (fromds != NULL)
			dsl_dataset_rele(fromds, tag);
		dsl_dataset_rele(ds, tag);
		dsl_pool_rele(dp, tag);
		return (err);
	}

	drr = kmem_zalloc(sizeof (dmu_replay_record_t), KM_SLEEP);
	drr->drr_type = DRR_BEGIN;
	drr->drr_u.drr_begin.drr_magic = DMU_BACKUP_MAGIC;
	DMU_SET_STREAM_HDRTYPE(drr->drr_u.drr_begin.drr_versioninfo,
	    DMU_SUBSTREAM);

#ifdef _KERNEL
	if (dmu_objset_type(os) == DMU_OST_ZFS) {
		uint64_t version;
		if (zfs_get_zplprop(os, ZFS_PROP_VERSION, &version) != 0) {
			kmem_free(drr, sizeof (dmu_replay_record_t));
			if (fromds != NULL)
				dsl_dataset_rele(fromds, tag);
			dsl_dataset_rele(ds, tag);
			dsl_pool_rele(dp, tag);
			return (SET_ERROR(EINVAL));
		}
		if (version >= ZPL_VERSION_SA) {
			DMU_SET_FEATUREFLAGS(
			    drr->drr_u.drr_begin.drr_versioninfo,
			    DMU_BACKUP_FEATURE_SA_SPILL);
		}
	}
#endif

	drr->drr_u.drr_begin.drr_creation_time =
	    ds->ds_phys->ds_creation_time;
	drr->drr_u.drr_begin.drr_type = dmu_objset_type(os);
	if (fromds != NULL && ds->ds_dir != fromds->ds_dir)
		drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_CLONE;
	drr->drr_u.drr_begin.drr_toguid = ds->ds_phys->ds_guid;
	if (ds->ds_phys->ds_flags & DS_FLAG_CI_DATASET)
		drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_CI_DATA;

	if (fromds != NULL)
		drr->drr_u.drr_begin.drr_fromguid = fromds->ds_phys->ds_guid;
	dsl_dataset_name(ds, drr->drr_u.drr_begin.drr_toname);

	if (fromds != NULL) {
		fromtxg = fromds->ds_phys->ds_creation_txg;
		dsl_dataset_rele(fromds, tag);
		fromds = NULL;
	}

	dsp = kmem_zalloc(sizeof (dmu_sendarg_t), KM_SLEEP);

	dsp->dsa_drr = drr;
	dsp->dsa_vp = vp;
	dsp->dsa_outfd = outfd;
	dsp->dsa_proc = curproc;
	dsp->dsa_os = os;
	dsp->dsa_off = off;
	dsp->dsa_toguid = ds->ds_phys->ds_guid;
	ZIO_SET_CHECKSUM(&dsp->dsa_zc, 0, 0, 0, 0);
	dsp->dsa_pending_op = PENDING_NONE;

	mutex_enter(&ds->ds_sendstream_lock);
	list_insert_head(&ds->ds_sendstreams, dsp);
	mutex_exit(&ds->ds_sendstream_lock);

	if (dump_bytes(dsp, drr, sizeof (dmu_replay_record_t)) != 0) {
		err = dsp->dsa_err;
		goto out;
	}

	dsl_dataset_long_hold(ds, FTAG);
	dsl_pool_rele(dp, tag);

	err = traverse_dataset(ds, fromtxg, TRAVERSE_PRE | TRAVERSE_PREFETCH,
	    backup_cb, dsp);

	if (dsp->dsa_pending_op != PENDING_NONE)
		if (dump_bytes(dsp, drr, sizeof (dmu_replay_record_t)) != 0)
			err = SET_ERROR(EINTR);

	if (err != 0) {
		if (err == EINTR && dsp->dsa_err != 0)
			err = dsp->dsa_err;
		goto out;
	}

	bzero(drr, sizeof (dmu_replay_record_t));
	drr->drr_type = DRR_END;
	drr->drr_u.drr_end.drr_checksum = dsp->dsa_zc;
	drr->drr_u.drr_end.drr_toguid = dsp->dsa_toguid;

	if (dump_bytes(dsp, drr, sizeof (dmu_replay_record_t)) != 0) {
		err = dsp->dsa_err;
		goto out;
	}

out:
	mutex_enter(&ds->ds_sendstream_lock);
	list_remove(&ds->ds_sendstreams, dsp);
	mutex_exit(&ds->ds_sendstream_lock);

	kmem_free(drr, sizeof (dmu_replay_record_t));
	kmem_free(dsp, sizeof (dmu_sendarg_t));

	dsl_dataset_long_rele(ds, FTAG);
	dsl_dataset_rele(ds, tag);

	return (err);
}
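/*
 * Illustrative summary (not in the original source): the resulting stream
 * is framed as
 *
 *	DRR_BEGIN
 *	  DRR_OBJECT / DRR_WRITE / DRR_FREE / ... records from the traversal
 *	DRR_END (carries the fletcher-4 checksum of everything before it)
 *
 * which is exactly the shape dmu_recv_stream() expects below.
 */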
int
dmu_send_obj(const char *pool, uint64_t tosnap, uint64_t fromsnap,
    int outfd, vnode_t *vp, offset_t *off)
{
	dsl_pool_t *dp;
	dsl_dataset_t *ds;
	dsl_dataset_t *fromds = NULL;
	int err;

	err = dsl_pool_hold(pool, FTAG, &dp);
	if (err != 0)
		return (err);

	err = dsl_dataset_hold_obj(dp, tosnap, FTAG, &ds);
	if (err != 0) {
		dsl_pool_rele(dp, FTAG);
		return (err);
	}

	if (fromsnap != 0) {
		err = dsl_dataset_hold_obj(dp, fromsnap, FTAG, &fromds);
		if (err != 0) {
			dsl_dataset_rele(ds, FTAG);
			dsl_pool_rele(dp, FTAG);
			return (err);
		}
	}

	return (dmu_send_impl(FTAG, dp, ds, fromds, outfd, vp, off));
}
int
dmu_send(const char *tosnap, const char *fromsnap,
    int outfd, vnode_t *vp, offset_t *off)
{
	dsl_pool_t *dp;
	dsl_dataset_t *ds;
	dsl_dataset_t *fromds = NULL;
	int err;

	if (strchr(tosnap, '@') == NULL)
		return (SET_ERROR(EINVAL));
	if (fromsnap != NULL && strchr(fromsnap, '@') == NULL)
		return (SET_ERROR(EINVAL));

	err = dsl_pool_hold(tosnap, FTAG, &dp);
	if (err != 0)
		return (err);

	err = dsl_dataset_hold(dp, tosnap, FTAG, &ds);
	if (err != 0) {
		dsl_pool_rele(dp, FTAG);
		return (err);
	}

	if (fromsnap != NULL) {
		err = dsl_dataset_hold(dp, fromsnap, FTAG, &fromds);
		if (err != 0) {
			dsl_dataset_rele(ds, FTAG);
			dsl_pool_rele(dp, FTAG);
			return (err);
		}
	}

	return (dmu_send_impl(FTAG, dp, ds, fromds, outfd, vp, off));
}
int
dmu_send_estimate(dsl_dataset_t *ds, dsl_dataset_t *fromds, uint64_t *sizep)
{
	int err;
	uint64_t size, recordsize;
	ASSERTV(dsl_pool_t *dp = ds->ds_dir->dd_pool);

	ASSERT(dsl_pool_config_held(dp));

	/* tosnap must be a snapshot */
	if (!dsl_dataset_is_snapshot(ds))
		return (SET_ERROR(EINVAL));

	/*
	 * fromsnap must be an earlier snapshot from the same fs as tosnap,
	 * or the origin's fs.
	 */
	if (fromds != NULL && !dsl_dataset_is_before(ds, fromds))
		return (SET_ERROR(EXDEV));

	/* Get uncompressed size estimate of changed data. */
	if (fromds == NULL) {
		size = ds->ds_phys->ds_uncompressed_bytes;
	} else {
		uint64_t used, comp;
		err = dsl_dataset_space_written(fromds, ds,
		    &used, &comp, &size);
		if (err != 0)
			return (err);
	}

	/*
	 * Assume that space (both on-disk and in-stream) is dominated by
	 * data.  We will adjust for indirect blocks and the copies property,
	 * but ignore per-object space used (e.g., dnodes and DRR_OBJECT
	 * records).
	 */

	/*
	 * Subtract out approximate space used by indirect blocks.
	 * Assume most space is used by data blocks (non-indirect, non-dnode).
	 * Assume all blocks are recordsize.  Assume ditto blocks and
	 * internal fragmentation counter out compression.
	 *
	 * Therefore, space used by indirect blocks is sizeof(blkptr_t) per
	 * block, which we observe in practice.
	 */
	err = dsl_prop_get_int_ds(ds, "recordsize", &recordsize);
	if (err != 0)
		return (err);
	size -= size / recordsize * sizeof (blkptr_t);

	/* Add in the space for the record associated with each block. */
	size += size / recordsize * sizeof (dmu_replay_record_t);

	*sizep = size;

	return (0);
}
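/*
 * Worked example (illustrative, not from the original source): for 1 GiB
 * of changed data at recordsize 128K there are 8192 blocks, so the estimate
 * subtracts 8192 * sizeof (blkptr_t) == 8192 * 128 bytes == 1 MiB for
 * indirect blocks, then adds 8192 * sizeof (dmu_replay_record_t) for the
 * per-block record headers.  Both corrections are well under 1% of the
 * data, so the estimate stays close to the raw changed-data size.
 */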
typedef struct dmu_recv_begin_arg {
	const char *drba_origin;
	dmu_recv_cookie_t *drba_cookie;
	cred_t *drba_cred;
} dmu_recv_begin_arg_t;
static int
recv_begin_check_existing_impl(dmu_recv_begin_arg_t *drba, dsl_dataset_t *ds,
    uint64_t fromguid)
{
	uint64_t val;
	int error;
	dsl_pool_t *dp = ds->ds_dir->dd_pool;

	/* must not have any changes since most recent snapshot */
	if (!drba->drba_cookie->drc_force &&
	    dsl_dataset_modified_since_lastsnap(ds))
		return (SET_ERROR(ETXTBSY));

	/* temporary clone name must not exist */
	error = zap_lookup(dp->dp_meta_objset,
	    ds->ds_dir->dd_phys->dd_child_dir_zapobj, recv_clone_name,
	    8, 1, &val);
	if (error != ENOENT)
		return (error == 0 ? EBUSY : error);

	/* new snapshot name must not exist */
	error = zap_lookup(dp->dp_meta_objset,
	    ds->ds_phys->ds_snapnames_zapobj, drba->drba_cookie->drc_tosnap,
	    8, 1, &val);
	if (error != ENOENT)
		return (error == 0 ? EEXIST : error);

	if (fromguid != 0) {
		/* if incremental, most recent snapshot must match fromguid */
		if (ds->ds_prev == NULL)
			return (SET_ERROR(ENODEV));

		/*
		 * most recent snapshot must match fromguid, or there are no
		 * changes since the fromguid one
		 */
		if (ds->ds_prev->ds_phys->ds_guid != fromguid) {
			uint64_t birth = ds->ds_prev->ds_phys->ds_bp.blk_birth;
			uint64_t obj = ds->ds_prev->ds_phys->ds_prev_snap_obj;
			while (obj != 0) {
				dsl_dataset_t *snap;
				error = dsl_dataset_hold_obj(dp, obj, FTAG,
				    &snap);
				if (error != 0)
					return (SET_ERROR(ENODEV));
				if (snap->ds_phys->ds_creation_txg < birth) {
					dsl_dataset_rele(snap, FTAG);
					return (SET_ERROR(ENODEV));
				}
				if (snap->ds_phys->ds_guid == fromguid) {
					dsl_dataset_rele(snap, FTAG);
					break; /* it's ok */
				}
				obj = snap->ds_phys->ds_prev_snap_obj;
				dsl_dataset_rele(snap, FTAG);
			}
			if (obj == 0)
				return (SET_ERROR(ENODEV));
		}
	} else {
		/* if full, most recent snapshot must be $ORIGIN */
		if (ds->ds_phys->ds_prev_snap_txg >= TXG_INITIAL)
			return (SET_ERROR(ENODEV));
	}

	return (0);
}
static int
dmu_recv_begin_check(void *arg, dmu_tx_t *tx)
{
	dmu_recv_begin_arg_t *drba = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
	uint64_t fromguid = drrb->drr_fromguid;
	int flags = drrb->drr_flags;
	int error;
	dsl_dataset_t *ds;
	const char *tofs = drba->drba_cookie->drc_tofs;

	/* already checked */
	ASSERT3U(drrb->drr_magic, ==, DMU_BACKUP_MAGIC);

	if (DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) ==
	    DMU_COMPOUNDSTREAM ||
	    drrb->drr_type >= DMU_OST_NUMTYPES ||
	    ((flags & DRR_FLAG_CLONE) && drba->drba_origin == NULL))
		return (SET_ERROR(EINVAL));

	/* Verify pool version supports SA if SA_SPILL feature set */
	if ((DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo) &
	    DMU_BACKUP_FEATURE_SA_SPILL) &&
	    spa_version(dp->dp_spa) < SPA_VERSION_SA) {
		return (SET_ERROR(ENOTSUP));
	}

	error = dsl_dataset_hold(dp, tofs, FTAG, &ds);
	if (error == 0) {
		/* target fs already exists; recv into temp clone */

		/* Can't recv a clone into an existing fs */
		if (flags & DRR_FLAG_CLONE) {
			dsl_dataset_rele(ds, FTAG);
			return (SET_ERROR(EINVAL));
		}

		error = recv_begin_check_existing_impl(drba, ds, fromguid);
		dsl_dataset_rele(ds, FTAG);
	} else if (error == ENOENT) {
		/* target fs does not exist; must be a full backup or clone */
		char buf[MAXNAMELEN];

		/*
		 * If it's a non-clone incremental, we are missing the
		 * target fs, so fail the recv.
		 */
		if (fromguid != 0 && !(flags & DRR_FLAG_CLONE))
			return (SET_ERROR(ENOENT));

		/* Open the parent of tofs */
		ASSERT3U(strlen(tofs), <, MAXNAMELEN);
		(void) strlcpy(buf, tofs, strrchr(tofs, '/') - tofs + 1);
		error = dsl_dataset_hold(dp, buf, FTAG, &ds);
		if (error != 0)
			return (error);

		if (drba->drba_origin != NULL) {
			dsl_dataset_t *origin;
			error = dsl_dataset_hold(dp, drba->drba_origin,
			    FTAG, &origin);
			if (error != 0) {
				dsl_dataset_rele(ds, FTAG);
				return (error);
			}
			if (!dsl_dataset_is_snapshot(origin)) {
				dsl_dataset_rele(origin, FTAG);
				dsl_dataset_rele(ds, FTAG);
				return (SET_ERROR(EINVAL));
			}
			if (origin->ds_phys->ds_guid != fromguid) {
				dsl_dataset_rele(origin, FTAG);
				dsl_dataset_rele(ds, FTAG);
				return (SET_ERROR(ENODEV));
			}
			dsl_dataset_rele(origin, FTAG);
		}
		dsl_dataset_rele(ds, FTAG);
		error = 0;
	}
	return (error);
}
static void
dmu_recv_begin_sync(void *arg, dmu_tx_t *tx)
{
	dmu_recv_begin_arg_t *drba = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
	const char *tofs = drba->drba_cookie->drc_tofs;
	dsl_dataset_t *ds, *newds;
	uint64_t dsobj;
	uint64_t crflags;
	int error;

	crflags = (drrb->drr_flags & DRR_FLAG_CI_DATA) ?
	    DS_FLAG_CI_DATASET : 0;

	error = dsl_dataset_hold(dp, tofs, FTAG, &ds);
	if (error == 0) {
		/* create temporary clone */
		dsobj = dsl_dataset_create_sync(ds->ds_dir, recv_clone_name,
		    ds->ds_prev, crflags, drba->drba_cred, tx);
		dsl_dataset_rele(ds, FTAG);
	} else {
		dsl_dir_t *dd;
		const char *tail;
		dsl_dataset_t *origin = NULL;

		VERIFY0(dsl_dir_hold(dp, tofs, FTAG, &dd, &tail));

		if (drba->drba_origin != NULL) {
			VERIFY0(dsl_dataset_hold(dp, drba->drba_origin,
			    FTAG, &origin));
		}

		/* Create new dataset. */
		dsobj = dsl_dataset_create_sync(dd,
		    strrchr(tofs, '/') + 1,
		    origin, crflags, drba->drba_cred, tx);
		if (origin != NULL)
			dsl_dataset_rele(origin, FTAG);
		dsl_dir_rele(dd, FTAG);
		drba->drba_cookie->drc_newfs = B_TRUE;
	}
	VERIFY0(dsl_dataset_own_obj(dp, dsobj, dmu_recv_tag, &newds));

	dmu_buf_will_dirty(newds->ds_dbuf, tx);
	newds->ds_phys->ds_flags |= DS_FLAG_INCONSISTENT;

	/*
	 * If we actually created a non-clone, we need to create the
	 * objset in our new dataset.
	 */
	if (BP_IS_HOLE(dsl_dataset_get_blkptr(newds))) {
		(void) dmu_objset_create_impl(dp->dp_spa,
		    newds, dsl_dataset_get_blkptr(newds), drrb->drr_type, tx);
	}

	drba->drba_cookie->drc_ds = newds;

	spa_history_log_internal_ds(newds, "receive", tx, "");
}
/*
 * NB: callers *MUST* call dmu_recv_stream() if dmu_recv_begin()
 * succeeds; otherwise we will leak the holds on the datasets.
 */
int
dmu_recv_begin(char *tofs, char *tosnap, struct drr_begin *drrb,
    boolean_t force, char *origin, dmu_recv_cookie_t *drc)
{
	dmu_recv_begin_arg_t drba = { 0 };
	dmu_replay_record_t *drr;

	bzero(drc, sizeof (dmu_recv_cookie_t));
	drc->drc_drrb = drrb;
	drc->drc_tosnap = tosnap;
	drc->drc_tofs = tofs;
	drc->drc_force = force;

	if (drrb->drr_magic == BSWAP_64(DMU_BACKUP_MAGIC))
		drc->drc_byteswap = B_TRUE;
	else if (drrb->drr_magic != DMU_BACKUP_MAGIC)
		return (SET_ERROR(EINVAL));

	drr = kmem_zalloc(sizeof (dmu_replay_record_t), KM_SLEEP);
	drr->drr_type = DRR_BEGIN;
	drr->drr_u.drr_begin = *drc->drc_drrb;
	if (drc->drc_byteswap) {
		fletcher_4_incremental_byteswap(drr,
		    sizeof (dmu_replay_record_t), &drc->drc_cksum);
	} else {
		fletcher_4_incremental_native(drr,
		    sizeof (dmu_replay_record_t), &drc->drc_cksum);
	}
	kmem_free(drr, sizeof (dmu_replay_record_t));

	if (drc->drc_byteswap) {
		drrb->drr_magic = BSWAP_64(drrb->drr_magic);
		drrb->drr_versioninfo = BSWAP_64(drrb->drr_versioninfo);
		drrb->drr_creation_time = BSWAP_64(drrb->drr_creation_time);
		drrb->drr_type = BSWAP_32(drrb->drr_type);
		drrb->drr_toguid = BSWAP_64(drrb->drr_toguid);
		drrb->drr_fromguid = BSWAP_64(drrb->drr_fromguid);
	}

	drba.drba_origin = origin;
	drba.drba_cookie = drc;
	drba.drba_cred = CRED();

	return (dsl_sync_task(tofs, dmu_recv_begin_check, dmu_recv_begin_sync,
	    &drba, 5));
}

struct restorearg {
	int err;
	boolean_t byteswap;
	vnode_t *vp;
	char *buf;
	uint64_t voff;
	int bufsize; /* amount of memory allocated for buf */
	zio_cksum_t cksum;
	avl_tree_t *guid_to_ds_map;
};

typedef struct guid_map_entry {
	uint64_t	guid;
	dsl_dataset_t	*gme_ds;
	avl_node_t	avlnode;
} guid_map_entry_t;

static int
guid_compare(const void *arg1, const void *arg2)
{
	const guid_map_entry_t *gmep1 = arg1;
	const guid_map_entry_t *gmep2 = arg2;

	if (gmep1->guid < gmep2->guid)
		return (-1);
	else if (gmep1->guid > gmep2->guid)
		return (1);
	return (0);
}

static void
free_guid_map_onexit(void *arg)
{
	avl_tree_t *ca = arg;
	void *cookie = NULL;
	guid_map_entry_t *gmep;

	while ((gmep = avl_destroy_nodes(ca, &cookie)) != NULL) {
		dsl_dataset_long_rele(gmep->gme_ds, gmep);
		kmem_free(gmep, sizeof (guid_map_entry_t));
	}
	avl_destroy(ca);
	kmem_free(ca, sizeof (avl_tree_t));
}
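/*
 * Illustrative note (not in the original source): the guid_to_ds_map is a
 * plain avl tree keyed by snapshot guid, so a lookup is just
 *
 *	guid_map_entry_t search = { .guid = refguid };
 *	gmep = avl_find(guid_to_ds_map, &search, NULL);
 *
 * It is populated by add_ds_to_guidmap() as each snapshot of a dedup'ed
 * stream package is received, and torn down by free_guid_map_onexit() when
 * the cleanup file descriptor is closed.
 */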
static void *
restore_read(struct restorearg *ra, int len)
{
	void *rv;
	int done = 0;

	/* some things will require 8-byte alignment, so everything must */
	ASSERT0(len % 8);

	while (done < len) {
		ssize_t resid;

		ra->err = vn_rdwr(UIO_READ, ra->vp,
		    (caddr_t)ra->buf + done, len - done,
		    ra->voff, UIO_SYSSPACE, FAPPEND,
		    RLIM64_INFINITY, CRED(), &resid);

		if (resid == len - done)
			ra->err = SET_ERROR(EINVAL);
		ra->voff += len - done - resid;
		done = len - resid;
		if (ra->err != 0)
			return (NULL);
	}

	ASSERT3U(done, ==, len);
	rv = ra->buf;
	if (ra->byteswap)
		fletcher_4_incremental_byteswap(rv, len, &ra->cksum);
	else
		fletcher_4_incremental_native(rv, len, &ra->cksum);
	return (rv);
}
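/*
 * Illustrative note (not in the original source): restore_read() feeds every
 * byte it returns through the same incremental fletcher-4 functions that
 * dump_bytes() uses on the send side, so ra->cksum tracks exactly what the
 * sender's dsa_zc held at the same point in the stream.  That is what makes
 * the DRR_END checksum comparison in dmu_recv_stream() meaningful.
 */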
noinline static void
backup_byteswap(dmu_replay_record_t *drr)
{
#define	DO64(X) (drr->drr_u.X = BSWAP_64(drr->drr_u.X))
#define	DO32(X) (drr->drr_u.X = BSWAP_32(drr->drr_u.X))
	drr->drr_type = BSWAP_32(drr->drr_type);
	drr->drr_payloadlen = BSWAP_32(drr->drr_payloadlen);
	switch (drr->drr_type) {
	case DRR_BEGIN:
		DO64(drr_begin.drr_magic);
		DO64(drr_begin.drr_versioninfo);
		DO64(drr_begin.drr_creation_time);
		DO32(drr_begin.drr_type);
		DO32(drr_begin.drr_flags);
		DO64(drr_begin.drr_toguid);
		DO64(drr_begin.drr_fromguid);
		break;
	case DRR_OBJECT:
		DO64(drr_object.drr_object);
		/* DO64(drr_object.drr_allocation_txg); */
		DO32(drr_object.drr_type);
		DO32(drr_object.drr_bonustype);
		DO32(drr_object.drr_blksz);
		DO32(drr_object.drr_bonuslen);
		DO64(drr_object.drr_toguid);
		break;
	case DRR_FREEOBJECTS:
		DO64(drr_freeobjects.drr_firstobj);
		DO64(drr_freeobjects.drr_numobjs);
		DO64(drr_freeobjects.drr_toguid);
		break;
	case DRR_WRITE:
		DO64(drr_write.drr_object);
		DO32(drr_write.drr_type);
		DO64(drr_write.drr_offset);
		DO64(drr_write.drr_length);
		DO64(drr_write.drr_toguid);
		DO64(drr_write.drr_key.ddk_cksum.zc_word[0]);
		DO64(drr_write.drr_key.ddk_cksum.zc_word[1]);
		DO64(drr_write.drr_key.ddk_cksum.zc_word[2]);
		DO64(drr_write.drr_key.ddk_cksum.zc_word[3]);
		DO64(drr_write.drr_key.ddk_prop);
		break;
	case DRR_WRITE_BYREF:
		DO64(drr_write_byref.drr_object);
		DO64(drr_write_byref.drr_offset);
		DO64(drr_write_byref.drr_length);
		DO64(drr_write_byref.drr_toguid);
		DO64(drr_write_byref.drr_refguid);
		DO64(drr_write_byref.drr_refobject);
		DO64(drr_write_byref.drr_refoffset);
		DO64(drr_write_byref.drr_key.ddk_cksum.zc_word[0]);
		DO64(drr_write_byref.drr_key.ddk_cksum.zc_word[1]);
		DO64(drr_write_byref.drr_key.ddk_cksum.zc_word[2]);
		DO64(drr_write_byref.drr_key.ddk_cksum.zc_word[3]);
		DO64(drr_write_byref.drr_key.ddk_prop);
		break;
	case DRR_FREE:
		DO64(drr_free.drr_object);
		DO64(drr_free.drr_offset);
		DO64(drr_free.drr_length);
		DO64(drr_free.drr_toguid);
		break;
	case DRR_SPILL:
		DO64(drr_spill.drr_object);
		DO64(drr_spill.drr_length);
		DO64(drr_spill.drr_toguid);
		break;
	case DRR_END:
		DO64(drr_end.drr_checksum.zc_word[0]);
		DO64(drr_end.drr_checksum.zc_word[1]);
		DO64(drr_end.drr_checksum.zc_word[2]);
		DO64(drr_end.drr_checksum.zc_word[3]);
		DO64(drr_end.drr_toguid);
		break;
	default:
		break;
	}
#undef DO64
#undef DO32
}
noinline static int
restore_object(struct restorearg *ra, objset_t *os, struct drr_object *drro)
{
	dmu_tx_t *tx;
	void *data = NULL;
	int err;

	if (drro->drr_type == DMU_OT_NONE ||
	    !DMU_OT_IS_VALID(drro->drr_type) ||
	    !DMU_OT_IS_VALID(drro->drr_bonustype) ||
	    drro->drr_checksumtype >= ZIO_CHECKSUM_FUNCTIONS ||
	    drro->drr_compress >= ZIO_COMPRESS_FUNCTIONS ||
	    P2PHASE(drro->drr_blksz, SPA_MINBLOCKSIZE) ||
	    drro->drr_blksz < SPA_MINBLOCKSIZE ||
	    drro->drr_blksz > SPA_MAXBLOCKSIZE ||
	    drro->drr_bonuslen > DN_MAX_BONUSLEN) {
		return (SET_ERROR(EINVAL));
	}

	err = dmu_object_info(os, drro->drr_object, NULL);

	if (err != 0 && err != ENOENT)
		return (SET_ERROR(EINVAL));

	if (drro->drr_bonuslen) {
		data = restore_read(ra, P2ROUNDUP(drro->drr_bonuslen, 8));
		if (ra->err != 0)
			return (ra->err);
	}

	if (err == ENOENT) {
		/* currently free, want to be allocated */
		tx = dmu_tx_create(os);
		dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
		err = dmu_tx_assign(tx, TXG_WAIT);
		if (err != 0) {
			dmu_tx_abort(tx);
			return (err);
		}
		err = dmu_object_claim(os, drro->drr_object,
		    drro->drr_type, drro->drr_blksz,
		    drro->drr_bonustype, drro->drr_bonuslen, tx);
		dmu_tx_commit(tx);
	} else {
		/* currently allocated, want to be allocated */
		err = dmu_object_reclaim(os, drro->drr_object,
		    drro->drr_type, drro->drr_blksz,
		    drro->drr_bonustype, drro->drr_bonuslen);
	}
	if (err != 0) {
		return (SET_ERROR(EINVAL));
	}

	tx = dmu_tx_create(os);
	dmu_tx_hold_bonus(tx, drro->drr_object);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err != 0) {
		dmu_tx_abort(tx);
		return (err);
	}

	dmu_object_set_checksum(os, drro->drr_object, drro->drr_checksumtype,
	    tx);
	dmu_object_set_compress(os, drro->drr_object, drro->drr_compress, tx);

	if (data != NULL) {
		dmu_buf_t *db;

		VERIFY(0 == dmu_bonus_hold(os, drro->drr_object, FTAG, &db));
		dmu_buf_will_dirty(db, tx);

		ASSERT3U(db->db_size, >=, drro->drr_bonuslen);
		bcopy(data, db->db_data, drro->drr_bonuslen);
		if (ra->byteswap) {
			dmu_object_byteswap_t byteswap =
			    DMU_OT_BYTESWAP(drro->drr_bonustype);
			dmu_ot_byteswap[byteswap].ob_func(db->db_data,
			    drro->drr_bonuslen);
		}
		dmu_buf_rele(db, FTAG);
	}
	dmu_tx_commit(tx);
	return (0);
}
/* ARGSUSED */
noinline static int
restore_freeobjects(struct restorearg *ra, objset_t *os,
    struct drr_freeobjects *drrfo)
{
	uint64_t obj;

	if (drrfo->drr_firstobj + drrfo->drr_numobjs < drrfo->drr_firstobj)
		return (SET_ERROR(EINVAL));

	for (obj = drrfo->drr_firstobj;
	    obj < drrfo->drr_firstobj + drrfo->drr_numobjs;
	    (void) dmu_object_next(os, &obj, FALSE, 0)) {
		int err;

		if (dmu_object_info(os, obj, NULL) != 0)
			continue;

		err = dmu_free_object(os, obj);
		if (err != 0)
			return (err);
	}
	return (0);
}
noinline static int
restore_write(struct restorearg *ra, objset_t *os,
    struct drr_write *drrw)
{
	dmu_tx_t *tx;
	void *data;
	int err;

	if (drrw->drr_offset + drrw->drr_length < drrw->drr_offset ||
	    !DMU_OT_IS_VALID(drrw->drr_type))
		return (SET_ERROR(EINVAL));

	data = restore_read(ra, drrw->drr_length);
	if (data == NULL)
		return (ra->err);

	if (dmu_object_info(os, drrw->drr_object, NULL) != 0)
		return (SET_ERROR(EINVAL));

	tx = dmu_tx_create(os);

	dmu_tx_hold_write(tx, drrw->drr_object,
	    drrw->drr_offset, drrw->drr_length);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err != 0) {
		dmu_tx_abort(tx);
		return (err);
	}
	if (ra->byteswap) {
		dmu_object_byteswap_t byteswap =
		    DMU_OT_BYTESWAP(drrw->drr_type);
		dmu_ot_byteswap[byteswap].ob_func(data, drrw->drr_length);
	}
	dmu_write(os, drrw->drr_object,
	    drrw->drr_offset, drrw->drr_length, data, tx);
	dmu_tx_commit(tx);
	return (0);
}
/*
 * Handle a DRR_WRITE_BYREF record.  This record is used in dedup'ed
 * streams to refer to a copy of the data that is already on the
 * system because it came in earlier in the stream.  This function
 * finds the earlier copy of the data, and uses that copy instead of
 * data from the stream to fulfill this write.
 */
noinline static int
restore_write_byref(struct restorearg *ra, objset_t *os,
    struct drr_write_byref *drrwbr)
{
	dmu_tx_t *tx;
	int err;
	guid_map_entry_t gmesrch;
	guid_map_entry_t *gmep;
	avl_index_t where;
	objset_t *ref_os = NULL;
	dmu_buf_t *dbp;

	if (drrwbr->drr_offset + drrwbr->drr_length < drrwbr->drr_offset)
		return (SET_ERROR(EINVAL));

	/*
	 * If the GUID of the referenced dataset is different from the
	 * GUID of the target dataset, find the referenced dataset.
	 */
	if (drrwbr->drr_toguid != drrwbr->drr_refguid) {
		gmesrch.guid = drrwbr->drr_refguid;
		if ((gmep = avl_find(ra->guid_to_ds_map, &gmesrch,
		    &where)) == NULL) {
			return (SET_ERROR(EINVAL));
		}
		if (dmu_objset_from_ds(gmep->gme_ds, &ref_os))
			return (SET_ERROR(EINVAL));
	} else {
		ref_os = os;
	}

	err = dmu_buf_hold(ref_os, drrwbr->drr_refobject,
	    drrwbr->drr_refoffset, FTAG, &dbp, DMU_READ_PREFETCH);
	if (err != 0)
		return (err);

	tx = dmu_tx_create(os);

	dmu_tx_hold_write(tx, drrwbr->drr_object,
	    drrwbr->drr_offset, drrwbr->drr_length);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err != 0) {
		dmu_tx_abort(tx);
		return (err);
	}
	dmu_write(os, drrwbr->drr_object,
	    drrwbr->drr_offset, drrwbr->drr_length, dbp->db_data, tx);
	dmu_buf_rele(dbp, FTAG);
	dmu_tx_commit(tx);
	return (0);
}
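/*
 * Illustrative example (not from the original source): in a dedup'ed stream
 * ("zfs send -D"), if block B was already sent for object 5 at offset 0, a
 * second occurrence of B at object 9 offset 128K arrives as
 *
 *	DRR_WRITE_BYREF { drr_object = 9, drr_offset = 128K,
 *	    drr_refobject = 5, drr_refoffset = 0, drr_refguid = ... }
 *
 * and is satisfied above by dmu_buf_hold() on the already-received copy
 * rather than by payload bytes from the stream.
 */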
noinline static int
restore_spill(struct restorearg *ra, objset_t *os, struct drr_spill *drrs)
{
	dmu_tx_t *tx;
	void *data;
	dmu_buf_t *db, *db_spill;
	int err;

	if (drrs->drr_length < SPA_MINBLOCKSIZE ||
	    drrs->drr_length > SPA_MAXBLOCKSIZE)
		return (SET_ERROR(EINVAL));

	data = restore_read(ra, drrs->drr_length);
	if (data == NULL)
		return (ra->err);

	if (dmu_object_info(os, drrs->drr_object, NULL) != 0)
		return (SET_ERROR(EINVAL));

	VERIFY(0 == dmu_bonus_hold(os, drrs->drr_object, FTAG, &db));
	if ((err = dmu_spill_hold_by_bonus(db, FTAG, &db_spill)) != 0) {
		dmu_buf_rele(db, FTAG);
		return (err);
	}

	tx = dmu_tx_create(os);

	dmu_tx_hold_spill(tx, db->db_object);

	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err != 0) {
		dmu_buf_rele(db, FTAG);
		dmu_buf_rele(db_spill, FTAG);
		dmu_tx_abort(tx);
		return (err);
	}
	dmu_buf_will_dirty(db_spill, tx);

	if (db_spill->db_size < drrs->drr_length)
		VERIFY(0 == dbuf_spill_set_blksz(db_spill,
		    drrs->drr_length, tx));
	bcopy(data, db_spill->db_data, drrs->drr_length);

	dmu_buf_rele(db, FTAG);
	dmu_buf_rele(db_spill, FTAG);

	dmu_tx_commit(tx);
	return (0);
}
/* ARGSUSED */
noinline static int
restore_free(struct restorearg *ra, objset_t *os,
    struct drr_free *drrf)
{
	int err;

	if (drrf->drr_length != -1ULL &&
	    drrf->drr_offset + drrf->drr_length < drrf->drr_offset)
		return (SET_ERROR(EINVAL));

	if (dmu_object_info(os, drrf->drr_object, NULL) != 0)
		return (SET_ERROR(EINVAL));

	err = dmu_free_long_range(os, drrf->drr_object,
	    drrf->drr_offset, drrf->drr_length);
	return (err);
}
/* used to destroy the drc_ds on error */
static void
dmu_recv_cleanup_ds(dmu_recv_cookie_t *drc)
{
	char name[MAXNAMELEN];

	dsl_dataset_name(drc->drc_ds, name);
	dsl_dataset_disown(drc->drc_ds, dmu_recv_tag);
	(void) dsl_destroy_head(name);
}
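/*
 * Illustrative usage sketch (not from the original source): a receive is
 * always the three-call sequence
 *
 *	dmu_recv_begin(tofs, tosnap, drrb, force, origin, &drc);
 *	dmu_recv_stream(&drc, vp, &voff, cleanup_fd, &action_handle);
 *	dmu_recv_end(&drc);
 *
 * dmu_recv_begin() creates the temporary clone (or new dataset) and takes
 * the holds; dmu_recv_stream() applies the records; dmu_recv_end() either
 * snapshots the result or, on failure, the stream path destroys the
 * half-received state via dmu_recv_cleanup_ds().
 */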
/*
 * NB: callers *must* call dmu_recv_end() if this succeeds.
 */
int
dmu_recv_stream(dmu_recv_cookie_t *drc, vnode_t *vp, offset_t *voffp,
    int cleanup_fd, uint64_t *action_handlep)
{
	struct restorearg ra = { 0 };
	dmu_replay_record_t *drr;
	objset_t *os;
	zio_cksum_t pcksum;
	int featureflags;

	ra.byteswap = drc->drc_byteswap;
	ra.cksum = drc->drc_cksum;
	ra.vp = vp;
	ra.voff = *voffp;
	ra.bufsize = 1<<20;
	ra.buf = vmem_alloc(ra.bufsize, KM_SLEEP);

	/* these were verified in dmu_recv_begin */
	ASSERT3U(DMU_GET_STREAM_HDRTYPE(drc->drc_drrb->drr_versioninfo), ==,
	    DMU_SUBSTREAM);
	ASSERT3U(drc->drc_drrb->drr_type, <, DMU_OST_NUMTYPES);

	/*
	 * Open the objset we are modifying.
	 */
	VERIFY0(dmu_objset_from_ds(drc->drc_ds, &os));

	ASSERT(drc->drc_ds->ds_phys->ds_flags & DS_FLAG_INCONSISTENT);

	featureflags = DMU_GET_FEATUREFLAGS(drc->drc_drrb->drr_versioninfo);

	/* if this stream is dedup'ed, set up the avl tree for guid mapping */
	if (featureflags & DMU_BACKUP_FEATURE_DEDUP) {
		minor_t minor;

		if (cleanup_fd == -1) {
			ra.err = SET_ERROR(EBADF);
			goto out;
		}
		ra.err = zfs_onexit_fd_hold(cleanup_fd, &minor);
		if (ra.err != 0) {
			cleanup_fd = -1;
			goto out;
		}

		if (*action_handlep == 0) {
			ra.guid_to_ds_map =
			    kmem_alloc(sizeof (avl_tree_t), KM_SLEEP);
			avl_create(ra.guid_to_ds_map, guid_compare,
			    sizeof (guid_map_entry_t),
			    offsetof(guid_map_entry_t, avlnode));
			ra.err = zfs_onexit_add_cb(minor,
			    free_guid_map_onexit, ra.guid_to_ds_map,
			    action_handlep);
			if (ra.err != 0)
				goto out;
		} else {
			ra.err = zfs_onexit_cb_data(minor, *action_handlep,
			    (void **)&ra.guid_to_ds_map);
			if (ra.err != 0)
				goto out;
		}

		drc->drc_guid_to_ds_map = ra.guid_to_ds_map;
	}

	/*
	 * Read records and process them.
	 */
	pcksum = ra.cksum;
	while (ra.err == 0 &&
	    NULL != (drr = restore_read(&ra, sizeof (*drr)))) {
		if (issig(JUSTLOOKING) && issig(FORREAL)) {
			ra.err = SET_ERROR(EINTR);
			goto out;
		}

		if (ra.byteswap)
			backup_byteswap(drr);

		switch (drr->drr_type) {
		case DRR_OBJECT:
		{
			/*
			 * We need to make a copy of the record header,
			 * because restore_{object,write} may need to
			 * restore_read(), which will invalidate drr.
			 */
			struct drr_object drro = drr->drr_u.drr_object;
			ra.err = restore_object(&ra, os, &drro);
			break;
		}
		case DRR_FREEOBJECTS:
		{
			struct drr_freeobjects drrfo =
			    drr->drr_u.drr_freeobjects;
			ra.err = restore_freeobjects(&ra, os, &drrfo);
			break;
		}
		case DRR_WRITE:
		{
			struct drr_write drrw = drr->drr_u.drr_write;
			ra.err = restore_write(&ra, os, &drrw);
			break;
		}
		case DRR_WRITE_BYREF:
		{
			struct drr_write_byref drrwbr =
			    drr->drr_u.drr_write_byref;
			ra.err = restore_write_byref(&ra, os, &drrwbr);
			break;
		}
		case DRR_FREE:
		{
			struct drr_free drrf = drr->drr_u.drr_free;
			ra.err = restore_free(&ra, os, &drrf);
			break;
		}
		case DRR_END:
		{
			struct drr_end drre = drr->drr_u.drr_end;
			/*
			 * We compare against the *previous* checksum
			 * value, because the stored checksum is of
			 * everything before the DRR_END record.
			 */
			if (!ZIO_CHECKSUM_EQUAL(drre.drr_checksum, pcksum))
				ra.err = SET_ERROR(ECKSUM);
			goto out;
		}
		case DRR_SPILL:
		{
			struct drr_spill drrs = drr->drr_u.drr_spill;
			ra.err = restore_spill(&ra, os, &drrs);
			break;
		}
		default:
			ra.err = SET_ERROR(EINVAL);
			goto out;
		}
		pcksum = ra.cksum;
	}
	ASSERT(ra.err != 0);

out:
	if ((featureflags & DMU_BACKUP_FEATURE_DEDUP) && (cleanup_fd != -1))
		zfs_onexit_fd_rele(cleanup_fd);

	if (ra.err != 0) {
		/*
		 * destroy what we created, so we don't leave it in the
		 * inconsistent restoring state.
		 */
		dmu_recv_cleanup_ds(drc);
	}

	vmem_free(ra.buf, ra.bufsize);
	*voffp = ra.voff;
	return (ra.err);
}
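/*
 * Illustrative note (not in the original source): the receive loop above
 * deliberately copies each record header onto the stack (e.g. "struct
 * drr_write drrw = drr->drr_u.drr_write") before calling its handler,
 * because the handler may call restore_read() for the payload, which reuses
 * ra.buf and would otherwise clobber the header it is still working from.
 */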
static int
dmu_recv_end_check(void *arg, dmu_tx_t *tx)
{
	dmu_recv_cookie_t *drc = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	int error;

	ASSERT3P(drc->drc_ds->ds_owner, ==, dmu_recv_tag);

	if (!drc->drc_newfs) {
		dsl_dataset_t *origin_head;

		error = dsl_dataset_hold(dp, drc->drc_tofs, FTAG,
		    &origin_head);
		if (error != 0)
			return (error);
		error = dsl_dataset_clone_swap_check_impl(drc->drc_ds,
		    origin_head, drc->drc_force);
		if (error != 0) {
			dsl_dataset_rele(origin_head, FTAG);
			return (error);
		}
		error = dsl_dataset_snapshot_check_impl(origin_head,
		    drc->drc_tosnap, tx);
		dsl_dataset_rele(origin_head, FTAG);
		if (error != 0)
			return (error);

		error = dsl_destroy_head_check_impl(drc->drc_ds, 1);
	} else {
		error = dsl_dataset_snapshot_check_impl(drc->drc_ds,
		    drc->drc_tosnap, tx);
	}
	return (error);
}
static void
dmu_recv_end_sync(void *arg, dmu_tx_t *tx)
{
	dmu_recv_cookie_t *drc = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);

	spa_history_log_internal_ds(drc->drc_ds, "finish receiving",
	    tx, "snap=%s", drc->drc_tosnap);

	if (!drc->drc_newfs) {
		dsl_dataset_t *origin_head;

		VERIFY0(dsl_dataset_hold(dp, drc->drc_tofs, FTAG,
		    &origin_head));
		dsl_dataset_clone_swap_sync_impl(drc->drc_ds,
		    origin_head, tx);
		dsl_dataset_snapshot_sync_impl(origin_head,
		    drc->drc_tosnap, tx);

		/* set snapshot's creation time and guid */
		dmu_buf_will_dirty(origin_head->ds_prev->ds_dbuf, tx);
		origin_head->ds_prev->ds_phys->ds_creation_time =
		    drc->drc_drrb->drr_creation_time;
		origin_head->ds_prev->ds_phys->ds_guid =
		    drc->drc_drrb->drr_toguid;
		origin_head->ds_prev->ds_phys->ds_flags &=
		    ~DS_FLAG_INCONSISTENT;

		dmu_buf_will_dirty(origin_head->ds_dbuf, tx);
		origin_head->ds_phys->ds_flags &= ~DS_FLAG_INCONSISTENT;

		dsl_dataset_rele(origin_head, FTAG);
		dsl_destroy_head_sync_impl(drc->drc_ds, tx);
	} else {
		dsl_dataset_t *ds = drc->drc_ds;

		dsl_dataset_snapshot_sync_impl(ds, drc->drc_tosnap, tx);

		/* set snapshot's creation time and guid */
		dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
		ds->ds_prev->ds_phys->ds_creation_time =
		    drc->drc_drrb->drr_creation_time;
		ds->ds_prev->ds_phys->ds_guid = drc->drc_drrb->drr_toguid;
		ds->ds_prev->ds_phys->ds_flags &= ~DS_FLAG_INCONSISTENT;

		dmu_buf_will_dirty(ds->ds_dbuf, tx);
		ds->ds_phys->ds_flags &= ~DS_FLAG_INCONSISTENT;
	}
	drc->drc_newsnapobj = drc->drc_ds->ds_phys->ds_prev_snap_obj;
	/*
	 * Release the hold from dmu_recv_begin.  This must be done before
	 * we return to open context, so that when we free the dataset's dnode,
	 * we can evict its bonus buffer.
	 */
	dsl_dataset_disown(drc->drc_ds, dmu_recv_tag);
	drc->drc_ds = NULL;
}
static int
add_ds_to_guidmap(const char *name, avl_tree_t *guid_map, uint64_t snapobj)
{
	dsl_pool_t *dp;
	dsl_dataset_t *snapds;
	guid_map_entry_t *gmep;
	int err;

	ASSERT(guid_map != NULL);

	err = dsl_pool_hold(name, FTAG, &dp);
	if (err != 0)
		return (err);
	err = dsl_dataset_hold_obj(dp, snapobj, FTAG, &snapds);
	if (err == 0) {
		gmep = kmem_alloc(sizeof (guid_map_entry_t), KM_SLEEP);
		gmep->guid = snapds->ds_phys->ds_guid;
		gmep->gme_ds = snapds;
		avl_add(guid_map, gmep);
		dsl_dataset_long_hold(snapds, gmep);
		dsl_dataset_rele(snapds, FTAG);
	}
	dsl_pool_rele(dp, FTAG);
	return (err);
}

static int dmu_recv_end_modified_blocks = 3;

static int
dmu_recv_existing_end(dmu_recv_cookie_t *drc)
{
	int error;
	char *name;

#ifdef _KERNEL
	/*
	 * We will be destroying the ds; make sure its origin is unmounted if
	 * necessary.
	 */
	name = kmem_alloc(MAXNAMELEN, KM_SLEEP);
	dsl_dataset_name(drc->drc_ds, name);
	zfs_destroy_unmount_origin(name);
	kmem_free(name, MAXNAMELEN);
#endif

	error = dsl_sync_task(drc->drc_tofs,
	    dmu_recv_end_check, dmu_recv_end_sync, drc,
	    dmu_recv_end_modified_blocks);

	if (error != 0)
		dmu_recv_cleanup_ds(drc);
	return (error);
}

static int
dmu_recv_new_end(dmu_recv_cookie_t *drc)
{
	int error;

	error = dsl_sync_task(drc->drc_tofs,
	    dmu_recv_end_check, dmu_recv_end_sync, drc,
	    dmu_recv_end_modified_blocks);

	if (error != 0) {
		dmu_recv_cleanup_ds(drc);
	} else if (drc->drc_guid_to_ds_map != NULL) {
		(void) add_ds_to_guidmap(drc->drc_tofs,
		    drc->drc_guid_to_ds_map,
		    drc->drc_newsnapobj);
	}
	return (error);
}

int
dmu_recv_end(dmu_recv_cookie_t *drc)
{
	if (drc->drc_newfs)
		return (dmu_recv_new_end(drc));
	else
		return (dmu_recv_existing_end(drc));
}