/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2011, 2015 by Delphix. All rights reserved.
 * Copyright (c) 2014, Joyent, Inc. All rights reserved.
 * Copyright 2014 HybridCluster. All rights reserved.
 * Copyright 2016 RackTop Systems.
 * Copyright (c) 2016 Actifio, Inc. All rights reserved.
 */
#include <sys/dmu_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dnode.h>
#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/spa_impl.h>
#include <sys/zfs_ioctl.h>
#include <sys/zio_checksum.h>
#include <sys/zfs_znode.h>
#include <zfs_fletcher.h>
#include <sys/zfs_onexit.h>
#include <sys/dmu_send.h>
#include <sys/dsl_destroy.h>
#include <sys/blkptr.h>
#include <sys/dsl_bookmark.h>
#include <sys/zfeature.h>
#include <sys/bqueue.h>
#include <sys/policy.h>
/* Set this tunable to TRUE to replace corrupt data with 0x2f5baddb10c */
int zfs_send_corrupt_data = B_FALSE;
int zfs_send_queue_length = SPA_MAXBLOCKSIZE;
int zfs_recv_queue_length = SPA_MAXBLOCKSIZE;
/* Set this tunable to FALSE to disable setting of DRR_FLAG_FREERECORDS */
int zfs_send_set_freerecords_bit = B_TRUE;

static char *dmu_recv_tag = "dmu_recv_tag";
const char *recv_clone_name = "%recv";
#define	BP_SPAN(datablkszsec, indblkshift, level) \
	(((uint64_t)datablkszsec) << (SPA_MINBLOCKSHIFT + \
	(level) * (indblkshift - SPA_BLKPTRSHIFT)))
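
/*
 * Editor's illustration (not original code): with the standard values
 * SPA_MINBLOCKSHIFT == 9 and SPA_BLKPTRSHIFT == 7, a dataset using 128K
 * data blocks (datablkszsec == 256) and 128K indirect blocks
 * (indblkshift == 17) gives
 *	BP_SPAN(256, 17, 1) == 256 << (9 + 1 * (17 - 7)) == 128 MiB,
 * i.e. one level-1 indirect block holds 1024 block pointers, each covering
 * a 128 KiB data block.
 */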
static void byteswap_record(dmu_replay_record_t *drr);
struct send_thread_arg {
	bqueue_t	q;
	dsl_dataset_t	*ds;		/* Dataset to traverse */
	uint64_t	fromtxg;	/* Traverse from this txg */
	int		flags;		/* flags to pass to traverse_dataset */
	int		error_code;
	boolean_t	cancel;
	zbookmark_phys_t resume;
};

struct send_block_record {
	boolean_t		eos_marker; /* Marks the end of the stream */
	blkptr_t		bp;
	zbookmark_phys_t	zb;
	uint8_t			indblkshift;
	uint16_t		datablkszsec;
	bqueue_node_t		ln;
};
typedef struct dump_bytes_io {
	dmu_sendarg_t	*dbi_dsp;
	void		*dbi_buf;
	int		dbi_len;
} dump_bytes_io_t;
static void
dump_bytes_cb(void *arg)
{
	dump_bytes_io_t *dbi = (dump_bytes_io_t *)arg;
	dmu_sendarg_t *dsp = dbi->dbi_dsp;
	dsl_dataset_t *ds = dmu_objset_ds(dsp->dsa_os);
	ssize_t resid; /* have to get resid to get detailed errno */

	/*
	 * The code does not rely on this (len being a multiple of 8).  We keep
	 * this assertion because of the corresponding assertion in
	 * receive_read().  Keeping this assertion ensures that we do not
	 * inadvertently break backwards compatibility (causing the assertion
	 * in receive_read() to trigger on old software).
	 *
	 * Removing the assertions could be rolled into a new feature that uses
	 * data that isn't 8-byte aligned; if the assertions were removed, a
	 * feature flag would have to be added.
	 */
	ASSERT0(dbi->dbi_len % 8);

	dsp->dsa_err = vn_rdwr(UIO_WRITE, dsp->dsa_vp,
	    (caddr_t)dbi->dbi_buf, dbi->dbi_len,
	    0, UIO_SYSSPACE, FAPPEND, RLIM64_INFINITY, CRED(), &resid);

	mutex_enter(&ds->ds_sendstream_lock);
	*dsp->dsa_off += dbi->dbi_len;
	mutex_exit(&ds->ds_sendstream_lock);
}
static int
dump_bytes(dmu_sendarg_t *dsp, void *buf, int len)
{
	dump_bytes_io_t dbi;

	dbi.dbi_dsp = dsp;
	dbi.dbi_buf = buf;
	dbi.dbi_len = len;

#if defined(HAVE_LARGE_STACKS)
	dump_bytes_cb(&dbi);
#else
	/*
	 * The vn_rdwr() call is performed in a taskq to ensure that there is
	 * always enough stack space to write safely to the target filesystem.
	 * The ZIO_TYPE_FREE threads are used because there can be a lot of
	 * them and they are used in vdev_file.c for a similar purpose.
	 */
	spa_taskq_dispatch_sync(dmu_objset_spa(dsp->dsa_os), ZIO_TYPE_FREE,
	    ZIO_TASKQ_ISSUE, dump_bytes_cb, &dbi, TQ_SLEEP);
#endif /* HAVE_LARGE_STACKS */

	return (dsp->dsa_err);
}
/*
 * For all record types except BEGIN, fill in the checksum (overlaid in
 * drr_u.drr_checksum.drr_checksum).  The checksum verifies everything
 * up to the start of the checksum itself.
 */
static int
dump_record(dmu_sendarg_t *dsp, void *payload, int payload_len)
{
	ASSERT3U(offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
	    ==, sizeof (dmu_replay_record_t) - sizeof (zio_cksum_t));
	(void) fletcher_4_incremental_native(dsp->dsa_drr,
	    offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
	    &dsp->dsa_zc);
	if (dsp->dsa_drr->drr_type == DRR_BEGIN) {
		dsp->dsa_sent_begin = B_TRUE;
	} else {
		ASSERT(ZIO_CHECKSUM_IS_ZERO(&dsp->dsa_drr->drr_u.
		    drr_checksum.drr_checksum));
		dsp->dsa_drr->drr_u.drr_checksum.drr_checksum = dsp->dsa_zc;
	}
	if (dsp->dsa_drr->drr_type == DRR_END) {
		dsp->dsa_sent_end = B_TRUE;
	}
	(void) fletcher_4_incremental_native(&dsp->dsa_drr->
	    drr_u.drr_checksum.drr_checksum,
	    sizeof (zio_cksum_t), &dsp->dsa_zc);
	if (dump_bytes(dsp, dsp->dsa_drr, sizeof (dmu_replay_record_t)) != 0)
		return (SET_ERROR(EINTR));
	if (payload_len != 0) {
		(void) fletcher_4_incremental_native(payload, payload_len,
		    &dsp->dsa_zc);
		if (dump_bytes(dsp, payload, payload_len) != 0)
			return (SET_ERROR(EINTR));
	}
	return (0);
}
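
/*
 * Editor's sketch of how a stream consumer mirrors the scheme above; this is
 * an illustration, not code from this file, and read_exact() is a
 * hypothetical helper.  The running checksum ("zc" here, dsa_zc above)
 * carries across records, so each record's embedded checksum covers the
 * entire stream up to that point:
 *
 *	zio_cksum_t zc = { { 0 } };
 *	dmu_replay_record_t rec;
 *	read_exact(fd, &rec, sizeof (rec));
 *	(void) fletcher_4_incremental_native(&rec,
 *	    offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
 *	    &zc);
 *	// For all record types except BEGIN, zc must now equal
 *	// rec.drr_u.drr_checksum.drr_checksum.
 */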
/*
 * Fill in the drr_free struct, or perform aggregation if the previous record is
 * also a free record, and the two are adjacent.
 *
 * Note that we send free records even for a full send, because we want to be
 * able to receive a full send as a clone, which requires a list of all the free
 * and freeobject records that were generated on the source.
 */
static int
dump_free(dmu_sendarg_t *dsp, uint64_t object, uint64_t offset,
    uint64_t length)
{
	struct drr_free *drrf = &(dsp->dsa_drr->drr_u.drr_free);

	/*
	 * When we receive a free record, dbuf_free_range() assumes
	 * that the receiving system doesn't have any dbufs in the range
	 * being freed.  This is always true because there is a one-record
	 * constraint: we only send one WRITE record for any given
	 * object,offset.  We know that the one-record constraint is
	 * true because we always send data in increasing order by
	 * object,offset.
	 *
	 * If the increasing-order constraint ever changes, we should find
	 * another way to assert that the one-record constraint is still
	 * satisfied.
	 */
	ASSERT(object > dsp->dsa_last_data_object ||
	    (object == dsp->dsa_last_data_object &&
	    offset > dsp->dsa_last_data_offset));

	if (length != -1ULL && offset + length < offset)
		length = -1ULL;

	/*
	 * If there is a pending op, but it's not PENDING_FREE, push it out,
	 * since free block aggregation can only be done for blocks of the
	 * same type (i.e., DRR_FREE records can only be aggregated with
	 * other DRR_FREE records.  DRR_FREEOBJECTS records can only be
	 * aggregated with other DRR_FREEOBJECTS records).
	 */
	if (dsp->dsa_pending_op != PENDING_NONE &&
	    dsp->dsa_pending_op != PENDING_FREE) {
		if (dump_record(dsp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
		dsp->dsa_pending_op = PENDING_NONE;
	}

	if (dsp->dsa_pending_op == PENDING_FREE) {
		/*
		 * There should never be a PENDING_FREE if length is -1
		 * (because dump_dnode is the only place where this
		 * function is called with a -1, and only after flushing
		 * any pending record).
		 */
		ASSERT(length != -1ULL);
		/*
		 * Check to see whether this free block can be aggregated
		 * with pending one.
		 */
		if (drrf->drr_object == object && drrf->drr_offset +
		    drrf->drr_length == offset) {
			drrf->drr_length += length;
			return (0);
		} else {
			/* not a continuation.  Push out pending record */
			if (dump_record(dsp, NULL, 0) != 0)
				return (SET_ERROR(EINTR));
			dsp->dsa_pending_op = PENDING_NONE;
		}
	}
	/* create a FREE record and make it pending */
	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_FREE;
	drrf->drr_object = object;
	drrf->drr_offset = offset;
	drrf->drr_length = length;
	drrf->drr_toguid = dsp->dsa_toguid;
	if (length == -1ULL) {
		if (dump_record(dsp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
	} else {
		dsp->dsa_pending_op = PENDING_FREE;
	}

	return (0);
}
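
/*
 * Editor's example: dump_free(dsp, 5, 0, 4096) followed by
 * dump_free(dsp, 5, 4096, 4096) emits a single DRR_FREE record for
 * object 5, offset 0, length 8192; the second call extends the pending
 * record instead of pushing a new one, because the two ranges are adjacent
 * and both are frees.
 */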
static int
dump_write(dmu_sendarg_t *dsp, dmu_object_type_t type,
    uint64_t object, uint64_t offset, int lsize, int psize, const blkptr_t *bp,
    void *data)
{
	uint64_t payload_size;
	struct drr_write *drrw = &(dsp->dsa_drr->drr_u.drr_write);

	/*
	 * We send data in increasing object, offset order.
	 * See comment in dump_free() for details.
	 */
	ASSERT(object > dsp->dsa_last_data_object ||
	    (object == dsp->dsa_last_data_object &&
	    offset > dsp->dsa_last_data_offset));
	dsp->dsa_last_data_object = object;
	dsp->dsa_last_data_offset = offset + lsize - 1;

	/*
	 * If there is any kind of pending aggregation (currently either
	 * a grouping of free objects or free blocks), push it out to
	 * the stream, since aggregation can't be done across operations
	 * of different types.
	 */
	if (dsp->dsa_pending_op != PENDING_NONE) {
		if (dump_record(dsp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
		dsp->dsa_pending_op = PENDING_NONE;
	}
	/* write a WRITE record */
	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_WRITE;
	drrw->drr_object = object;
	drrw->drr_type = type;
	drrw->drr_offset = offset;
	drrw->drr_toguid = dsp->dsa_toguid;
	drrw->drr_logical_size = lsize;

	/* only set the compression fields if the buf is compressed */
	if (lsize != psize) {
		ASSERT(dsp->dsa_featureflags & DMU_BACKUP_FEATURE_COMPRESSED);
		ASSERT(!BP_IS_EMBEDDED(bp));
		ASSERT(!BP_SHOULD_BYTESWAP(bp));
		ASSERT(!DMU_OT_IS_METADATA(BP_GET_TYPE(bp)));
		ASSERT3U(BP_GET_COMPRESS(bp), !=, ZIO_COMPRESS_OFF);
		ASSERT3S(psize, >, 0);
		ASSERT3S(lsize, >=, psize);

		drrw->drr_compressiontype = BP_GET_COMPRESS(bp);
		drrw->drr_compressed_size = psize;
		payload_size = drrw->drr_compressed_size;
	} else {
		payload_size = drrw->drr_logical_size;
	}

	if (bp == NULL || BP_IS_EMBEDDED(bp)) {
		/*
		 * There's no pre-computed checksum for partial-block
		 * writes or embedded BP's, so (like
		 * fletcher4-checksummed blocks) userland will have to
		 * compute a dedup-capable checksum itself.
		 */
		drrw->drr_checksumtype = ZIO_CHECKSUM_OFF;
	} else {
		drrw->drr_checksumtype = BP_GET_CHECKSUM(bp);
		if (zio_checksum_table[drrw->drr_checksumtype].ci_flags &
		    ZCHECKSUM_FLAG_DEDUP)
			drrw->drr_checksumflags |= DRR_CHECKSUM_DEDUP;
		DDK_SET_LSIZE(&drrw->drr_key, BP_GET_LSIZE(bp));
		DDK_SET_PSIZE(&drrw->drr_key, BP_GET_PSIZE(bp));
		DDK_SET_COMPRESS(&drrw->drr_key, BP_GET_COMPRESS(bp));
		drrw->drr_key.ddk_cksum = bp->blk_cksum;
	}

	if (dump_record(dsp, data, payload_size) != 0)
		return (SET_ERROR(EINTR));
	return (0);
}
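
/*
 * Editor's example: for a 128K logical block that compressed to 40K on
 * disk, a compressed send calls this with lsize == 131072 and
 * psize == 40960; drr_logical_size and drr_compressed_size are set
 * accordingly and only the 40960-byte payload is written to the stream.
 * When the stream is not compressed, lsize == psize and the compression
 * fields are left zero.
 */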
static int
dump_write_embedded(dmu_sendarg_t *dsp, uint64_t object, uint64_t offset,
    int blksz, const blkptr_t *bp)
{
	char buf[BPE_PAYLOAD_SIZE];
	struct drr_write_embedded *drrw =
	    &(dsp->dsa_drr->drr_u.drr_write_embedded);

	if (dsp->dsa_pending_op != PENDING_NONE) {
		if (dump_record(dsp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
		dsp->dsa_pending_op = PENDING_NONE;
	}

	ASSERT(BP_IS_EMBEDDED(bp));

	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_WRITE_EMBEDDED;
	drrw->drr_object = object;
	drrw->drr_offset = offset;
	drrw->drr_length = blksz;
	drrw->drr_toguid = dsp->dsa_toguid;
	drrw->drr_compression = BP_GET_COMPRESS(bp);
	drrw->drr_etype = BPE_GET_ETYPE(bp);
	drrw->drr_lsize = BPE_GET_LSIZE(bp);
	drrw->drr_psize = BPE_GET_PSIZE(bp);

	decode_embedded_bp_compressed(bp, buf);

	if (dump_record(dsp, buf, P2ROUNDUP(drrw->drr_psize, 8)) != 0)
		return (SET_ERROR(EINTR));
	return (0);
}
static int
dump_spill(dmu_sendarg_t *dsp, uint64_t object, int blksz, void *data)
{
	struct drr_spill *drrs = &(dsp->dsa_drr->drr_u.drr_spill);

	if (dsp->dsa_pending_op != PENDING_NONE) {
		if (dump_record(dsp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
		dsp->dsa_pending_op = PENDING_NONE;
	}

	/* write a SPILL record */
	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_SPILL;
	drrs->drr_object = object;
	drrs->drr_length = blksz;
	drrs->drr_toguid = dsp->dsa_toguid;

	if (dump_record(dsp, data, blksz) != 0)
		return (SET_ERROR(EINTR));
	return (0);
}
static int
dump_freeobjects(dmu_sendarg_t *dsp, uint64_t firstobj, uint64_t numobjs)
{
	struct drr_freeobjects *drrfo = &(dsp->dsa_drr->drr_u.drr_freeobjects);
	uint64_t maxobj = DNODES_PER_BLOCK *
	    (DMU_META_DNODE(dsp->dsa_os)->dn_maxblkid + 1);

	/*
	 * ZoL < 0.7 does not handle large FREEOBJECTS records correctly,
	 * leading to zfs recv never completing.  To avoid this issue, don't
	 * send FREEOBJECTS records for object IDs which cannot exist on the
	 * receiving side.
	 */
	if (maxobj > 0) {
		if (maxobj < firstobj)
			return (0);

		if (maxobj < firstobj + numobjs)
			numobjs = maxobj - firstobj;
	}

	/*
	 * If there is a pending op, but it's not PENDING_FREEOBJECTS,
	 * push it out, since free block aggregation can only be done for
	 * blocks of the same type (i.e., DRR_FREE records can only be
	 * aggregated with other DRR_FREE records.  DRR_FREEOBJECTS records
	 * can only be aggregated with other DRR_FREEOBJECTS records).
	 */
	if (dsp->dsa_pending_op != PENDING_NONE &&
	    dsp->dsa_pending_op != PENDING_FREEOBJECTS) {
		if (dump_record(dsp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
		dsp->dsa_pending_op = PENDING_NONE;
	}
	if (dsp->dsa_pending_op == PENDING_FREEOBJECTS) {
		/*
		 * See whether this free object array can be aggregated
		 * with pending one.
		 */
		if (drrfo->drr_firstobj + drrfo->drr_numobjs == firstobj) {
			drrfo->drr_numobjs += numobjs;
			return (0);
		} else {
			/* can't be aggregated.  Push out pending record */
			if (dump_record(dsp, NULL, 0) != 0)
				return (SET_ERROR(EINTR));
			dsp->dsa_pending_op = PENDING_NONE;
		}
	}

	/* write a FREEOBJECTS record */
	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_FREEOBJECTS;
	drrfo->drr_firstobj = firstobj;
	drrfo->drr_numobjs = numobjs;
	drrfo->drr_toguid = dsp->dsa_toguid;

	dsp->dsa_pending_op = PENDING_FREEOBJECTS;

	return (0);
}
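
/*
 * Editor's example of the maxobj clamp above: with DNODES_PER_BLOCK == 32
 * and a meta-dnode dn_maxblkid of 3, maxobj is 128.  A call with
 * firstobj == 120 and numobjs == 80 is clamped to numobjs == 8, and a call
 * with firstobj > 128 returns without sending anything.
 */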
static int
dump_dnode(dmu_sendarg_t *dsp, uint64_t object, dnode_phys_t *dnp)
{
	struct drr_object *drro = &(dsp->dsa_drr->drr_u.drr_object);

	if (object < dsp->dsa_resume_object) {
		/*
		 * Note: when resuming, we will visit all the dnodes in
		 * the block of dnodes that we are resuming from.  In
		 * this case it's unnecessary to send the dnodes prior to
		 * the one we are resuming from.  We should be at most one
		 * block's worth of dnodes behind the resume point.
		 */
		ASSERT3U(dsp->dsa_resume_object - object, <,
		    1 << (DNODE_BLOCK_SHIFT - DNODE_SHIFT));
		return (0);
	}

	if (dnp == NULL || dnp->dn_type == DMU_OT_NONE)
		return (dump_freeobjects(dsp, object, 1));

	if (dsp->dsa_pending_op != PENDING_NONE) {
		if (dump_record(dsp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
		dsp->dsa_pending_op = PENDING_NONE;
	}

	/* write an OBJECT record */
	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_OBJECT;
	drro->drr_object = object;
	drro->drr_type = dnp->dn_type;
	drro->drr_bonustype = dnp->dn_bonustype;
	drro->drr_blksz = dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT;
	drro->drr_bonuslen = dnp->dn_bonuslen;
	drro->drr_dn_slots = dnp->dn_extra_slots + 1;
	drro->drr_checksumtype = dnp->dn_checksum;
	drro->drr_compress = dnp->dn_compress;
	drro->drr_toguid = dsp->dsa_toguid;

	if (!(dsp->dsa_featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) &&
	    drro->drr_blksz > SPA_OLD_MAXBLOCKSIZE)
		drro->drr_blksz = SPA_OLD_MAXBLOCKSIZE;

	if (dump_record(dsp, DN_BONUS(dnp),
	    P2ROUNDUP(dnp->dn_bonuslen, 8)) != 0) {
		return (SET_ERROR(EINTR));
	}

	/* Free anything past the end of the file. */
	if (dump_free(dsp, object, (dnp->dn_maxblkid + 1) *
	    (dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT), -1ULL) != 0)
		return (SET_ERROR(EINTR));
	if (dsp->dsa_err != 0)
		return (SET_ERROR(EINTR));
	return (0);
}
static boolean_t
backup_do_embed(dmu_sendarg_t *dsp, const blkptr_t *bp)
{
	if (!BP_IS_EMBEDDED(bp))
		return (B_FALSE);

	/*
	 * Compression function must be legacy, or explicitly enabled.
	 */
	if ((BP_GET_COMPRESS(bp) >= ZIO_COMPRESS_LEGACY_FUNCTIONS &&
	    !(dsp->dsa_featureflags & DMU_BACKUP_FEATURE_LZ4)))
		return (B_FALSE);

	/*
	 * Embed type must be explicitly enabled.
	 */
	switch (BPE_GET_ETYPE(bp)) {
	case BP_EMBEDDED_TYPE_DATA:
		if (dsp->dsa_featureflags & DMU_BACKUP_FEATURE_EMBED_DATA)
			return (B_TRUE);
		break;
	default:
		return (B_FALSE);
	}
	return (B_FALSE);
}
/*
 * This is the callback function to traverse_dataset that acts as the worker
 * thread for dmu_send_impl.
 */
static int
send_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_phys_t *zb, const struct dnode_phys *dnp, void *arg)
{
	struct send_thread_arg *sta = arg;
	struct send_block_record *record;
	uint64_t record_size;
	int err = 0;

	ASSERT(zb->zb_object == DMU_META_DNODE_OBJECT ||
	    zb->zb_object >= sta->resume.zb_object);

	if (sta->cancel)
		return (SET_ERROR(EINTR));

	if (bp == NULL) {
		ASSERT3U(zb->zb_level, ==, ZB_DNODE_LEVEL);
		return (0);
	} else if (zb->zb_level < 0) {
		return (0);
	}

	record = kmem_zalloc(sizeof (struct send_block_record), KM_SLEEP);
	record->eos_marker = B_FALSE;
	record->bp = *bp;
	record->zb = *zb;
	record->indblkshift = dnp->dn_indblkshift;
	record->datablkszsec = dnp->dn_datablkszsec;
	record_size = dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT;
	bqueue_enqueue(&sta->q, record, record_size);

	return (err);
}
/*
 * This function kicks off the traverse_dataset.  It also handles setting the
 * error code of the thread in case something goes wrong, and pushes the End of
 * Stream record when the traverse_dataset call has finished.  If there is no
 * dataset to traverse, the thread immediately pushes an End of Stream marker.
 */
static void
send_traverse_thread(void *arg)
{
	struct send_thread_arg *st_arg = arg;
	int err;
	struct send_block_record *data;
	fstrans_cookie_t cookie = spl_fstrans_mark();

	if (st_arg->ds != NULL) {
		err = traverse_dataset_resume(st_arg->ds,
		    st_arg->fromtxg, &st_arg->resume,
		    st_arg->flags, send_cb, st_arg);

		if (err != EINTR)
			st_arg->error_code = err;
	}
	data = kmem_zalloc(sizeof (*data), KM_SLEEP);
	data->eos_marker = B_TRUE;
	bqueue_enqueue(&st_arg->q, data, 1);
	spl_fstrans_unmark(cookie);
}
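
/*
 * Editor's note: send_cb() (the producer) and the do_dump() loop in
 * dmu_send_impl() (the consumer) form a bounded pipeline.  Each enqueued
 * send_block_record is weighted by its block size, so bqueue_enqueue()
 * throttles the traversal thread once roughly zfs_send_queue_length bytes
 * of records are outstanding.
 */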
/*
 * This function actually handles figuring out what kind of record needs to be
 * dumped, reading the data (which has hopefully been prefetched), and calling
 * the appropriate helper function.
 */
static int
do_dump(dmu_sendarg_t *dsa, struct send_block_record *data)
{
	dsl_dataset_t *ds = dmu_objset_ds(dsa->dsa_os);
	const blkptr_t *bp = &data->bp;
	const zbookmark_phys_t *zb = &data->zb;
	uint8_t indblkshift = data->indblkshift;
	uint16_t dblkszsec = data->datablkszsec;
	spa_t *spa = ds->ds_dir->dd_pool->dp_spa;
	dmu_object_type_t type = bp ? BP_GET_TYPE(bp) : DMU_OT_NONE;
	int err = 0;

	ASSERT3U(zb->zb_level, >=, 0);

	ASSERT(zb->zb_object == DMU_META_DNODE_OBJECT ||
	    zb->zb_object >= dsa->dsa_resume_object);

	if (zb->zb_object != DMU_META_DNODE_OBJECT &&
	    DMU_OBJECT_IS_SPECIAL(zb->zb_object)) {
		return (0);
	} else if (BP_IS_HOLE(bp) &&
	    zb->zb_object == DMU_META_DNODE_OBJECT) {
		uint64_t span = BP_SPAN(dblkszsec, indblkshift, zb->zb_level);
		uint64_t dnobj = (zb->zb_blkid * span) >> DNODE_SHIFT;
		err = dump_freeobjects(dsa, dnobj, span >> DNODE_SHIFT);
	} else if (BP_IS_HOLE(bp)) {
		uint64_t span = BP_SPAN(dblkszsec, indblkshift, zb->zb_level);
		uint64_t offset = zb->zb_blkid * span;
		err = dump_free(dsa, zb->zb_object, offset, span);
	} else if (zb->zb_level > 0 || type == DMU_OT_OBJSET) {
		return (0);
	} else if (type == DMU_OT_DNODE) {
		dnode_phys_t *blk;
		int epb = BP_GET_LSIZE(bp) >> DNODE_SHIFT;
		arc_flags_t aflags = ARC_FLAG_WAIT;
		arc_buf_t *abuf;
		uint64_t dnobj;
		int i;

		ASSERT0(zb->zb_level);

		if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL,
		    &aflags, zb) != 0)
			return (SET_ERROR(EIO));

		blk = abuf->b_data;
		dnobj = zb->zb_blkid * epb;
		for (i = 0; i < epb; i += blk[i].dn_extra_slots + 1) {
			err = dump_dnode(dsa, dnobj + i, blk + i);
			if (err != 0)
				break;
		}
		arc_buf_destroy(abuf, &abuf);
	} else if (type == DMU_OT_SA) {
		arc_flags_t aflags = ARC_FLAG_WAIT;
		arc_buf_t *abuf;
		int blksz = BP_GET_LSIZE(bp);

		if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL,
		    &aflags, zb) != 0)
			return (SET_ERROR(EIO));

		err = dump_spill(dsa, zb->zb_object, blksz, abuf->b_data);
		arc_buf_destroy(abuf, &abuf);
	} else if (backup_do_embed(dsa, bp)) {
		/* it's an embedded level-0 block of a regular object */
		int blksz = dblkszsec << SPA_MINBLOCKSHIFT;
		ASSERT0(zb->zb_level);
		err = dump_write_embedded(dsa, zb->zb_object,
		    zb->zb_blkid * blksz, blksz, bp);
	} else {
		/* it's a level-0 block of a regular object */
		arc_flags_t aflags = ARC_FLAG_WAIT;
		arc_buf_t *abuf;
		int blksz = dblkszsec << SPA_MINBLOCKSHIFT;
		uint64_t offset;

		/*
		 * If we have large blocks stored on disk but the send flags
		 * don't allow us to send large blocks, we split the data from
		 * the arc buf into chunks.
		 */
		boolean_t split_large_blocks = blksz > SPA_OLD_MAXBLOCKSIZE &&
		    !(dsa->dsa_featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS);
		/*
		 * We should only request compressed data from the ARC if all
		 * the following are true:
		 *  - stream compression was requested
		 *  - we aren't splitting large blocks into smaller chunks
		 *  - the data won't need to be byteswapped before sending
		 *  - this isn't an embedded block
		 *  - this isn't metadata (if receiving on a different endian
		 *    system it can be byteswapped more easily)
		 */
		boolean_t request_compressed =
		    (dsa->dsa_featureflags & DMU_BACKUP_FEATURE_COMPRESSED) &&
		    !split_large_blocks && !BP_SHOULD_BYTESWAP(bp) &&
		    !BP_IS_EMBEDDED(bp) && !DMU_OT_IS_METADATA(BP_GET_TYPE(bp));

		ASSERT0(zb->zb_level);
		ASSERT(zb->zb_object > dsa->dsa_resume_object ||
		    (zb->zb_object == dsa->dsa_resume_object &&
		    zb->zb_blkid * blksz >= dsa->dsa_resume_offset));

		ASSERT3U(blksz, ==, BP_GET_LSIZE(bp));

		enum zio_flag zioflags = ZIO_FLAG_CANFAIL;
		if (request_compressed)
			zioflags |= ZIO_FLAG_RAW;

		if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
		    ZIO_PRIORITY_ASYNC_READ, zioflags, &aflags, zb) != 0) {
			if (zfs_send_corrupt_data) {
				uint64_t *ptr;
				/* Send a block filled with 0x"zfs badd bloc" */
				abuf = arc_alloc_buf(spa, &abuf, ARC_BUFC_DATA,
				    blksz);
				for (ptr = abuf->b_data;
				    (char *)ptr < (char *)abuf->b_data + blksz;
				    ptr++)
					*ptr = 0x2f5baddb10cULL;
			} else {
				return (SET_ERROR(EIO));
			}
		}

		offset = zb->zb_blkid * blksz;

		if (split_large_blocks) {
			ASSERT3U(arc_get_compression(abuf), ==,
			    ZIO_COMPRESS_OFF);
			char *buf = abuf->b_data;
			while (blksz > 0 && err == 0) {
				int n = MIN(blksz, SPA_OLD_MAXBLOCKSIZE);
				err = dump_write(dsa, type, zb->zb_object,
				    offset, n, n, NULL, buf);
				offset += n;
				buf += n;
				blksz -= n;
			}
		} else {
			err = dump_write(dsa, type, zb->zb_object, offset,
			    blksz, arc_buf_size(abuf), bp, abuf->b_data);
		}
		arc_buf_destroy(abuf, &abuf);
	}

	ASSERT(err == 0 || err == EINTR);
	return (err);
}
/*
 * Pop the new data off the queue, and free the old data.
 */
static struct send_block_record *
get_next_record(bqueue_t *bq, struct send_block_record *data)
{
	struct send_block_record *tmp = bqueue_dequeue(bq);
	kmem_free(data, sizeof (*data));
	return (tmp);
}
/*
 * Actually do the bulk of the work in a zfs send.
 *
 * Note: Releases dp using the specified tag.
 */
static int
dmu_send_impl(void *tag, dsl_pool_t *dp, dsl_dataset_t *to_ds,
    zfs_bookmark_phys_t *ancestor_zb, boolean_t is_clone,
    boolean_t embedok, boolean_t large_block_ok, boolean_t compressok,
    int outfd, uint64_t resumeobj, uint64_t resumeoff,
    vnode_t *vp, offset_t *off)
{
	objset_t *os;
	dmu_replay_record_t *drr;
	dmu_sendarg_t *dsp;
	int err;
	uint64_t fromtxg = 0;
	uint64_t featureflags = 0;
	struct send_thread_arg to_arg;
	void *payload = NULL;
	size_t payload_len = 0;
	struct send_block_record *to_data;

	err = dmu_objset_from_ds(to_ds, &os);
	if (err != 0) {
		dsl_pool_rele(dp, tag);
		return (err);
	}

	drr = kmem_zalloc(sizeof (dmu_replay_record_t), KM_SLEEP);
	drr->drr_type = DRR_BEGIN;
	drr->drr_u.drr_begin.drr_magic = DMU_BACKUP_MAGIC;
	DMU_SET_STREAM_HDRTYPE(drr->drr_u.drr_begin.drr_versioninfo,
	    DMU_SUBSTREAM);

	bzero(&to_arg, sizeof (to_arg));

	if (dmu_objset_type(os) == DMU_OST_ZFS) {
		uint64_t version;
		if (zfs_get_zplprop(os, ZFS_PROP_VERSION, &version) != 0) {
			kmem_free(drr, sizeof (dmu_replay_record_t));
			dsl_pool_rele(dp, tag);
			return (SET_ERROR(EINVAL));
		}
		if (version >= ZPL_VERSION_SA) {
			featureflags |= DMU_BACKUP_FEATURE_SA_SPILL;
		}
	}

	if (large_block_ok && to_ds->ds_feature_inuse[SPA_FEATURE_LARGE_BLOCKS])
		featureflags |= DMU_BACKUP_FEATURE_LARGE_BLOCKS;
	if (to_ds->ds_feature_inuse[SPA_FEATURE_LARGE_DNODE])
		featureflags |= DMU_BACKUP_FEATURE_LARGE_DNODE;
	if (embedok &&
	    spa_feature_is_active(dp->dp_spa, SPA_FEATURE_EMBEDDED_DATA)) {
		featureflags |= DMU_BACKUP_FEATURE_EMBED_DATA;
	}
	if (compressok) {
		featureflags |= DMU_BACKUP_FEATURE_COMPRESSED;
	}
	if ((featureflags &
	    (DMU_BACKUP_FEATURE_EMBED_DATA | DMU_BACKUP_FEATURE_COMPRESSED)) !=
	    0 && spa_feature_is_active(dp->dp_spa, SPA_FEATURE_LZ4_COMPRESS)) {
		featureflags |= DMU_BACKUP_FEATURE_LZ4;
	}

	if (resumeobj != 0 || resumeoff != 0) {
		featureflags |= DMU_BACKUP_FEATURE_RESUMING;
	}

	DMU_SET_FEATUREFLAGS(drr->drr_u.drr_begin.drr_versioninfo,
	    featureflags);

	drr->drr_u.drr_begin.drr_creation_time =
	    dsl_dataset_phys(to_ds)->ds_creation_time;
	drr->drr_u.drr_begin.drr_type = dmu_objset_type(os);
	if (is_clone)
		drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_CLONE;
	drr->drr_u.drr_begin.drr_toguid = dsl_dataset_phys(to_ds)->ds_guid;
	if (dsl_dataset_phys(to_ds)->ds_flags & DS_FLAG_CI_DATASET)
		drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_CI_DATA;
	if (zfs_send_set_freerecords_bit)
		drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_FREERECORDS;

	if (ancestor_zb != NULL) {
		drr->drr_u.drr_begin.drr_fromguid =
		    ancestor_zb->zbm_guid;
		fromtxg = ancestor_zb->zbm_creation_txg;
	}
	dsl_dataset_name(to_ds, drr->drr_u.drr_begin.drr_toname);
	if (!to_ds->ds_is_snapshot) {
		(void) strlcat(drr->drr_u.drr_begin.drr_toname, "@--head--",
		    sizeof (drr->drr_u.drr_begin.drr_toname));
	}

	dsp = kmem_zalloc(sizeof (dmu_sendarg_t), KM_SLEEP);

	dsp->dsa_drr = drr;
	dsp->dsa_vp = vp;
	dsp->dsa_outfd = outfd;
	dsp->dsa_proc = curproc;
	dsp->dsa_os = os;
	dsp->dsa_off = off;
	dsp->dsa_toguid = dsl_dataset_phys(to_ds)->ds_guid;
	dsp->dsa_pending_op = PENDING_NONE;
	dsp->dsa_featureflags = featureflags;
	dsp->dsa_resume_object = resumeobj;
	dsp->dsa_resume_offset = resumeoff;

	mutex_enter(&to_ds->ds_sendstream_lock);
	list_insert_head(&to_ds->ds_sendstreams, dsp);
	mutex_exit(&to_ds->ds_sendstream_lock);

	dsl_dataset_long_hold(to_ds, FTAG);
	dsl_pool_rele(dp, tag);

	if (resumeobj != 0 || resumeoff != 0) {
		dmu_object_info_t to_doi;
		nvlist_t *nvl;

		err = dmu_object_info(os, resumeobj, &to_doi);
		if (err != 0)
			goto out;
		SET_BOOKMARK(&to_arg.resume, to_ds->ds_object, resumeobj, 0,
		    resumeoff / to_doi.doi_data_block_size);

		nvl = fnvlist_alloc();
		fnvlist_add_uint64(nvl, "resume_object", resumeobj);
		fnvlist_add_uint64(nvl, "resume_offset", resumeoff);
		payload = fnvlist_pack(nvl, &payload_len);
		drr->drr_payloadlen = payload_len;
		fnvlist_free(nvl);
	}

	err = dump_record(dsp, payload, payload_len);
	fnvlist_pack_free(payload, payload_len);
	if (err != 0) {
		err = dsp->dsa_err;
		goto out;
	}

	err = bqueue_init(&to_arg.q,
	    MAX(zfs_send_queue_length, 2 * zfs_max_recordsize),
	    offsetof(struct send_block_record, ln));
	to_arg.error_code = 0;
	to_arg.cancel = B_FALSE;
	to_arg.ds = to_ds;
	to_arg.fromtxg = fromtxg;
	to_arg.flags = TRAVERSE_PRE | TRAVERSE_PREFETCH;
	(void) thread_create(NULL, 0, send_traverse_thread, &to_arg, 0, curproc,
	    TS_RUN, minclsyspri);

	to_data = bqueue_dequeue(&to_arg.q);

	while (!to_data->eos_marker && err == 0) {
		err = do_dump(dsp, to_data);
		to_data = get_next_record(&to_arg.q, to_data);
		if (issig(JUSTLOOKING) && issig(FORREAL))
			err = EINTR;
	}

	if (err != 0) {
		to_arg.cancel = B_TRUE;
		while (!to_data->eos_marker) {
			to_data = get_next_record(&to_arg.q, to_data);
		}
	}
	kmem_free(to_data, sizeof (*to_data));

	bqueue_destroy(&to_arg.q);

	if (err == 0 && to_arg.error_code != 0)
		err = to_arg.error_code;

	if (err != 0)
		goto out;

	if (dsp->dsa_pending_op != PENDING_NONE)
		if (dump_record(dsp, NULL, 0) != 0)
			err = SET_ERROR(EINTR);

	if (err != 0) {
		if (err == EINTR && dsp->dsa_err != 0)
			err = dsp->dsa_err;
		goto out;
	}

	bzero(drr, sizeof (dmu_replay_record_t));
	drr->drr_type = DRR_END;
	drr->drr_u.drr_end.drr_checksum = dsp->dsa_zc;
	drr->drr_u.drr_end.drr_toguid = dsp->dsa_toguid;

	if (dump_record(dsp, NULL, 0) != 0)
		err = dsp->dsa_err;

out:
	mutex_enter(&to_ds->ds_sendstream_lock);
	list_remove(&to_ds->ds_sendstreams, dsp);
	mutex_exit(&to_ds->ds_sendstream_lock);

	VERIFY(err != 0 || (dsp->dsa_sent_begin && dsp->dsa_sent_end));

	kmem_free(drr, sizeof (dmu_replay_record_t));
	kmem_free(dsp, sizeof (dmu_sendarg_t));

	dsl_dataset_long_rele(to_ds, FTAG);

	return (err);
}
, uint64_t tosnap
, uint64_t fromsnap
,
1018 boolean_t embedok
, boolean_t large_block_ok
, boolean_t compressok
,
1019 int outfd
, vnode_t
*vp
, offset_t
*off
)
1023 dsl_dataset_t
*fromds
= NULL
;
1026 err
= dsl_pool_hold(pool
, FTAG
, &dp
);
1030 err
= dsl_dataset_hold_obj(dp
, tosnap
, FTAG
, &ds
);
1032 dsl_pool_rele(dp
, FTAG
);
1036 if (fromsnap
!= 0) {
1037 zfs_bookmark_phys_t zb
;
1040 err
= dsl_dataset_hold_obj(dp
, fromsnap
, FTAG
, &fromds
);
1042 dsl_dataset_rele(ds
, FTAG
);
1043 dsl_pool_rele(dp
, FTAG
);
1046 if (!dsl_dataset_is_before(ds
, fromds
, 0))
1047 err
= SET_ERROR(EXDEV
);
1048 zb
.zbm_creation_time
=
1049 dsl_dataset_phys(fromds
)->ds_creation_time
;
1050 zb
.zbm_creation_txg
= dsl_dataset_phys(fromds
)->ds_creation_txg
;
1051 zb
.zbm_guid
= dsl_dataset_phys(fromds
)->ds_guid
;
1052 is_clone
= (fromds
->ds_dir
!= ds
->ds_dir
);
1053 dsl_dataset_rele(fromds
, FTAG
);
1054 err
= dmu_send_impl(FTAG
, dp
, ds
, &zb
, is_clone
,
1055 embedok
, large_block_ok
, compressok
, outfd
, 0, 0, vp
, off
);
1057 err
= dmu_send_impl(FTAG
, dp
, ds
, NULL
, B_FALSE
,
1058 embedok
, large_block_ok
, compressok
, outfd
, 0, 0, vp
, off
);
1060 dsl_dataset_rele(ds
, FTAG
);
int
dmu_send(const char *tosnap, const char *fromsnap, boolean_t embedok,
    boolean_t large_block_ok, boolean_t compressok, int outfd,
    uint64_t resumeobj, uint64_t resumeoff,
    vnode_t *vp, offset_t *off)
{
	dsl_pool_t *dp;
	dsl_dataset_t *ds;
	int err;
	boolean_t owned = B_FALSE;

	if (fromsnap != NULL && strpbrk(fromsnap, "@#") == NULL)
		return (SET_ERROR(EINVAL));

	err = dsl_pool_hold(tosnap, FTAG, &dp);
	if (err != 0)
		return (err);

	if (strchr(tosnap, '@') == NULL && spa_writeable(dp->dp_spa)) {
		/*
		 * We are sending a filesystem or volume.  Ensure
		 * that it doesn't change by owning the dataset.
		 */
		err = dsl_dataset_own(dp, tosnap, FTAG, &ds);
		owned = B_TRUE;
	} else {
		err = dsl_dataset_hold(dp, tosnap, FTAG, &ds);
	}
	if (err != 0) {
		dsl_pool_rele(dp, FTAG);
		return (err);
	}

	if (fromsnap != NULL) {
		zfs_bookmark_phys_t zb;
		boolean_t is_clone = B_FALSE;
		int fsnamelen = strchr(tosnap, '@') - tosnap;

		/*
		 * If the fromsnap is in a different filesystem, then
		 * mark the send stream as a clone.
		 */
		if (strncmp(tosnap, fromsnap, fsnamelen) != 0 ||
		    (fromsnap[fsnamelen] != '@' &&
		    fromsnap[fsnamelen] != '#')) {
			is_clone = B_TRUE;
		}

		if (strchr(fromsnap, '@')) {
			dsl_dataset_t *fromds;
			err = dsl_dataset_hold(dp, fromsnap, FTAG, &fromds);
			if (err == 0) {
				if (!dsl_dataset_is_before(ds, fromds, 0))
					err = SET_ERROR(EXDEV);
				zb.zbm_creation_time =
				    dsl_dataset_phys(fromds)->ds_creation_time;
				zb.zbm_creation_txg =
				    dsl_dataset_phys(fromds)->ds_creation_txg;
				zb.zbm_guid = dsl_dataset_phys(fromds)->ds_guid;
				is_clone = (ds->ds_dir != fromds->ds_dir);
				dsl_dataset_rele(fromds, FTAG);
			}
		} else {
			err = dsl_bookmark_lookup(dp, fromsnap, ds, &zb);
		}
		if (err != 0) {
			dsl_dataset_rele(ds, FTAG);
			dsl_pool_rele(dp, FTAG);
			return (err);
		}
		err = dmu_send_impl(FTAG, dp, ds, &zb, is_clone,
		    embedok, large_block_ok, compressok,
		    outfd, resumeobj, resumeoff, vp, off);
	} else {
		err = dmu_send_impl(FTAG, dp, ds, NULL, B_FALSE,
		    embedok, large_block_ok, compressok,
		    outfd, resumeobj, resumeoff, vp, off);
	}
	if (owned)
		dsl_dataset_disown(ds, FTAG);
	else
		dsl_dataset_rele(ds, FTAG);
	return (err);
}
static int
dmu_adjust_send_estimate_for_indirects(dsl_dataset_t *ds, uint64_t uncompressed,
    uint64_t compressed, boolean_t stream_compressed, uint64_t *sizep)
{
	int err = 0;
	uint64_t size;
	/*
	 * Assume that space (both on-disk and in-stream) is dominated by
	 * data.  We will adjust for indirect blocks and the copies property,
	 * but ignore per-object space used (eg, dnodes and DRR_OBJECT records).
	 */
	uint64_t recordsize;
	uint64_t record_count;
	objset_t *os;
	VERIFY0(dmu_objset_from_ds(ds, &os));

	/* Assume all (uncompressed) blocks are recordsize. */
	if (os->os_phys->os_type == DMU_OST_ZVOL) {
		err = dsl_prop_get_int_ds(ds,
		    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &recordsize);
	} else {
		err = dsl_prop_get_int_ds(ds,
		    zfs_prop_to_name(ZFS_PROP_RECORDSIZE), &recordsize);
	}
	if (err != 0)
		return (err);
	record_count = uncompressed / recordsize;

	/*
	 * If we're estimating a send size for a compressed stream, use the
	 * compressed data size to estimate the stream size.  Otherwise, use the
	 * uncompressed data size.
	 */
	size = stream_compressed ? compressed : uncompressed;

	/*
	 * Subtract out approximate space used by indirect blocks.
	 * Assume most space is used by data blocks (non-indirect, non-dnode).
	 * Assume no ditto blocks or internal fragmentation.
	 *
	 * Therefore, space used by indirect blocks is sizeof(blkptr_t) per
	 * block.
	 */
	size -= record_count * sizeof (blkptr_t);

	/* Add in the space for the record associated with each block. */
	size += record_count * sizeof (dmu_replay_record_t);

	*sizep = size;

	return (0);
}
*ds
, dsl_dataset_t
*fromds
,
1205 boolean_t stream_compressed
, uint64_t *sizep
)
1208 uint64_t uncomp
, comp
;
1210 ASSERT(dsl_pool_config_held(ds
->ds_dir
->dd_pool
));
1212 /* tosnap must be a snapshot */
1213 if (!ds
->ds_is_snapshot
)
1214 return (SET_ERROR(EINVAL
));
1216 /* fromsnap, if provided, must be a snapshot */
1217 if (fromds
!= NULL
&& !fromds
->ds_is_snapshot
)
1218 return (SET_ERROR(EINVAL
));
1221 * fromsnap must be an earlier snapshot from the same fs as tosnap,
1222 * or the origin's fs.
1224 if (fromds
!= NULL
&& !dsl_dataset_is_before(ds
, fromds
, 0))
1225 return (SET_ERROR(EXDEV
));
1227 /* Get compressed and uncompressed size estimates of changed data. */
1228 if (fromds
== NULL
) {
1229 uncomp
= dsl_dataset_phys(ds
)->ds_uncompressed_bytes
;
1230 comp
= dsl_dataset_phys(ds
)->ds_compressed_bytes
;
1233 err
= dsl_dataset_space_written(fromds
, ds
,
1234 &used
, &comp
, &uncomp
);
1239 err
= dmu_adjust_send_estimate_for_indirects(ds
, uncomp
, comp
,
1240 stream_compressed
, sizep
);
1242 * Add the size of the BEGIN and END records to the estimate.
1244 *sizep
+= 2 * sizeof (dmu_replay_record_t
);
1248 struct calculate_send_arg
{
1249 uint64_t uncompressed
;
1250 uint64_t compressed
;
1254 * Simple callback used to traverse the blocks of a snapshot and sum their
1255 * uncompressed and compressed sizes.
1259 dmu_calculate_send_traversal(spa_t
*spa
, zilog_t
*zilog
, const blkptr_t
*bp
,
1260 const zbookmark_phys_t
*zb
, const dnode_phys_t
*dnp
, void *arg
)
1262 struct calculate_send_arg
*space
= arg
;
1263 if (bp
!= NULL
&& !BP_IS_HOLE(bp
)) {
1264 space
->uncompressed
+= BP_GET_UCSIZE(bp
);
1265 space
->compressed
+= BP_GET_PSIZE(bp
);
1271 * Given a desination snapshot and a TXG, calculate the approximate size of a
1272 * send stream sent from that TXG. from_txg may be zero, indicating that the
1273 * whole snapshot will be sent.
1276 dmu_send_estimate_from_txg(dsl_dataset_t
*ds
, uint64_t from_txg
,
1277 boolean_t stream_compressed
, uint64_t *sizep
)
1280 struct calculate_send_arg size
= { 0 };
1282 ASSERT(dsl_pool_config_held(ds
->ds_dir
->dd_pool
));
1284 /* tosnap must be a snapshot */
1285 if (!dsl_dataset_is_snapshot(ds
))
1286 return (SET_ERROR(EINVAL
));
1288 /* verify that from_txg is before the provided snapshot was taken */
1289 if (from_txg
>= dsl_dataset_phys(ds
)->ds_creation_txg
) {
1290 return (SET_ERROR(EXDEV
));
1293 * traverse the blocks of the snapshot with birth times after
1294 * from_txg, summing their uncompressed size
1296 err
= traverse_dataset(ds
, from_txg
, TRAVERSE_POST
,
1297 dmu_calculate_send_traversal
, &size
);
1302 err
= dmu_adjust_send_estimate_for_indirects(ds
, size
.uncompressed
,
1303 size
.compressed
, stream_compressed
, sizep
);
1307 typedef struct dmu_recv_begin_arg
{
1308 const char *drba_origin
;
1309 dmu_recv_cookie_t
*drba_cookie
;
1311 uint64_t drba_snapobj
;
1312 } dmu_recv_begin_arg_t
;
1315 recv_begin_check_existing_impl(dmu_recv_begin_arg_t
*drba
, dsl_dataset_t
*ds
,
1320 dsl_pool_t
*dp
= ds
->ds_dir
->dd_pool
;
1322 /* temporary clone name must not exist */
1323 error
= zap_lookup(dp
->dp_meta_objset
,
1324 dsl_dir_phys(ds
->ds_dir
)->dd_child_dir_zapobj
, recv_clone_name
,
1326 if (error
!= ENOENT
)
1327 return (error
== 0 ? EBUSY
: error
);
1329 /* new snapshot name must not exist */
1330 error
= zap_lookup(dp
->dp_meta_objset
,
1331 dsl_dataset_phys(ds
)->ds_snapnames_zapobj
,
1332 drba
->drba_cookie
->drc_tosnap
, 8, 1, &val
);
1333 if (error
!= ENOENT
)
1334 return (error
== 0 ? EEXIST
: error
);
1337 * Check snapshot limit before receiving. We'll recheck again at the
1338 * end, but might as well abort before receiving if we're already over
1341 * Note that we do not check the file system limit with
1342 * dsl_dir_fscount_check because the temporary %clones don't count
1343 * against that limit.
1345 error
= dsl_fs_ss_limit_check(ds
->ds_dir
, 1, ZFS_PROP_SNAPSHOT_LIMIT
,
1346 NULL
, drba
->drba_cred
);
1350 if (fromguid
!= 0) {
1351 dsl_dataset_t
*snap
;
1352 uint64_t obj
= dsl_dataset_phys(ds
)->ds_prev_snap_obj
;
1354 /* Find snapshot in this dir that matches fromguid. */
1356 error
= dsl_dataset_hold_obj(dp
, obj
, FTAG
,
1359 return (SET_ERROR(ENODEV
));
1360 if (snap
->ds_dir
!= ds
->ds_dir
) {
1361 dsl_dataset_rele(snap
, FTAG
);
1362 return (SET_ERROR(ENODEV
));
1364 if (dsl_dataset_phys(snap
)->ds_guid
== fromguid
)
1366 obj
= dsl_dataset_phys(snap
)->ds_prev_snap_obj
;
1367 dsl_dataset_rele(snap
, FTAG
);
1370 return (SET_ERROR(ENODEV
));
1372 if (drba
->drba_cookie
->drc_force
) {
1373 drba
->drba_snapobj
= obj
;
1376 * If we are not forcing, there must be no
1377 * changes since fromsnap.
1379 if (dsl_dataset_modified_since_snap(ds
, snap
)) {
1380 dsl_dataset_rele(snap
, FTAG
);
1381 return (SET_ERROR(ETXTBSY
));
1383 drba
->drba_snapobj
= ds
->ds_prev
->ds_object
;
1386 dsl_dataset_rele(snap
, FTAG
);
1388 /* if full, then must be forced */
1389 if (!drba
->drba_cookie
->drc_force
)
1390 return (SET_ERROR(EEXIST
));
1391 /* start from $ORIGIN@$ORIGIN, if supported */
1392 drba
->drba_snapobj
= dp
->dp_origin_snap
!= NULL
?
1393 dp
->dp_origin_snap
->ds_object
: 0;
1401 dmu_recv_begin_check(void *arg
, dmu_tx_t
*tx
)
1403 dmu_recv_begin_arg_t
*drba
= arg
;
1404 dsl_pool_t
*dp
= dmu_tx_pool(tx
);
1405 struct drr_begin
*drrb
= drba
->drba_cookie
->drc_drrb
;
1406 uint64_t fromguid
= drrb
->drr_fromguid
;
1407 int flags
= drrb
->drr_flags
;
1409 uint64_t featureflags
= DMU_GET_FEATUREFLAGS(drrb
->drr_versioninfo
);
1411 const char *tofs
= drba
->drba_cookie
->drc_tofs
;
1413 /* already checked */
1414 ASSERT3U(drrb
->drr_magic
, ==, DMU_BACKUP_MAGIC
);
1415 ASSERT(!(featureflags
& DMU_BACKUP_FEATURE_RESUMING
));
1417 if (DMU_GET_STREAM_HDRTYPE(drrb
->drr_versioninfo
) ==
1418 DMU_COMPOUNDSTREAM
||
1419 drrb
->drr_type
>= DMU_OST_NUMTYPES
||
1420 ((flags
& DRR_FLAG_CLONE
) && drba
->drba_origin
== NULL
))
1421 return (SET_ERROR(EINVAL
));
1423 /* Verify pool version supports SA if SA_SPILL feature set */
1424 if ((featureflags
& DMU_BACKUP_FEATURE_SA_SPILL
) &&
1425 spa_version(dp
->dp_spa
) < SPA_VERSION_SA
)
1426 return (SET_ERROR(ENOTSUP
));
1428 if (drba
->drba_cookie
->drc_resumable
&&
1429 !spa_feature_is_enabled(dp
->dp_spa
, SPA_FEATURE_EXTENSIBLE_DATASET
))
1430 return (SET_ERROR(ENOTSUP
));
1433 * The receiving code doesn't know how to translate a WRITE_EMBEDDED
1434 * record to a plain WRITE record, so the pool must have the
1435 * EMBEDDED_DATA feature enabled if the stream has WRITE_EMBEDDED
1436 * records. Same with WRITE_EMBEDDED records that use LZ4 compression.
1438 if ((featureflags
& DMU_BACKUP_FEATURE_EMBED_DATA
) &&
1439 !spa_feature_is_enabled(dp
->dp_spa
, SPA_FEATURE_EMBEDDED_DATA
))
1440 return (SET_ERROR(ENOTSUP
));
1441 if ((featureflags
& DMU_BACKUP_FEATURE_LZ4
) &&
1442 !spa_feature_is_enabled(dp
->dp_spa
, SPA_FEATURE_LZ4_COMPRESS
))
1443 return (SET_ERROR(ENOTSUP
));
1446 * The receiving code doesn't know how to translate large blocks
1447 * to smaller ones, so the pool must have the LARGE_BLOCKS
1448 * feature enabled if the stream has LARGE_BLOCKS. Same with
1451 if ((featureflags
& DMU_BACKUP_FEATURE_LARGE_BLOCKS
) &&
1452 !spa_feature_is_enabled(dp
->dp_spa
, SPA_FEATURE_LARGE_BLOCKS
))
1453 return (SET_ERROR(ENOTSUP
));
1454 if ((featureflags
& DMU_BACKUP_FEATURE_LARGE_DNODE
) &&
1455 !spa_feature_is_enabled(dp
->dp_spa
, SPA_FEATURE_LARGE_DNODE
))
1456 return (SET_ERROR(ENOTSUP
));
1458 error
= dsl_dataset_hold(dp
, tofs
, FTAG
, &ds
);
1460 /* target fs already exists; recv into temp clone */
1462 /* Can't recv a clone into an existing fs */
1463 if (flags
& DRR_FLAG_CLONE
|| drba
->drba_origin
) {
1464 dsl_dataset_rele(ds
, FTAG
);
1465 return (SET_ERROR(EINVAL
));
1468 error
= recv_begin_check_existing_impl(drba
, ds
, fromguid
);
1469 dsl_dataset_rele(ds
, FTAG
);
1470 } else if (error
== ENOENT
) {
1471 /* target fs does not exist; must be a full backup or clone */
1472 char buf
[ZFS_MAX_DATASET_NAME_LEN
];
1475 * If it's a non-clone incremental, we are missing the
1476 * target fs, so fail the recv.
1478 if (fromguid
!= 0 && !(flags
& DRR_FLAG_CLONE
||
1480 return (SET_ERROR(ENOENT
));
1483 * If we're receiving a full send as a clone, and it doesn't
1484 * contain all the necessary free records and freeobject
1485 * records, reject it.
1487 if (fromguid
== 0 && drba
->drba_origin
&&
1488 !(flags
& DRR_FLAG_FREERECORDS
))
1489 return (SET_ERROR(EINVAL
));
1491 /* Open the parent of tofs */
1492 ASSERT3U(strlen(tofs
), <, sizeof (buf
));
1493 (void) strlcpy(buf
, tofs
, strrchr(tofs
, '/') - tofs
+ 1);
1494 error
= dsl_dataset_hold(dp
, buf
, FTAG
, &ds
);
1499 * Check filesystem and snapshot limits before receiving. We'll
1500 * recheck snapshot limits again at the end (we create the
1501 * filesystems and increment those counts during begin_sync).
1503 error
= dsl_fs_ss_limit_check(ds
->ds_dir
, 1,
1504 ZFS_PROP_FILESYSTEM_LIMIT
, NULL
, drba
->drba_cred
);
1506 dsl_dataset_rele(ds
, FTAG
);
1510 error
= dsl_fs_ss_limit_check(ds
->ds_dir
, 1,
1511 ZFS_PROP_SNAPSHOT_LIMIT
, NULL
, drba
->drba_cred
);
1513 dsl_dataset_rele(ds
, FTAG
);
1517 if (drba
->drba_origin
!= NULL
) {
1518 dsl_dataset_t
*origin
;
1519 error
= dsl_dataset_hold(dp
, drba
->drba_origin
,
1522 dsl_dataset_rele(ds
, FTAG
);
1525 if (!origin
->ds_is_snapshot
) {
1526 dsl_dataset_rele(origin
, FTAG
);
1527 dsl_dataset_rele(ds
, FTAG
);
1528 return (SET_ERROR(EINVAL
));
1530 if (dsl_dataset_phys(origin
)->ds_guid
!= fromguid
&&
1532 dsl_dataset_rele(origin
, FTAG
);
1533 dsl_dataset_rele(ds
, FTAG
);
1534 return (SET_ERROR(ENODEV
));
1536 dsl_dataset_rele(origin
, FTAG
);
1538 dsl_dataset_rele(ds
, FTAG
);
static void
dmu_recv_begin_sync(void *arg, dmu_tx_t *tx)
{
	dmu_recv_begin_arg_t *drba = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	objset_t *mos = dp->dp_meta_objset;
	struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
	const char *tofs = drba->drba_cookie->drc_tofs;
	dsl_dataset_t *ds, *newds;
	uint64_t dsobj;
	int error;
	uint64_t crflags = 0;

	if (drrb->drr_flags & DRR_FLAG_CI_DATA)
		crflags |= DS_FLAG_CI_DATASET;

	error = dsl_dataset_hold(dp, tofs, FTAG, &ds);
	if (error == 0) {
		/* create temporary clone */
		dsl_dataset_t *snap = NULL;
		if (drba->drba_snapobj != 0) {
			VERIFY0(dsl_dataset_hold_obj(dp,
			    drba->drba_snapobj, FTAG, &snap));
		}
		dsobj = dsl_dataset_create_sync(ds->ds_dir, recv_clone_name,
		    snap, crflags, drba->drba_cred, tx);
		if (drba->drba_snapobj != 0)
			dsl_dataset_rele(snap, FTAG);
		dsl_dataset_rele(ds, FTAG);
	} else {
		dsl_dir_t *dd;
		const char *tail;
		dsl_dataset_t *origin = NULL;

		VERIFY0(dsl_dir_hold(dp, tofs, FTAG, &dd, &tail));

		if (drba->drba_origin != NULL) {
			VERIFY0(dsl_dataset_hold(dp, drba->drba_origin,
			    FTAG, &origin));
		}

		/* Create new dataset. */
		dsobj = dsl_dataset_create_sync(dd,
		    strrchr(tofs, '/') + 1,
		    origin, crflags, drba->drba_cred, tx);
		if (origin != NULL)
			dsl_dataset_rele(origin, FTAG);
		dsl_dir_rele(dd, FTAG);
		drba->drba_cookie->drc_newfs = B_TRUE;
	}
	VERIFY0(dsl_dataset_own_obj(dp, dsobj, dmu_recv_tag, &newds));

	if (drba->drba_cookie->drc_resumable) {
		uint64_t one = 1;
		uint64_t zero = 0;

		dsl_dataset_zapify(newds, tx);
		if (drrb->drr_fromguid != 0) {
			VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_FROMGUID,
			    8, 1, &drrb->drr_fromguid, tx));
		}
		VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_TOGUID,
		    8, 1, &drrb->drr_toguid, tx));
		VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_TONAME,
		    1, strlen(drrb->drr_toname) + 1, drrb->drr_toname, tx));
		VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_OBJECT,
		    8, 1, &one, tx));
		VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_OFFSET,
		    8, 1, &zero, tx));
		VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_BYTES,
		    8, 1, &zero, tx));
		if (DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo) &
		    DMU_BACKUP_FEATURE_LARGE_BLOCKS) {
			VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_LARGEBLOCK,
			    8, 1, &one, tx));
		}
		if (DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo) &
		    DMU_BACKUP_FEATURE_EMBED_DATA) {
			VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_EMBEDOK,
			    8, 1, &one, tx));
		}
		if (DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo) &
		    DMU_BACKUP_FEATURE_COMPRESSED) {
			VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_COMPRESSOK,
			    8, 1, &one, tx));
		}
	}

	dmu_buf_will_dirty(newds->ds_dbuf, tx);
	dsl_dataset_phys(newds)->ds_flags |= DS_FLAG_INCONSISTENT;

	/*
	 * If we actually created a non-clone, we need to create the
	 * objset in our new dataset.
	 */
	rrw_enter(&newds->ds_bp_rwlock, RW_READER, FTAG);
	if (BP_IS_HOLE(dsl_dataset_get_blkptr(newds))) {
		(void) dmu_objset_create_impl(dp->dp_spa,
		    newds, dsl_dataset_get_blkptr(newds), drrb->drr_type, tx);
	}
	rrw_exit(&newds->ds_bp_rwlock, FTAG);

	drba->drba_cookie->drc_ds = newds;

	spa_history_log_internal_ds(newds, "receive", tx, "");
}
static int
dmu_recv_resume_begin_check(void *arg, dmu_tx_t *tx)
{
	dmu_recv_begin_arg_t *drba = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
	int error;
	uint64_t featureflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo);
	dsl_dataset_t *ds;
	const char *tofs = drba->drba_cookie->drc_tofs;
	uint64_t val;

	/* 6 extra bytes for /%recv */
	char recvname[ZFS_MAX_DATASET_NAME_LEN + 6];

	/* already checked */
	ASSERT3U(drrb->drr_magic, ==, DMU_BACKUP_MAGIC);
	ASSERT(featureflags & DMU_BACKUP_FEATURE_RESUMING);

	if (DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) ==
	    DMU_COMPOUNDSTREAM ||
	    drrb->drr_type >= DMU_OST_NUMTYPES)
		return (SET_ERROR(EINVAL));

	/* Verify pool version supports SA if SA_SPILL feature set */
	if ((featureflags & DMU_BACKUP_FEATURE_SA_SPILL) &&
	    spa_version(dp->dp_spa) < SPA_VERSION_SA)
		return (SET_ERROR(ENOTSUP));

	/*
	 * The receiving code doesn't know how to translate a WRITE_EMBEDDED
	 * record to a plain WRITE record, so the pool must have the
	 * EMBEDDED_DATA feature enabled if the stream has WRITE_EMBEDDED
	 * records.  Same with WRITE_EMBEDDED records that use LZ4 compression.
	 */
	if ((featureflags & DMU_BACKUP_FEATURE_EMBED_DATA) &&
	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_EMBEDDED_DATA))
		return (SET_ERROR(ENOTSUP));
	if ((featureflags & DMU_BACKUP_FEATURE_LZ4) &&
	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LZ4_COMPRESS))
		return (SET_ERROR(ENOTSUP));

	/*
	 * The receiving code doesn't know how to translate large blocks
	 * to smaller ones, so the pool must have the LARGE_BLOCKS
	 * feature enabled if the stream has LARGE_BLOCKS.  Same with
	 * large dnodes.
	 */
	if ((featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) &&
	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LARGE_BLOCKS))
		return (SET_ERROR(ENOTSUP));
	if ((featureflags & DMU_BACKUP_FEATURE_LARGE_DNODE) &&
	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LARGE_DNODE))
		return (SET_ERROR(ENOTSUP));

	(void) snprintf(recvname, sizeof (recvname), "%s/%s",
	    tofs, recv_clone_name);

	if (dsl_dataset_hold(dp, recvname, FTAG, &ds) != 0) {
		/* %recv does not exist; continue in tofs */
		error = dsl_dataset_hold(dp, tofs, FTAG, &ds);
		if (error != 0)
			return (error);
	}

	/* check that ds is marked inconsistent */
	if (!DS_IS_INCONSISTENT(ds)) {
		dsl_dataset_rele(ds, FTAG);
		return (SET_ERROR(EINVAL));
	}

	/* check that there is resuming data, and that the toguid matches */
	if (!dsl_dataset_is_zapified(ds)) {
		dsl_dataset_rele(ds, FTAG);
		return (SET_ERROR(EINVAL));
	}
	error = zap_lookup(dp->dp_meta_objset, ds->ds_object,
	    DS_FIELD_RESUME_TOGUID, sizeof (val), 1, &val);
	if (error != 0 || drrb->drr_toguid != val) {
		dsl_dataset_rele(ds, FTAG);
		return (SET_ERROR(EINVAL));
	}

	/*
	 * Check if the receive is still running.  If so, it will be owned.
	 * Note that nothing else can own the dataset (e.g. after the receive
	 * fails) because it will be marked inconsistent.
	 */
	if (dsl_dataset_has_owner(ds)) {
		dsl_dataset_rele(ds, FTAG);
		return (SET_ERROR(EBUSY));
	}

	/* There should not be any snapshots of this fs yet. */
	if (ds->ds_prev != NULL && ds->ds_prev->ds_dir == ds->ds_dir) {
		dsl_dataset_rele(ds, FTAG);
		return (SET_ERROR(EINVAL));
	}

	/*
	 * Note: resume point will be checked when we process the first WRITE
	 * record.
	 */

	/* check that the origin matches */
	val = 0;
	(void) zap_lookup(dp->dp_meta_objset, ds->ds_object,
	    DS_FIELD_RESUME_FROMGUID, sizeof (val), 1, &val);
	if (drrb->drr_fromguid != val) {
		dsl_dataset_rele(ds, FTAG);
		return (SET_ERROR(EINVAL));
	}

	dsl_dataset_rele(ds, FTAG);
	return (0);
}
static void
dmu_recv_resume_begin_sync(void *arg, dmu_tx_t *tx)
{
	dmu_recv_begin_arg_t *drba = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	const char *tofs = drba->drba_cookie->drc_tofs;
	dsl_dataset_t *ds;
	uint64_t dsobj;
	/* 6 extra bytes for /%recv */
	char recvname[ZFS_MAX_DATASET_NAME_LEN + 6];

	(void) snprintf(recvname, sizeof (recvname), "%s/%s",
	    tofs, recv_clone_name);

	if (dsl_dataset_hold(dp, recvname, FTAG, &ds) != 0) {
		/* %recv does not exist; continue in tofs */
		VERIFY0(dsl_dataset_hold(dp, tofs, FTAG, &ds));
		drba->drba_cookie->drc_newfs = B_TRUE;
	}

	/* clear the inconsistent flag so that we can own it */
	ASSERT(DS_IS_INCONSISTENT(ds));
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	dsl_dataset_phys(ds)->ds_flags &= ~DS_FLAG_INCONSISTENT;
	dsobj = ds->ds_object;
	dsl_dataset_rele(ds, FTAG);

	VERIFY0(dsl_dataset_own_obj(dp, dsobj, dmu_recv_tag, &ds));

	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	dsl_dataset_phys(ds)->ds_flags |= DS_FLAG_INCONSISTENT;

	rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
	ASSERT(!BP_IS_HOLE(dsl_dataset_get_blkptr(ds)));
	rrw_exit(&ds->ds_bp_rwlock, FTAG);

	drba->drba_cookie->drc_ds = ds;

	spa_history_log_internal_ds(ds, "resume receive", tx, "");
}
/*
 * NB: callers *MUST* call dmu_recv_stream() if dmu_recv_begin()
 * succeeds; otherwise we will leak the holds on the datasets.
 */
int
dmu_recv_begin(char *tofs, char *tosnap, dmu_replay_record_t *drr_begin,
    boolean_t force, boolean_t resumable, char *origin, dmu_recv_cookie_t *drc)
{
	dmu_recv_begin_arg_t drba = { 0 };

	bzero(drc, sizeof (dmu_recv_cookie_t));
	drc->drc_drr_begin = drr_begin;
	drc->drc_drrb = &drr_begin->drr_u.drr_begin;
	drc->drc_tosnap = tosnap;
	drc->drc_tofs = tofs;
	drc->drc_force = force;
	drc->drc_resumable = resumable;
	drc->drc_cred = CRED();
	drc->drc_clone = (origin != NULL);

	if (drc->drc_drrb->drr_magic == BSWAP_64(DMU_BACKUP_MAGIC)) {
		drc->drc_byteswap = B_TRUE;
		(void) fletcher_4_incremental_byteswap(drr_begin,
		    sizeof (dmu_replay_record_t), &drc->drc_cksum);
		byteswap_record(drr_begin);
	} else if (drc->drc_drrb->drr_magic == DMU_BACKUP_MAGIC) {
		(void) fletcher_4_incremental_native(drr_begin,
		    sizeof (dmu_replay_record_t), &drc->drc_cksum);
	} else {
		return (SET_ERROR(EINVAL));
	}

	drba.drba_origin = origin;
	drba.drba_cookie = drc;
	drba.drba_cred = CRED();

	if (DMU_GET_FEATUREFLAGS(drc->drc_drrb->drr_versioninfo) &
	    DMU_BACKUP_FEATURE_RESUMING) {
		return (dsl_sync_task(tofs,
		    dmu_recv_resume_begin_check, dmu_recv_resume_begin_sync,
		    &drba, 5, ZFS_SPACE_CHECK_NORMAL));
	} else {
		return (dsl_sync_task(tofs,
		    dmu_recv_begin_check, dmu_recv_begin_sync,
		    &drba, 5, ZFS_SPACE_CHECK_NORMAL));
	}
}
struct receive_record_arg {
	dmu_replay_record_t header;
	void *payload; /* Pointer to a buffer containing the payload */
	/*
	 * If the record is a write, pointer to the arc_buf_t containing the
	 * payload.
	 */
	arc_buf_t *write_buf;
	int payload_size;
	uint64_t bytes_read; /* bytes read from stream when record created */
	boolean_t eos_marker; /* Marks the end of the stream */
	bqueue_node_t node;
};

struct receive_writer_arg {
	objset_t *os;
	boolean_t byteswap;
	bqueue_t q;

	/*
	 * These three args are used to signal to the main thread that we're
	 * done.
	 */
	kmutex_t mutex;
	kcondvar_t cv;
	boolean_t done;

	int err;
	/* A map from guid to dataset to help handle dedup'd streams. */
	avl_tree_t *guid_to_ds_map;
	boolean_t resumable;
	uint64_t last_object;
	uint64_t last_offset;
	uint64_t max_object; /* highest object ID referenced in stream */
	uint64_t bytes_read; /* bytes read when current record created */
};

struct objlist {
	list_t list; /* List of struct receive_objnode. */
	/*
	 * Last object looked up. Used to assert that objects are being looked
	 * up in ascending order.
	 */
	uint64_t last_lookup;
};

struct receive_objnode {
	list_node_t node;
	uint64_t object;
};

struct receive_arg {
	objset_t *os;
	vnode_t *vp; /* The vnode to read the stream from */
	uint64_t voff; /* The current offset in the stream */
	uint64_t bytes_read;
	/*
	 * A record that has had its payload read in, but hasn't yet been handed
	 * off to the worker thread.
	 */
	struct receive_record_arg *rrd;
	/* A record that has had its header read in, but not its payload. */
	struct receive_record_arg *next_rrd;
	zio_cksum_t cksum;
	zio_cksum_t prev_cksum;
	int err;
	boolean_t byteswap;
	/* Sorted list of objects not to issue prefetches for. */
	struct objlist ignore_objlist;
};
typedef struct guid_map_entry {
	uint64_t	guid;
	dsl_dataset_t	*gme_ds;
	avl_node_t	avlnode;
} guid_map_entry_t;

static int
guid_compare(const void *arg1, const void *arg2)
{
	const guid_map_entry_t *gmep1 = (const guid_map_entry_t *)arg1;
	const guid_map_entry_t *gmep2 = (const guid_map_entry_t *)arg2;

	return (AVL_CMP(gmep1->guid, gmep2->guid));
}

static void
free_guid_map_onexit(void *arg)
{
	avl_tree_t *ca = arg;
	void *cookie = NULL;
	guid_map_entry_t *gmep;

	while ((gmep = avl_destroy_nodes(ca, &cookie)) != NULL) {
		dsl_dataset_long_rele(gmep->gme_ds, gmep);
		dsl_dataset_rele(gmep->gme_ds, gmep);
		kmem_free(gmep, sizeof (guid_map_entry_t));
	}
	avl_destroy(ca);
	kmem_free(ca, sizeof (avl_tree_t));
}
static int
receive_read(struct receive_arg *ra, int len, void *buf)
{
	int done = 0;

	/*
	 * The code doesn't rely on this (lengths being multiples of 8).  See
	 * comment in dump_bytes.
	 */
	ASSERT0(len % 8);

	while (done < len) {
		ssize_t resid;

		ra->err = vn_rdwr(UIO_READ, ra->vp,
		    (char *)buf + done, len - done,
		    ra->voff, UIO_SYSSPACE, FAPPEND,
		    RLIM64_INFINITY, CRED(), &resid);

		if (resid == len - done) {
			/*
			 * Note: ECKSUM indicates that the receive
			 * was interrupted and can potentially be resumed.
			 */
			ra->err = SET_ERROR(ECKSUM);
		}
		ra->voff += len - done - resid;
		done = len - resid;
		if (ra->err != 0)
			return (ra->err);
	}

	ra->bytes_read += len;

	ASSERT3U(done, ==, len);
	return (0);
}
noinline static void
byteswap_record(dmu_replay_record_t *drr)
{
#define	DO64(X) (drr->drr_u.X = BSWAP_64(drr->drr_u.X))
#define	DO32(X) (drr->drr_u.X = BSWAP_32(drr->drr_u.X))
	drr->drr_type = BSWAP_32(drr->drr_type);
	drr->drr_payloadlen = BSWAP_32(drr->drr_payloadlen);

	switch (drr->drr_type) {
	case DRR_BEGIN:
		DO64(drr_begin.drr_magic);
		DO64(drr_begin.drr_versioninfo);
		DO64(drr_begin.drr_creation_time);
		DO32(drr_begin.drr_type);
		DO32(drr_begin.drr_flags);
		DO64(drr_begin.drr_toguid);
		DO64(drr_begin.drr_fromguid);
		break;
	case DRR_OBJECT:
		DO64(drr_object.drr_object);
		DO32(drr_object.drr_type);
		DO32(drr_object.drr_bonustype);
		DO32(drr_object.drr_blksz);
		DO32(drr_object.drr_bonuslen);
		DO64(drr_object.drr_toguid);
		break;
	case DRR_FREEOBJECTS:
		DO64(drr_freeobjects.drr_firstobj);
		DO64(drr_freeobjects.drr_numobjs);
		DO64(drr_freeobjects.drr_toguid);
		break;
	case DRR_WRITE:
		DO64(drr_write.drr_object);
		DO32(drr_write.drr_type);
		DO64(drr_write.drr_offset);
		DO64(drr_write.drr_logical_size);
		DO64(drr_write.drr_toguid);
		ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_write.drr_key.ddk_cksum);
		DO64(drr_write.drr_key.ddk_prop);
		DO64(drr_write.drr_compressed_size);
		break;
	case DRR_WRITE_BYREF:
		DO64(drr_write_byref.drr_object);
		DO64(drr_write_byref.drr_offset);
		DO64(drr_write_byref.drr_length);
		DO64(drr_write_byref.drr_toguid);
		DO64(drr_write_byref.drr_refguid);
		DO64(drr_write_byref.drr_refobject);
		DO64(drr_write_byref.drr_refoffset);
		ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_write_byref.
		    drr_key.ddk_cksum);
		DO64(drr_write_byref.drr_key.ddk_prop);
		break;
	case DRR_WRITE_EMBEDDED:
		DO64(drr_write_embedded.drr_object);
		DO64(drr_write_embedded.drr_offset);
		DO64(drr_write_embedded.drr_length);
		DO64(drr_write_embedded.drr_toguid);
		DO32(drr_write_embedded.drr_lsize);
		DO32(drr_write_embedded.drr_psize);
		break;
	case DRR_FREE:
		DO64(drr_free.drr_object);
		DO64(drr_free.drr_offset);
		DO64(drr_free.drr_length);
		DO64(drr_free.drr_toguid);
		break;
	case DRR_SPILL:
		DO64(drr_spill.drr_object);
		DO64(drr_spill.drr_length);
		DO64(drr_spill.drr_toguid);
		break;
	case DRR_END:
		DO64(drr_end.drr_toguid);
		ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_end.drr_checksum);
		break;
	default:
		break;
	}

	if (drr->drr_type != DRR_BEGIN) {
		ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_checksum.drr_checksum);
	}

#undef DO64
#undef DO32
}
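
/*
 * For reference (added commentary): the DO64/DO32 macros above byteswap a
 * field of the record's drr_u union in place, so
 *
 *	DO64(drr_begin.drr_magic);
 *
 * expands to
 *
 *	drr->drr_u.drr_begin.drr_magic =
 *	    BSWAP_64(drr->drr_u.drr_begin.drr_magic);
 */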
static inline uint8_t
deduce_nblkptr(dmu_object_type_t bonus_type, uint64_t bonus_size)
{
	if (bonus_type == DMU_OT_SA) {
		return (1);
	} else {
		return (1 +
		    ((DN_OLD_MAX_BONUSLEN -
		    MIN(DN_OLD_MAX_BONUSLEN, bonus_size)) >> SPA_BLKPTRSHIFT));
	}
}
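
/*
 * Worked example (added commentary): with DN_OLD_MAX_BONUSLEN of 320 bytes
 * and SPA_BLKPTRSHIFT of 7 (128-byte block pointers), a 64-byte bonus
 * buffer yields 1 + ((320 - 64) >> 7) = 3 block pointers, while a full
 * 320-byte bonus leaves just the one mandatory blkptr.  DMU_OT_SA bonus
 * buffers can spill, so they always get exactly one.
 */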
static void
save_resume_state(struct receive_writer_arg *rwa,
    uint64_t object, uint64_t offset, dmu_tx_t *tx)
{
	int txgoff = dmu_tx_get_txg(tx) & TXG_MASK;

	if (!rwa->resumable)
		return;

	/*
	 * We use ds_resume_bytes[] != 0 to indicate that we need to
	 * update this on disk, so it must not be 0.
	 */
	ASSERT(rwa->bytes_read != 0);

	/*
	 * We only resume from write records, which have a valid
	 * (non-meta-dnode) object number.
	 */
	ASSERT(object != 0);

	/*
	 * For resuming to work correctly, we must receive records in order,
	 * sorted by object,offset.  This is checked by the callers, but
	 * assert it here for good measure.
	 */
	ASSERT3U(object, >=, rwa->os->os_dsl_dataset->ds_resume_object[txgoff]);
	ASSERT(object != rwa->os->os_dsl_dataset->ds_resume_object[txgoff] ||
	    offset >= rwa->os->os_dsl_dataset->ds_resume_offset[txgoff]);
	ASSERT3U(rwa->bytes_read, >=,
	    rwa->os->os_dsl_dataset->ds_resume_bytes[txgoff]);

	rwa->os->os_dsl_dataset->ds_resume_object[txgoff] = object;
	rwa->os->os_dsl_dataset->ds_resume_offset[txgoff] = offset;
	rwa->os->os_dsl_dataset->ds_resume_bytes[txgoff] = rwa->bytes_read;
}
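
/*
 * Note (added commentary): the ds_resume_{object,offset,bytes}[] slots
 * written above are persisted by the dataset sync path and feed the
 * receive_resume_token property reported to userland; resume_check()
 * below compares a resuming stream against the same on-disk state, and
 * dmu_recv_end_sync() removes it once the receive completes.
 */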
noinline static int
receive_object(struct receive_writer_arg *rwa, struct drr_object *drro,
    void *data)
{
	dmu_object_info_t doi;
	dmu_tx_t *tx;
	uint64_t object;
	int err;
	uint8_t dn_slots = drro->drr_dn_slots != 0 ?
	    drro->drr_dn_slots : DNODE_MIN_SLOTS;

	if (drro->drr_type == DMU_OT_NONE ||
	    !DMU_OT_IS_VALID(drro->drr_type) ||
	    !DMU_OT_IS_VALID(drro->drr_bonustype) ||
	    drro->drr_checksumtype >= ZIO_CHECKSUM_FUNCTIONS ||
	    drro->drr_compress >= ZIO_COMPRESS_FUNCTIONS ||
	    P2PHASE(drro->drr_blksz, SPA_MINBLOCKSIZE) ||
	    drro->drr_blksz < SPA_MINBLOCKSIZE ||
	    drro->drr_blksz > spa_maxblocksize(dmu_objset_spa(rwa->os)) ||
	    drro->drr_bonuslen >
	    DN_BONUS_SIZE(spa_maxdnodesize(dmu_objset_spa(rwa->os))) ||
	    dn_slots >
	    (spa_maxdnodesize(dmu_objset_spa(rwa->os)) >> DNODE_SHIFT)) {
		return (SET_ERROR(EINVAL));
	}

	err = dmu_object_info(rwa->os, drro->drr_object, &doi);
	if (err != 0 && err != ENOENT && err != EEXIST)
		return (SET_ERROR(EINVAL));

	if (drro->drr_object > rwa->max_object)
		rwa->max_object = drro->drr_object;

	/*
	 * If we are losing blkptrs or changing the block size this must
	 * be a new file instance.  We must clear out the previous file
	 * contents before we can change this type of metadata in the dnode.
	 */
	if (err == 0) {
		int nblkptr;

		object = drro->drr_object;

		nblkptr = deduce_nblkptr(drro->drr_bonustype,
		    drro->drr_bonuslen);

		if (drro->drr_blksz != doi.doi_data_block_size ||
		    nblkptr < doi.doi_nblkptr ||
		    dn_slots != doi.doi_dnodesize >> DNODE_SHIFT) {
			err = dmu_free_long_range(rwa->os, drro->drr_object,
			    0, DMU_OBJECT_END);
			if (err != 0)
				return (SET_ERROR(EINVAL));
		}

		/*
		 * The dmu does not currently support decreasing nlevels
		 * on an object. For non-raw sends, this does not matter
		 * and the new object can just use the previous one's nlevels.
		 * For raw sends, however, the structure of the received dnode
		 * (including nlevels) must match that of the send side.
		 * Therefore, instead of using dmu_object_reclaim(), we must
		 * free the object completely and call dmu_object_claim_dnsize()
		 * instead.
		 */
		if (dn_slots != doi.doi_dnodesize >> DNODE_SHIFT) {
			err = dmu_free_long_object(rwa->os, drro->drr_object);
			if (err != 0)
				return (SET_ERROR(EINVAL));

			txg_wait_synced(dmu_objset_pool(rwa->os), 0);
			object = DMU_NEW_OBJECT;
		}
	} else if (err == EEXIST) {
		/*
		 * The object requested is currently an interior slot of a
		 * multi-slot dnode. This will be resolved when the next txg
		 * is synced out, since the send stream will have told us
		 * to free this slot when we freed the associated dnode
		 * earlier in the stream.
		 */
		txg_wait_synced(dmu_objset_pool(rwa->os), 0);
		object = drro->drr_object;
	} else {
		/* object is free and we are about to allocate a new one */
		object = DMU_NEW_OBJECT;
	}

	/*
	 * If this is a multi-slot dnode there is a chance that this
	 * object will expand into a slot that is already used by
	 * another object from the previous snapshot. We must free
	 * these objects before we attempt to allocate the new dnode.
	 */
	if (dn_slots > 1) {
		boolean_t need_sync = B_FALSE;

		for (uint64_t slot = drro->drr_object + 1;
		    slot < drro->drr_object + dn_slots;
		    slot++) {
			dmu_object_info_t slot_doi;

			err = dmu_object_info(rwa->os, slot, &slot_doi);
			if (err == ENOENT || err == EEXIST)
				continue;
			else if (err != 0)
				return (err);

			err = dmu_free_long_object(rwa->os, slot);
			if (err != 0)
				return (err);

			need_sync = B_TRUE;
		}

		if (need_sync)
			txg_wait_synced(dmu_objset_pool(rwa->os), 0);
	}

	tx = dmu_tx_create(rwa->os);
	dmu_tx_hold_bonus(tx, object);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err != 0) {
		dmu_tx_abort(tx);
		return (err);
	}

	if (object == DMU_NEW_OBJECT) {
		/* currently free, want to be allocated */
		err = dmu_object_claim_dnsize(rwa->os, drro->drr_object,
		    drro->drr_type, drro->drr_blksz,
		    drro->drr_bonustype, drro->drr_bonuslen,
		    dn_slots << DNODE_SHIFT, tx);
	} else if (drro->drr_type != doi.doi_type ||
	    drro->drr_blksz != doi.doi_data_block_size ||
	    drro->drr_bonustype != doi.doi_bonus_type ||
	    drro->drr_bonuslen != doi.doi_bonus_size) {
		/* currently allocated, but with different properties */
		err = dmu_object_reclaim_dnsize(rwa->os, drro->drr_object,
		    drro->drr_type, drro->drr_blksz,
		    drro->drr_bonustype, drro->drr_bonuslen,
		    dn_slots << DNODE_SHIFT, tx);
	}
	if (err != 0) {
		dmu_tx_commit(tx);
		return (SET_ERROR(EINVAL));
	}

	dmu_object_set_checksum(rwa->os, drro->drr_object,
	    drro->drr_checksumtype, tx);
	dmu_object_set_compress(rwa->os, drro->drr_object,
	    drro->drr_compress, tx);

	if (data != NULL) {
		dmu_buf_t *db;

		VERIFY0(dmu_bonus_hold(rwa->os, drro->drr_object, FTAG, &db));
		dmu_buf_will_dirty(db, tx);

		ASSERT3U(db->db_size, >=, drro->drr_bonuslen);
		bcopy(data, db->db_data, drro->drr_bonuslen);
		if (rwa->byteswap) {
			dmu_object_byteswap_t byteswap =
			    DMU_OT_BYTESWAP(drro->drr_bonustype);
			dmu_ot_byteswap[byteswap].ob_func(db->db_data,
			    drro->drr_bonuslen);
		}
		dmu_buf_rele(db, FTAG);
	}
	dmu_tx_commit(tx);

	return (0);
}
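
/*
 * Note (added commentary): with the largest supported dnode size of 16K,
 * dn_slots above can be as large as 32 (16384 >> DNODE_SHIFT), so the
 * slot-freeing loop may clear up to 31 trailing 512-byte slots left over
 * from the previous snapshot before the new dnode is claimed.
 */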
/* ARGSUSED */
noinline static int
receive_freeobjects(struct receive_writer_arg *rwa,
    struct drr_freeobjects *drrfo)
{
	uint64_t obj;
	int next_err = 0;

	if (drrfo->drr_firstobj + drrfo->drr_numobjs < drrfo->drr_firstobj)
		return (SET_ERROR(EINVAL));

	for (obj = drrfo->drr_firstobj == 0 ? 1 : drrfo->drr_firstobj;
	    obj < drrfo->drr_firstobj + drrfo->drr_numobjs && next_err == 0;
	    next_err = dmu_object_next(rwa->os, &obj, FALSE, 0)) {
		dmu_object_info_t doi;
		int err;

		err = dmu_object_info(rwa->os, obj, &doi);
		if (err == ENOENT) {
			obj++;
			continue;
		} else if (err != 0) {
			return (err);
		}

		err = dmu_free_long_object(rwa->os, obj);
		if (err != 0)
			return (err);

		if (obj > rwa->max_object)
			rwa->max_object = obj;
	}
	if (next_err != ESRCH)
		return (next_err);
	return (0);
}
noinline static int
receive_write(struct receive_writer_arg *rwa, struct drr_write *drrw,
    arc_buf_t *abuf)
{
	dmu_tx_t *tx;
	dmu_buf_t *bonus;
	int err;

	if (drrw->drr_offset + drrw->drr_logical_size < drrw->drr_offset ||
	    !DMU_OT_IS_VALID(drrw->drr_type))
		return (SET_ERROR(EINVAL));

	/*
	 * For resuming to work, records must be in increasing order
	 * by (object, offset).
	 */
	if (drrw->drr_object < rwa->last_object ||
	    (drrw->drr_object == rwa->last_object &&
	    drrw->drr_offset < rwa->last_offset)) {
		return (SET_ERROR(EINVAL));
	}
	rwa->last_object = drrw->drr_object;
	rwa->last_offset = drrw->drr_offset;

	if (rwa->last_object > rwa->max_object)
		rwa->max_object = rwa->last_object;

	if (dmu_object_info(rwa->os, drrw->drr_object, NULL) != 0)
		return (SET_ERROR(EINVAL));

	tx = dmu_tx_create(rwa->os);

	dmu_tx_hold_write(tx, drrw->drr_object,
	    drrw->drr_offset, drrw->drr_logical_size);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err != 0) {
		dmu_tx_abort(tx);
		return (err);
	}
	if (rwa->byteswap) {
		dmu_object_byteswap_t byteswap =
		    DMU_OT_BYTESWAP(drrw->drr_type);
		dmu_ot_byteswap[byteswap].ob_func(abuf->b_data,
		    DRR_WRITE_PAYLOAD_SIZE(drrw));
	}

	/* use the bonus buf to look up the dnode in dmu_assign_arcbuf */
	if (dmu_bonus_hold(rwa->os, drrw->drr_object, FTAG, &bonus) != 0)
		return (SET_ERROR(EINVAL));
	dmu_assign_arcbuf(bonus, drrw->drr_offset, abuf, tx);

	/*
	 * Note: If the receive fails, we want the resume stream to start
	 * with the same record that we last successfully received (as opposed
	 * to the next record), so that we can verify that we are
	 * resuming from the correct location.
	 */
	save_resume_state(rwa, drrw->drr_object, drrw->drr_offset, tx);
	dmu_tx_commit(tx);
	dmu_buf_rele(bonus, FTAG);

	return (0);
}
/*
 * Handle a DRR_WRITE_BYREF record.  This record is used in dedup'ed
 * streams to refer to a copy of the data that is already on the
 * system because it came in earlier in the stream.  This function
 * finds the earlier copy of the data, and uses that copy instead of
 * data from the stream to fulfill this write.
 */
static int
receive_write_byref(struct receive_writer_arg *rwa,
    struct drr_write_byref *drrwbr)
{
	dmu_tx_t *tx;
	int err;
	guid_map_entry_t gmesrch;
	guid_map_entry_t *gmep;
	avl_index_t where;
	objset_t *ref_os = NULL;
	dmu_buf_t *dbp;

	if (drrwbr->drr_offset + drrwbr->drr_length < drrwbr->drr_offset)
		return (SET_ERROR(EINVAL));

	/*
	 * If the GUID of the referenced dataset is different from the
	 * GUID of the target dataset, find the referenced dataset.
	 */
	if (drrwbr->drr_toguid != drrwbr->drr_refguid) {
		gmesrch.guid = drrwbr->drr_refguid;
		if ((gmep = avl_find(rwa->guid_to_ds_map, &gmesrch,
		    &where)) == NULL) {
			return (SET_ERROR(EINVAL));
		}
		if (dmu_objset_from_ds(gmep->gme_ds, &ref_os))
			return (SET_ERROR(EINVAL));
	} else {
		ref_os = rwa->os;
	}

	if (drrwbr->drr_object > rwa->max_object)
		rwa->max_object = drrwbr->drr_object;

	err = dmu_buf_hold(ref_os, drrwbr->drr_refobject,
	    drrwbr->drr_refoffset, FTAG, &dbp, DMU_READ_PREFETCH);
	if (err != 0)
		return (err);

	tx = dmu_tx_create(rwa->os);

	dmu_tx_hold_write(tx, drrwbr->drr_object,
	    drrwbr->drr_offset, drrwbr->drr_length);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err != 0) {
		dmu_tx_abort(tx);
		return (err);
	}
	dmu_write(rwa->os, drrwbr->drr_object,
	    drrwbr->drr_offset, drrwbr->drr_length, dbp->db_data, tx);
	dmu_buf_rele(dbp, FTAG);

	/* See comment in restore_write. */
	save_resume_state(rwa, drrwbr->drr_object, drrwbr->drr_offset, tx);
	dmu_tx_commit(tx);
	return (0);
}
static int
receive_write_embedded(struct receive_writer_arg *rwa,
    struct drr_write_embedded *drrwe, void *data)
{
	dmu_tx_t *tx;
	int err;

	if (drrwe->drr_offset + drrwe->drr_length < drrwe->drr_offset)
		return (SET_ERROR(EINVAL));

	if (drrwe->drr_psize > BPE_PAYLOAD_SIZE)
		return (SET_ERROR(EINVAL));

	if (drrwe->drr_etype >= NUM_BP_EMBEDDED_TYPES)
		return (SET_ERROR(EINVAL));
	if (drrwe->drr_compression >= ZIO_COMPRESS_FUNCTIONS)
		return (SET_ERROR(EINVAL));

	if (drrwe->drr_object > rwa->max_object)
		rwa->max_object = drrwe->drr_object;

	tx = dmu_tx_create(rwa->os);

	dmu_tx_hold_write(tx, drrwe->drr_object,
	    drrwe->drr_offset, drrwe->drr_length);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err != 0) {
		dmu_tx_abort(tx);
		return (err);
	}

	dmu_write_embedded(rwa->os, drrwe->drr_object,
	    drrwe->drr_offset, data, drrwe->drr_etype,
	    drrwe->drr_compression, drrwe->drr_lsize, drrwe->drr_psize,
	    rwa->byteswap ^ ZFS_HOST_BYTEORDER, tx);

	/* See comment in restore_write. */
	save_resume_state(rwa, drrwe->drr_object, drrwe->drr_offset, tx);
	dmu_tx_commit(tx);
	return (0);
}
static int
receive_spill(struct receive_writer_arg *rwa, struct drr_spill *drrs,
    void *data)
{
	dmu_tx_t *tx;
	dmu_buf_t *db, *db_spill;
	int err;

	if (drrs->drr_length < SPA_MINBLOCKSIZE ||
	    drrs->drr_length > spa_maxblocksize(dmu_objset_spa(rwa->os)))
		return (SET_ERROR(EINVAL));

	if (dmu_object_info(rwa->os, drrs->drr_object, NULL) != 0)
		return (SET_ERROR(EINVAL));

	if (drrs->drr_object > rwa->max_object)
		rwa->max_object = drrs->drr_object;

	VERIFY0(dmu_bonus_hold(rwa->os, drrs->drr_object, FTAG, &db));
	if ((err = dmu_spill_hold_by_bonus(db, FTAG, &db_spill)) != 0) {
		dmu_buf_rele(db, FTAG);
		return (err);
	}

	tx = dmu_tx_create(rwa->os);

	dmu_tx_hold_spill(tx, db->db_object);

	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err != 0) {
		dmu_buf_rele(db, FTAG);
		dmu_buf_rele(db_spill, FTAG);
		dmu_tx_abort(tx);
		return (err);
	}
	dmu_buf_will_dirty(db_spill, tx);

	if (db_spill->db_size < drrs->drr_length)
		VERIFY(0 == dbuf_spill_set_blksz(db_spill,
		    drrs->drr_length, tx));
	bcopy(data, db_spill->db_data, drrs->drr_length);

	dmu_buf_rele(db, FTAG);
	dmu_buf_rele(db_spill, FTAG);

	dmu_tx_commit(tx);
	return (0);
}
/* ARGSUSED */
noinline static int
receive_free(struct receive_writer_arg *rwa, struct drr_free *drrf)
{
	int err;

	if (drrf->drr_length != -1ULL &&
	    drrf->drr_offset + drrf->drr_length < drrf->drr_offset)
		return (SET_ERROR(EINVAL));

	if (dmu_object_info(rwa->os, drrf->drr_object, NULL) != 0)
		return (SET_ERROR(EINVAL));

	if (drrf->drr_object > rwa->max_object)
		rwa->max_object = drrf->drr_object;

	err = dmu_free_long_range(rwa->os, drrf->drr_object,
	    drrf->drr_offset, drrf->drr_length);

	return (err);
}
/* used to destroy the drc_ds on error */
static void
dmu_recv_cleanup_ds(dmu_recv_cookie_t *drc)
{
	if (drc->drc_resumable) {
		/* wait for our resume state to be written to disk */
		txg_wait_synced(drc->drc_ds->ds_dir->dd_pool, 0);
		dsl_dataset_disown(drc->drc_ds, dmu_recv_tag);
	} else {
		char name[ZFS_MAX_DATASET_NAME_LEN];
		dsl_dataset_name(drc->drc_ds, name);
		dsl_dataset_disown(drc->drc_ds, dmu_recv_tag);
		(void) dsl_destroy_head(name);
	}
}
static void
receive_cksum(struct receive_arg *ra, int len, void *buf)
{
	if (ra->byteswap) {
		(void) fletcher_4_incremental_byteswap(buf, len, &ra->cksum);
	} else {
		(void) fletcher_4_incremental_native(buf, len, &ra->cksum);
	}
}
/*
 * Read the payload into a buffer of size len, and update the current record's
 * payload field.
 * Allocate ra->next_rrd and read the next record's header into
 * ra->next_rrd->header.
 * Verify checksum of payload and next record.
 */
static int
receive_read_payload_and_next_header(struct receive_arg *ra, int len,
    void *buf)
{
	int err;
	zio_cksum_t cksum_orig;
	zio_cksum_t *cksump;

	if (len != 0) {
		ASSERT3U(len, <=, SPA_MAXBLOCKSIZE);
		err = receive_read(ra, len, buf);
		if (err != 0)
			return (err);
		receive_cksum(ra, len, buf);

		/* note: rrd is NULL when reading the begin record's payload */
		if (ra->rrd != NULL) {
			ra->rrd->payload = buf;
			ra->rrd->payload_size = len;
			ra->rrd->bytes_read = ra->bytes_read;
		}
	}

	ra->prev_cksum = ra->cksum;

	ra->next_rrd = kmem_zalloc(sizeof (*ra->next_rrd), KM_SLEEP);
	err = receive_read(ra, sizeof (ra->next_rrd->header),
	    &ra->next_rrd->header);
	ra->next_rrd->bytes_read = ra->bytes_read;
	if (err != 0) {
		kmem_free(ra->next_rrd, sizeof (*ra->next_rrd));
		ra->next_rrd = NULL;
		return (err);
	}
	if (ra->next_rrd->header.drr_type == DRR_BEGIN) {
		kmem_free(ra->next_rrd, sizeof (*ra->next_rrd));
		ra->next_rrd = NULL;
		return (SET_ERROR(EINVAL));
	}

	/*
	 * Note: checksum is of everything up to but not including the
	 * checksum itself.
	 */
	ASSERT3U(offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
	    ==, sizeof (dmu_replay_record_t) - sizeof (zio_cksum_t));
	receive_cksum(ra,
	    offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
	    &ra->next_rrd->header);

	cksum_orig = ra->next_rrd->header.drr_u.drr_checksum.drr_checksum;
	cksump = &ra->next_rrd->header.drr_u.drr_checksum.drr_checksum;

	if (ra->byteswap)
		byteswap_record(&ra->next_rrd->header);

	if ((!ZIO_CHECKSUM_IS_ZERO(cksump)) &&
	    !ZIO_CHECKSUM_EQUAL(ra->cksum, *cksump)) {
		kmem_free(ra->next_rrd, sizeof (*ra->next_rrd));
		ra->next_rrd = NULL;
		return (SET_ERROR(ECKSUM));
	}

	receive_cksum(ra, sizeof (cksum_orig), &cksum_orig);

	return (0);
}
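
/*
 * Note (added commentary): a dmu_replay_record_t ends with a zio_cksum_t
 * (asserted above), and the stream checksum covers every byte sent so far
 * except the checksum field itself.  That is why the header is folded into
 * ra->cksum in two steps: everything before the checksum field prior to
 * the comparison, and the original checksum bytes afterwards.
 */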
static void
objlist_create(struct objlist *list)
{
	list_create(&list->list, sizeof (struct receive_objnode),
	    offsetof(struct receive_objnode, node));
	list->last_lookup = 0;
}
static void
objlist_destroy(struct objlist *list)
{
	struct receive_objnode *n;

	for (n = list_remove_head(&list->list);
	    n != NULL; n = list_remove_head(&list->list)) {
		kmem_free(n, sizeof (*n));
	}
	list_destroy(&list->list);
}
/*
 * This function looks through the objlist to see if the specified object number
 * is contained in the objlist.  In the process, it will remove all object
 * numbers in the list that are smaller than the specified object number.  Thus,
 * any lookup of an object number smaller than a previously looked up object
 * number will always return false; therefore, all lookups should be done in
 * ascending order.
 */
static boolean_t
objlist_exists(struct objlist *list, uint64_t object)
{
	struct receive_objnode *node = list_head(&list->list);
	ASSERT3U(object, >=, list->last_lookup);
	list->last_lookup = object;
	while (node != NULL && node->object < object) {
		VERIFY3P(node, ==, list_remove_head(&list->list));
		kmem_free(node, sizeof (*node));
		node = list_head(&list->list);
	}
	return (node != NULL && node->object == object);
}
/*
 * The objlist is a list of object numbers stored in ascending order.  However,
 * the insertion of new object numbers does not seek out the correct location to
 * store a new object number; instead, it appends it to the list for simplicity.
 * Thus, any users must take care to only insert new object numbers in ascending
 * order.
 */
static void
objlist_insert(struct objlist *list, uint64_t object)
{
	struct receive_objnode *node = kmem_zalloc(sizeof (*node), KM_SLEEP);
	node->object = object;
#ifdef ZFS_DEBUG
	{
	struct receive_objnode *last_object = list_tail(&list->list);
	uint64_t last_objnum = (last_object != NULL ? last_object->object : 0);
	ASSERT3U(node->object, >, last_objnum);
	}
#endif
	list_insert_tail(&list->list, node);
}
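
/*
 * Illustrative sketch (added commentary, not part of the upstream source):
 * the ascending-order contract of the objlist helpers above.
 */
#if 0
static void
objlist_usage_sketch(void)
{
	struct objlist ol;

	objlist_create(&ol);
	objlist_insert(&ol, 5);			/* appends; must ascend */
	objlist_insert(&ol, 9);
	VERIFY(objlist_exists(&ol, 5));		/* found; list is (5, 9) */
	VERIFY(!objlist_exists(&ol, 7));	/* not found; 5 is trimmed */
	VERIFY(objlist_exists(&ol, 9));		/* found; list is (9) */
	objlist_destroy(&ol);
}
#endif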
/*
 * Issue the prefetch reads for any necessary indirect blocks.
 *
 * We use the object ignore list to tell us whether or not to issue prefetches
 * for a given object.  We do this for both correctness (in case the blocksize
 * of an object has changed) and performance (if the object doesn't exist, don't
 * needlessly try to issue prefetches).  We also trim the list as we go through
 * the stream to prevent it from growing to an unbounded size.
 *
 * The object numbers within will always be in sorted order, and any write
 * records we see will also be in sorted order, but they're not sorted with
 * respect to each other (i.e. we can get several object records before
 * receiving each object's write records).  As a result, once we've reached a
 * given object number, we can safely remove any reference to lower object
 * numbers in the ignore list.  In practice, we receive up to 32 object records
 * before receiving write records, so the list can have up to 32 nodes in it.
 */
/* ARGSUSED */
static void
receive_read_prefetch(struct receive_arg *ra,
    uint64_t object, uint64_t offset, uint64_t length)
{
	if (!objlist_exists(&ra->ignore_objlist, object)) {
		dmu_prefetch(ra->os, object, 1, offset, length,
		    ZIO_PRIORITY_SYNC_READ);
	}
}
/*
 * Read records off the stream, issuing any necessary prefetches.
 */
static int
receive_read_record(struct receive_arg *ra)
{
	int err;

	switch (ra->rrd->header.drr_type) {
	case DRR_OBJECT:
	{
		struct drr_object *drro = &ra->rrd->header.drr_u.drr_object;
		uint32_t size = P2ROUNDUP(drro->drr_bonuslen, 8);
		void *buf = kmem_zalloc(size, KM_SLEEP);
		dmu_object_info_t doi;
		err = receive_read_payload_and_next_header(ra, size, buf);
		if (err != 0) {
			kmem_free(buf, size);
			return (err);
		}
		err = dmu_object_info(ra->os, drro->drr_object, &doi);
		/*
		 * See receive_read_prefetch for an explanation why we're
		 * storing this object in the ignore_obj_list.
		 */
		if (err == ENOENT || err == EEXIST ||
		    (err == 0 && doi.doi_data_block_size != drro->drr_blksz)) {
			objlist_insert(&ra->ignore_objlist, drro->drr_object);
			err = 0;
		}
		return (err);
	}
	case DRR_FREEOBJECTS:
	{
		err = receive_read_payload_and_next_header(ra, 0, NULL);
		return (err);
	}
	case DRR_WRITE:
	{
		struct drr_write *drrw = &ra->rrd->header.drr_u.drr_write;
		arc_buf_t *abuf;
		boolean_t is_meta = DMU_OT_IS_METADATA(drrw->drr_type);
		if (DRR_WRITE_COMPRESSED(drrw)) {
			ASSERT3U(drrw->drr_compressed_size, >, 0);
			ASSERT3U(drrw->drr_logical_size, >=,
			    drrw->drr_compressed_size);
			ASSERT(!is_meta);
			abuf = arc_loan_compressed_buf(
			    dmu_objset_spa(ra->os),
			    drrw->drr_compressed_size, drrw->drr_logical_size,
			    drrw->drr_compressiontype);
		} else {
			abuf = arc_loan_buf(dmu_objset_spa(ra->os),
			    is_meta, drrw->drr_logical_size);
		}

		err = receive_read_payload_and_next_header(ra,
		    DRR_WRITE_PAYLOAD_SIZE(drrw), abuf->b_data);
		if (err != 0) {
			dmu_return_arcbuf(abuf);
			return (err);
		}
		ra->rrd->write_buf = abuf;
		receive_read_prefetch(ra, drrw->drr_object, drrw->drr_offset,
		    drrw->drr_logical_size);
		return (err);
	}
	case DRR_WRITE_BYREF:
	{
		struct drr_write_byref *drrwb =
		    &ra->rrd->header.drr_u.drr_write_byref;
		err = receive_read_payload_and_next_header(ra, 0, NULL);
		receive_read_prefetch(ra, drrwb->drr_object, drrwb->drr_offset,
		    drrwb->drr_length);
		return (err);
	}
	case DRR_WRITE_EMBEDDED:
	{
		struct drr_write_embedded *drrwe =
		    &ra->rrd->header.drr_u.drr_write_embedded;
		uint32_t size = P2ROUNDUP(drrwe->drr_psize, 8);
		void *buf = kmem_zalloc(size, KM_SLEEP);

		err = receive_read_payload_and_next_header(ra, size, buf);
		if (err != 0) {
			kmem_free(buf, size);
			return (err);
		}

		receive_read_prefetch(ra, drrwe->drr_object, drrwe->drr_offset,
		    drrwe->drr_length);
		return (err);
	}
	case DRR_FREE:
	{
		/*
		 * It might be beneficial to prefetch indirect blocks here, but
		 * we don't really have the data to decide for sure.
		 */
		err = receive_read_payload_and_next_header(ra, 0, NULL);
		return (err);
	}
	case DRR_END:
	{
		struct drr_end *drre = &ra->rrd->header.drr_u.drr_end;
		if (!ZIO_CHECKSUM_EQUAL(ra->prev_cksum, drre->drr_checksum))
			return (SET_ERROR(ECKSUM));
		return (0);
	}
	case DRR_SPILL:
	{
		struct drr_spill *drrs = &ra->rrd->header.drr_u.drr_spill;
		void *buf = kmem_zalloc(drrs->drr_length, KM_SLEEP);
		err = receive_read_payload_and_next_header(ra,
		    drrs->drr_length, buf);
		if (err != 0)
			kmem_free(buf, drrs->drr_length);
		return (err);
	}
	default:
		return (SET_ERROR(EINVAL));
	}
}
static void
dprintf_drr(struct receive_record_arg *rrd, int err)
{
#ifdef ZFS_DEBUG
	switch (rrd->header.drr_type) {
	case DRR_OBJECT:
	{
		struct drr_object *drro = &rrd->header.drr_u.drr_object;
		dprintf("drr_type = OBJECT obj = %llu type = %u "
		    "bonustype = %u blksz = %u bonuslen = %u cksumtype = %u "
		    "compress = %u dn_slots = %u err = %d\n",
		    drro->drr_object, drro->drr_type, drro->drr_bonustype,
		    drro->drr_blksz, drro->drr_bonuslen,
		    drro->drr_checksumtype, drro->drr_compress,
		    drro->drr_dn_slots, err);
		break;
	}
	case DRR_FREEOBJECTS:
	{
		struct drr_freeobjects *drrfo =
		    &rrd->header.drr_u.drr_freeobjects;
		dprintf("drr_type = FREEOBJECTS firstobj = %llu "
		    "numobjs = %llu err = %d\n",
		    drrfo->drr_firstobj, drrfo->drr_numobjs, err);
		break;
	}
	case DRR_WRITE:
	{
		struct drr_write *drrw = &rrd->header.drr_u.drr_write;
		dprintf("drr_type = WRITE obj = %llu type = %u offset = %llu "
		    "lsize = %llu cksumtype = %u cksumflags = %u "
		    "compress = %u psize = %llu err = %d\n",
		    drrw->drr_object, drrw->drr_type, drrw->drr_offset,
		    drrw->drr_logical_size, drrw->drr_checksumtype,
		    drrw->drr_checksumflags, drrw->drr_compressiontype,
		    drrw->drr_compressed_size, err);
		break;
	}
	case DRR_WRITE_BYREF:
	{
		struct drr_write_byref *drrwbr =
		    &rrd->header.drr_u.drr_write_byref;
		dprintf("drr_type = WRITE_BYREF obj = %llu offset = %llu "
		    "length = %llu toguid = %llx refguid = %llx "
		    "refobject = %llu refoffset = %llu cksumtype = %u "
		    "cksumflags = %u err = %d\n",
		    drrwbr->drr_object, drrwbr->drr_offset,
		    drrwbr->drr_length, drrwbr->drr_toguid,
		    drrwbr->drr_refguid, drrwbr->drr_refobject,
		    drrwbr->drr_refoffset, drrwbr->drr_checksumtype,
		    drrwbr->drr_checksumflags, err);
		break;
	}
	case DRR_WRITE_EMBEDDED:
	{
		struct drr_write_embedded *drrwe =
		    &rrd->header.drr_u.drr_write_embedded;
		dprintf("drr_type = WRITE_EMBEDDED obj = %llu offset = %llu "
		    "length = %llu compress = %u etype = %u lsize = %u "
		    "psize = %u err = %d\n",
		    drrwe->drr_object, drrwe->drr_offset, drrwe->drr_length,
		    drrwe->drr_compression, drrwe->drr_etype,
		    drrwe->drr_lsize, drrwe->drr_psize, err);
		break;
	}
	case DRR_FREE:
	{
		struct drr_free *drrf = &rrd->header.drr_u.drr_free;
		dprintf("drr_type = FREE obj = %llu offset = %llu "
		    "length = %lld err = %d\n",
		    drrf->drr_object, drrf->drr_offset, drrf->drr_length,
		    err);
		break;
	}
	case DRR_SPILL:
	{
		struct drr_spill *drrs = &rrd->header.drr_u.drr_spill;
		dprintf("drr_type = SPILL obj = %llu length = %llu "
		    "err = %d\n", drrs->drr_object, drrs->drr_length, err);
		break;
	}
	default:
		return;
	}
#endif
}
/*
 * Commit the records to the pool.
 */
static int
receive_process_record(struct receive_writer_arg *rwa,
    struct receive_record_arg *rrd)
{
	int err;

	/* Processing in order, therefore bytes_read should be increasing. */
	ASSERT3U(rrd->bytes_read, >=, rwa->bytes_read);
	rwa->bytes_read = rrd->bytes_read;

	switch (rrd->header.drr_type) {
	case DRR_OBJECT:
	{
		struct drr_object *drro = &rrd->header.drr_u.drr_object;
		err = receive_object(rwa, drro, rrd->payload);
		kmem_free(rrd->payload, rrd->payload_size);
		rrd->payload = NULL;
		break;
	}
	case DRR_FREEOBJECTS:
	{
		struct drr_freeobjects *drrfo =
		    &rrd->header.drr_u.drr_freeobjects;
		err = receive_freeobjects(rwa, drrfo);
		break;
	}
	case DRR_WRITE:
	{
		struct drr_write *drrw = &rrd->header.drr_u.drr_write;
		err = receive_write(rwa, drrw, rrd->write_buf);
		/* if receive_write() is successful, it consumes the arc_buf */
		if (err != 0)
			dmu_return_arcbuf(rrd->write_buf);
		rrd->write_buf = NULL;
		rrd->payload = NULL;
		break;
	}
	case DRR_WRITE_BYREF:
	{
		struct drr_write_byref *drrwbr =
		    &rrd->header.drr_u.drr_write_byref;
		err = receive_write_byref(rwa, drrwbr);
		break;
	}
	case DRR_WRITE_EMBEDDED:
	{
		struct drr_write_embedded *drrwe =
		    &rrd->header.drr_u.drr_write_embedded;
		err = receive_write_embedded(rwa, drrwe, rrd->payload);
		kmem_free(rrd->payload, rrd->payload_size);
		rrd->payload = NULL;
		break;
	}
	case DRR_FREE:
	{
		struct drr_free *drrf = &rrd->header.drr_u.drr_free;
		err = receive_free(rwa, drrf);
		break;
	}
	case DRR_SPILL:
	{
		struct drr_spill *drrs = &rrd->header.drr_u.drr_spill;
		err = receive_spill(rwa, drrs, rrd->payload);
		kmem_free(rrd->payload, rrd->payload_size);
		rrd->payload = NULL;
		break;
	}
	default:
		return (SET_ERROR(EINVAL));
	}

	if (err != 0)
		dprintf_drr(rrd, err);

	return (err);
}
/*
 * dmu_recv_stream's worker thread; pull records off the queue, and then call
 * receive_process_record.  When we're done, signal the main thread and exit.
 */
static void
receive_writer_thread(void *arg)
{
	struct receive_writer_arg *rwa = arg;
	struct receive_record_arg *rrd;
	fstrans_cookie_t cookie = spl_fstrans_mark();

	for (rrd = bqueue_dequeue(&rwa->q); !rrd->eos_marker;
	    rrd = bqueue_dequeue(&rwa->q)) {
		/*
		 * If there's an error, the main thread will stop putting things
		 * on the queue, but we need to clear everything in it before we
		 * can exit.
		 */
		if (rwa->err == 0) {
			rwa->err = receive_process_record(rwa, rrd);
		} else if (rrd->write_buf != NULL) {
			dmu_return_arcbuf(rrd->write_buf);
			rrd->write_buf = NULL;
			rrd->payload = NULL;
		} else if (rrd->payload != NULL) {
			kmem_free(rrd->payload, rrd->payload_size);
			rrd->payload = NULL;
		}
		kmem_free(rrd, sizeof (*rrd));
	}
	kmem_free(rrd, sizeof (*rrd));
	mutex_enter(&rwa->mutex);
	rwa->done = B_TRUE;
	cv_signal(&rwa->cv);
	mutex_exit(&rwa->mutex);
	spl_fstrans_unmark(cookie);
	thread_exit();
}
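
/*
 * Note (added commentary): the main thread in dmu_recv_stream() shuts this
 * worker down by enqueueing a record with eos_marker set, then sleeps on
 * rwa->cv until the loop above has drained the queue and set rwa->done
 * under rwa->mutex.
 */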
static int
resume_check(struct receive_arg *ra, nvlist_t *begin_nvl)
{
	uint64_t val;
	objset_t *mos = dmu_objset_pool(ra->os)->dp_meta_objset;
	uint64_t dsobj = dmu_objset_id(ra->os);
	uint64_t resume_obj, resume_off;

	if (nvlist_lookup_uint64(begin_nvl,
	    "resume_object", &resume_obj) != 0 ||
	    nvlist_lookup_uint64(begin_nvl,
	    "resume_offset", &resume_off) != 0) {
		return (SET_ERROR(EINVAL));
	}
	VERIFY0(zap_lookup(mos, dsobj,
	    DS_FIELD_RESUME_OBJECT, sizeof (val), 1, &val));
	if (resume_obj != val)
		return (SET_ERROR(EINVAL));
	VERIFY0(zap_lookup(mos, dsobj,
	    DS_FIELD_RESUME_OFFSET, sizeof (val), 1, &val));
	if (resume_off != val)
		return (SET_ERROR(EINVAL));

	return (0);
}
/*
 * Read in the stream's records, one by one, and apply them to the pool.  There
 * are two threads involved; the thread that calls this function will spin up a
 * worker thread, read the records off the stream one by one, and issue
 * prefetches for any necessary indirect blocks.  It will then push the records
 * onto an internal blocking queue.  The worker thread will pull the records off
 * the queue, and actually write the data into the DMU.  This way, the worker
 * thread doesn't have to wait for reads to complete, since everything it needs
 * (the indirect blocks) will be prefetched.
 *
 * NB: callers *must* call dmu_recv_end() if this succeeds.
 */
int
dmu_recv_stream(dmu_recv_cookie_t *drc, vnode_t *vp, offset_t *voffp,
    int cleanup_fd, uint64_t *action_handlep)
{
	int err = 0;
	struct receive_arg *ra;
	struct receive_writer_arg *rwa;
	int featureflags;
	uint32_t payloadlen;
	void *payload;
	nvlist_t *begin_nvl = NULL;

	ra = kmem_zalloc(sizeof (*ra), KM_SLEEP);
	rwa = kmem_zalloc(sizeof (*rwa), KM_SLEEP);

	ra->byteswap = drc->drc_byteswap;
	ra->cksum = drc->drc_cksum;
	ra->vp = vp;
	ra->voff = *voffp;

	if (dsl_dataset_is_zapified(drc->drc_ds)) {
		(void) zap_lookup(drc->drc_ds->ds_dir->dd_pool->dp_meta_objset,
		    drc->drc_ds->ds_object, DS_FIELD_RESUME_BYTES,
		    sizeof (ra->bytes_read), 1, &ra->bytes_read);
	}

	objlist_create(&ra->ignore_objlist);

	/* these were verified in dmu_recv_begin */
	ASSERT3U(DMU_GET_STREAM_HDRTYPE(drc->drc_drrb->drr_versioninfo), ==,
	    DMU_SUBSTREAM);
	ASSERT3U(drc->drc_drrb->drr_type, <, DMU_OST_NUMTYPES);

	/*
	 * Open the objset we are modifying.
	 */
	VERIFY0(dmu_objset_from_ds(drc->drc_ds, &ra->os));

	ASSERT(dsl_dataset_phys(drc->drc_ds)->ds_flags & DS_FLAG_INCONSISTENT);

	featureflags = DMU_GET_FEATUREFLAGS(drc->drc_drrb->drr_versioninfo);

	/* if this stream is dedup'ed, set up the avl tree for guid mapping */
	if (featureflags & DMU_BACKUP_FEATURE_DEDUP) {
		minor_t minor;

		if (cleanup_fd == -1) {
			ra->err = SET_ERROR(EBADF);
			goto out;
		}
		ra->err = zfs_onexit_fd_hold(cleanup_fd, &minor);
		if (ra->err != 0) {
			cleanup_fd = -1;
			goto out;
		}

		if (*action_handlep == 0) {
			rwa->guid_to_ds_map =
			    kmem_alloc(sizeof (avl_tree_t), KM_SLEEP);
			avl_create(rwa->guid_to_ds_map, guid_compare,
			    sizeof (guid_map_entry_t),
			    offsetof(guid_map_entry_t, avlnode));
			err = zfs_onexit_add_cb(minor,
			    free_guid_map_onexit, rwa->guid_to_ds_map,
			    action_handlep);
			if (err != 0)
				goto out;
		} else {
			err = zfs_onexit_cb_data(minor, *action_handlep,
			    (void **)&rwa->guid_to_ds_map);
			if (err != 0)
				goto out;
		}

		drc->drc_guid_to_ds_map = rwa->guid_to_ds_map;
	}

	payloadlen = drc->drc_drr_begin->drr_payloadlen;
	payload = NULL;
	if (payloadlen != 0)
		payload = kmem_alloc(payloadlen, KM_SLEEP);

	err = receive_read_payload_and_next_header(ra, payloadlen, payload);
	if (err != 0) {
		if (payloadlen != 0)
			kmem_free(payload, payloadlen);
		goto out;
	}
	if (payloadlen != 0) {
		err = nvlist_unpack(payload, payloadlen, &begin_nvl, KM_SLEEP);
		kmem_free(payload, payloadlen);
		if (err != 0)
			goto out;
	}

	if (featureflags & DMU_BACKUP_FEATURE_RESUMING) {
		err = resume_check(ra, begin_nvl);
		if (err != 0)
			goto out;
	}

	(void) bqueue_init(&rwa->q,
	    MAX(zfs_recv_queue_length, 2 * zfs_max_recordsize),
	    offsetof(struct receive_record_arg, node));
	cv_init(&rwa->cv, NULL, CV_DEFAULT, NULL);
	mutex_init(&rwa->mutex, NULL, MUTEX_DEFAULT, NULL);
	rwa->os = ra->os;
	rwa->byteswap = drc->drc_byteswap;
	rwa->resumable = drc->drc_resumable;

	(void) thread_create(NULL, 0, receive_writer_thread, rwa, 0, curproc,
	    TS_RUN, minclsyspri);
	/*
	 * We're reading rwa->err without locks, which is safe since we are the
	 * only reader, and the worker thread is the only writer.  It's ok if we
	 * miss a write for an iteration or two of the loop, since the writer
	 * thread will keep freeing records we send it until we send it an eos
	 * marker.
	 *
	 * We can leave this loop in 3 ways:  First, if rwa->err is
	 * non-zero.  In that case, the writer thread will free the rrd we just
	 * pushed.  Second, if we're interrupted; in that case, either it's the
	 * first loop and ra->rrd was never allocated, or it's later and ra->rrd
	 * has been handed off to the writer thread who will free it.  Finally,
	 * if receive_read_record fails or we're at the end of the stream, then
	 * we free ra->rrd and exit.
	 */
	while (rwa->err == 0) {
		if (issig(JUSTLOOKING) && issig(FORREAL)) {
			err = SET_ERROR(EINTR);
			break;
		}

		ASSERT3P(ra->rrd, ==, NULL);
		ra->rrd = ra->next_rrd;
		ra->next_rrd = NULL;
		/* Allocates and loads header into ra->next_rrd */
		err = receive_read_record(ra);

		if (ra->rrd->header.drr_type == DRR_END || err != 0) {
			kmem_free(ra->rrd, sizeof (*ra->rrd));
			ra->rrd = NULL;
			break;
		}

		bqueue_enqueue(&rwa->q, ra->rrd,
		    sizeof (struct receive_record_arg) +
		    ra->rrd->payload_size);
		ra->rrd = NULL;
	}
	if (ra->next_rrd == NULL)
		ra->next_rrd = kmem_zalloc(sizeof (*ra->next_rrd), KM_SLEEP);
	ra->next_rrd->eos_marker = B_TRUE;
	bqueue_enqueue(&rwa->q, ra->next_rrd, 1);

	mutex_enter(&rwa->mutex);
	while (!rwa->done) {
		cv_wait(&rwa->cv, &rwa->mutex);
	}
	mutex_exit(&rwa->mutex);

	/*
	 * If we are receiving a full stream as a clone, all object IDs which
	 * are greater than the maximum ID referenced in the stream are
	 * by definition unused and must be freed.
	 */
	if (drc->drc_clone && drc->drc_drrb->drr_fromguid == 0) {
		uint64_t obj = rwa->max_object + 1;
		int free_err = 0;
		int next_err = 0;

		while (next_err == 0) {
			free_err = dmu_free_long_object(rwa->os, obj);
			if (free_err != 0 && free_err != ENOENT)
				break;

			next_err = dmu_object_next(rwa->os, &obj, FALSE, 0);
		}

		if (err == 0) {
			if (free_err != 0 && free_err != ENOENT)
				err = free_err;
			else if (next_err != ESRCH)
				err = next_err;
		}
	}

	cv_destroy(&rwa->cv);
	mutex_destroy(&rwa->mutex);
	bqueue_destroy(&rwa->q);
	if (err == 0)
		err = rwa->err;

out:
	nvlist_free(begin_nvl);
	if ((featureflags & DMU_BACKUP_FEATURE_DEDUP) && (cleanup_fd != -1))
		zfs_onexit_fd_rele(cleanup_fd);

	if (err != 0) {
		/*
		 * Clean up references. If receive is not resumable,
		 * destroy what we created, so we don't leave it in
		 * the inconsistent state.
		 */
		dmu_recv_cleanup_ds(drc);
	}

	*voffp = ra->voff;
	objlist_destroy(&ra->ignore_objlist);
	kmem_free(ra, sizeof (*ra));
	kmem_free(rwa, sizeof (*rwa));
	return (err);
}
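
/*
 * Usage note (added commentary): the expected call sequence from the ioctl
 * path is dmu_recv_begin(), dmu_recv_stream(), then dmu_recv_end() on
 * success.  On failure, dmu_recv_stream() calls dmu_recv_cleanup_ds()
 * itself, which destroys the partial dataset unless the receive is
 * resumable, in which case the state is kept for a later resuming stream.
 */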
static int
dmu_recv_end_check(void *arg, dmu_tx_t *tx)
{
	dmu_recv_cookie_t *drc = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	int error;

	ASSERT3P(drc->drc_ds->ds_owner, ==, dmu_recv_tag);

	if (!drc->drc_newfs) {
		dsl_dataset_t *origin_head;

		error = dsl_dataset_hold(dp, drc->drc_tofs, FTAG, &origin_head);
		if (error != 0)
			return (error);
		if (drc->drc_force) {
			/*
			 * We will destroy any snapshots in tofs (i.e. before
			 * origin_head) that are after the origin (which is
			 * the snap before drc_ds, because drc_ds can not
			 * have any snaps of its own).
			 */
			uint64_t obj;

			obj = dsl_dataset_phys(origin_head)->ds_prev_snap_obj;
			while (obj !=
			    dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj) {
				dsl_dataset_t *snap;
				error = dsl_dataset_hold_obj(dp, obj, FTAG,
				    &snap);
				if (error != 0)
					break;
				if (snap->ds_dir != origin_head->ds_dir)
					error = SET_ERROR(EINVAL);
				if (error == 0) {
					error = dsl_destroy_snapshot_check_impl(
					    snap, tx);
				}
				obj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
				dsl_dataset_rele(snap, FTAG);
				if (error != 0)
					break;
			}
			if (error != 0) {
				dsl_dataset_rele(origin_head, FTAG);
				return (error);
			}
		}
		error = dsl_dataset_clone_swap_check_impl(drc->drc_ds,
		    origin_head, drc->drc_force, drc->drc_owner, tx);
		if (error != 0) {
			dsl_dataset_rele(origin_head, FTAG);
			return (error);
		}
		error = dsl_dataset_snapshot_check_impl(origin_head,
		    drc->drc_tosnap, tx, B_TRUE, 1, drc->drc_cred);
		dsl_dataset_rele(origin_head, FTAG);
		if (error != 0)
			return (error);

		error = dsl_destroy_head_check_impl(drc->drc_ds, 1);
	} else {
		error = dsl_dataset_snapshot_check_impl(drc->drc_ds,
		    drc->drc_tosnap, tx, B_TRUE, 1, drc->drc_cred);
	}
	return (error);
}
static void
dmu_recv_end_sync(void *arg, dmu_tx_t *tx)
{
	dmu_recv_cookie_t *drc = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);

	spa_history_log_internal_ds(drc->drc_ds, "finish receiving",
	    tx, "snap=%s", drc->drc_tosnap);

	if (!drc->drc_newfs) {
		dsl_dataset_t *origin_head;

		VERIFY0(dsl_dataset_hold(dp, drc->drc_tofs, FTAG,
		    &origin_head));

		if (drc->drc_force) {
			/*
			 * Destroy any snapshots of drc_tofs (origin_head)
			 * after the origin (the snap before drc_ds).
			 */
			uint64_t obj;

			obj = dsl_dataset_phys(origin_head)->ds_prev_snap_obj;
			while (obj !=
			    dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj) {
				dsl_dataset_t *snap;
				VERIFY0(dsl_dataset_hold_obj(dp, obj, FTAG,
				    &snap));
				ASSERT3P(snap->ds_dir, ==, origin_head->ds_dir);
				obj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
				dsl_destroy_snapshot_sync_impl(snap,
				    B_FALSE, tx);
				dsl_dataset_rele(snap, FTAG);
			}
		}
		VERIFY3P(drc->drc_ds->ds_prev, ==,
		    origin_head->ds_prev);

		dsl_dataset_clone_swap_sync_impl(drc->drc_ds,
		    origin_head, tx);
		dsl_dataset_snapshot_sync_impl(origin_head,
		    drc->drc_tosnap, tx);

		/* set snapshot's creation time and guid */
		dmu_buf_will_dirty(origin_head->ds_prev->ds_dbuf, tx);
		dsl_dataset_phys(origin_head->ds_prev)->ds_creation_time =
		    drc->drc_drrb->drr_creation_time;
		dsl_dataset_phys(origin_head->ds_prev)->ds_guid =
		    drc->drc_drrb->drr_toguid;
		dsl_dataset_phys(origin_head->ds_prev)->ds_flags &=
		    ~DS_FLAG_INCONSISTENT;

		dmu_buf_will_dirty(origin_head->ds_dbuf, tx);
		dsl_dataset_phys(origin_head)->ds_flags &=
		    ~DS_FLAG_INCONSISTENT;

		drc->drc_newsnapobj =
		    dsl_dataset_phys(origin_head)->ds_prev_snap_obj;

		dsl_dataset_rele(origin_head, FTAG);
		dsl_destroy_head_sync_impl(drc->drc_ds, tx);

		if (drc->drc_owner != NULL)
			VERIFY3P(origin_head->ds_owner, ==, drc->drc_owner);
	} else {
		dsl_dataset_t *ds = drc->drc_ds;

		dsl_dataset_snapshot_sync_impl(ds, drc->drc_tosnap, tx);

		/* set snapshot's creation time and guid */
		dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
		dsl_dataset_phys(ds->ds_prev)->ds_creation_time =
		    drc->drc_drrb->drr_creation_time;
		dsl_dataset_phys(ds->ds_prev)->ds_guid =
		    drc->drc_drrb->drr_toguid;
		dsl_dataset_phys(ds->ds_prev)->ds_flags &=
		    ~DS_FLAG_INCONSISTENT;

		dmu_buf_will_dirty(ds->ds_dbuf, tx);
		dsl_dataset_phys(ds)->ds_flags &= ~DS_FLAG_INCONSISTENT;
		if (dsl_dataset_has_resume_receive_state(ds)) {
			(void) zap_remove(dp->dp_meta_objset, ds->ds_object,
			    DS_FIELD_RESUME_FROMGUID, tx);
			(void) zap_remove(dp->dp_meta_objset, ds->ds_object,
			    DS_FIELD_RESUME_OBJECT, tx);
			(void) zap_remove(dp->dp_meta_objset, ds->ds_object,
			    DS_FIELD_RESUME_OFFSET, tx);
			(void) zap_remove(dp->dp_meta_objset, ds->ds_object,
			    DS_FIELD_RESUME_BYTES, tx);
			(void) zap_remove(dp->dp_meta_objset, ds->ds_object,
			    DS_FIELD_RESUME_TOGUID, tx);
			(void) zap_remove(dp->dp_meta_objset, ds->ds_object,
			    DS_FIELD_RESUME_TONAME, tx);
		}
		drc->drc_newsnapobj =
		    dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj;
	}
	zvol_create_minors(dp->dp_spa, drc->drc_tofs, B_TRUE);
	/*
	 * Release the hold from dmu_recv_begin.  This must be done before
	 * we return to open context, so that when we free the dataset's dnode,
	 * we can evict its bonus buffer.
	 */
	dsl_dataset_disown(drc->drc_ds, dmu_recv_tag);
	drc->drc_ds = NULL;
}
static int
add_ds_to_guidmap(const char *name, avl_tree_t *guid_map, uint64_t snapobj)
{
	dsl_pool_t *dp;
	dsl_dataset_t *snapds;
	guid_map_entry_t *gmep;
	int err;

	ASSERT(guid_map != NULL);

	err = dsl_pool_hold(name, FTAG, &dp);
	if (err != 0)
		return (err);
	gmep = kmem_alloc(sizeof (*gmep), KM_SLEEP);
	err = dsl_dataset_hold_obj(dp, snapobj, gmep, &snapds);
	if (err == 0) {
		gmep->guid = dsl_dataset_phys(snapds)->ds_guid;
		gmep->gme_ds = snapds;
		avl_add(guid_map, gmep);
		dsl_dataset_long_hold(snapds, gmep);
	} else {
		kmem_free(gmep, sizeof (*gmep));
	}

	dsl_pool_rele(dp, FTAG);
	return (err);
}
static int dmu_recv_end_modified_blocks = 3;

static int
dmu_recv_existing_end(dmu_recv_cookie_t *drc)
{
#ifdef _KERNEL
	/*
	 * We will be destroying the ds; make sure its origin is unmounted if
	 * necessary.
	 */
	char name[ZFS_MAX_DATASET_NAME_LEN];
	dsl_dataset_name(drc->drc_ds, name);
	zfs_destroy_unmount_origin(name);
#endif

	return (dsl_sync_task(drc->drc_tofs,
	    dmu_recv_end_check, dmu_recv_end_sync, drc,
	    dmu_recv_end_modified_blocks, ZFS_SPACE_CHECK_NORMAL));
}

static int
dmu_recv_new_end(dmu_recv_cookie_t *drc)
{
	return (dsl_sync_task(drc->drc_tofs,
	    dmu_recv_end_check, dmu_recv_end_sync, drc,
	    dmu_recv_end_modified_blocks, ZFS_SPACE_CHECK_NORMAL));
}
int
dmu_recv_end(dmu_recv_cookie_t *drc, void *owner)
{
	int error;

	drc->drc_owner = owner;

	if (drc->drc_newfs)
		error = dmu_recv_new_end(drc);
	else
		error = dmu_recv_existing_end(drc);

	if (error != 0) {
		dmu_recv_cleanup_ds(drc);
	} else if (drc->drc_guid_to_ds_map != NULL) {
		(void) add_ds_to_guidmap(drc->drc_tofs,
		    drc->drc_guid_to_ds_map,
		    drc->drc_newsnapobj);
	}
	return (error);
}
/*
 * Return TRUE if this objset is currently being received into.
 */
boolean_t
dmu_objset_is_receiving(objset_t *os)
{
	return (os->os_dsl_dataset != NULL &&
	    os->os_dsl_dataset->ds_owner == dmu_recv_tag);
}
#if defined(_KERNEL)
module_param(zfs_send_corrupt_data, int, 0644);
MODULE_PARM_DESC(zfs_send_corrupt_data, "Allow sending corrupt data");

module_param(zfs_send_queue_length, int, 0644);
MODULE_PARM_DESC(zfs_send_queue_length, "Maximum send queue length");

module_param(zfs_recv_queue_length, int, 0644);
MODULE_PARM_DESC(zfs_recv_queue_length, "Maximum receive queue length");
#endif