/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2011, 2015 by Delphix. All rights reserved.
 * Copyright (c) 2014, Joyent, Inc. All rights reserved.
 * Copyright 2014 HybridCluster. All rights reserved.
 * Copyright 2016 RackTop Systems.
 * Copyright (c) 2016 Actifio, Inc. All rights reserved.
 */

#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/spa_impl.h>
#include <sys/zfs_ioctl.h>
#include <sys/zap.h>
#include <sys/zio_checksum.h>
#include <sys/zfs_znode.h>
#include <zfs_fletcher.h>
#include <sys/avl.h>
#include <sys/ddt.h>
#include <sys/zfs_onexit.h>
#include <sys/dmu_send.h>
#include <sys/dsl_destroy.h>
#include <sys/blkptr.h>
#include <sys/dsl_bookmark.h>
#include <sys/zfeature.h>
#include <sys/bqueue.h>
#include <sys/zvol.h>
#include <sys/policy.h>

/* Set this tunable to TRUE to replace corrupt data with 0x2f5baddb10c */
int zfs_send_corrupt_data = B_FALSE;
int zfs_send_queue_length = 16 * 1024 * 1024;
int zfs_recv_queue_length = 16 * 1024 * 1024;
/* Set this tunable to FALSE to disable setting of DRR_FLAG_FREERECORDS */
int zfs_send_set_freerecords_bit = B_TRUE;

static char *dmu_recv_tag = "dmu_recv_tag";
const char *recv_clone_name = "%recv";

#define	BP_SPAN(datablkszsec, indblkshift, level) \
	(((uint64_t)datablkszsec) << (SPA_MINBLOCKSHIFT + \
	(level) * (indblkshift - SPA_BLKPTRSHIFT)))

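/*
 * Worked example of BP_SPAN (illustrative values, not taken from this
 * file): with 128K data blocks, datablkszsec is 256
 * (256 << SPA_MINBLOCKSHIFT == 128K), so the level-0 span is 128K.
 * With 128K indirect blocks (indblkshift == 17), each indirect block
 * holds 2^(17 - SPA_BLKPTRSHIFT) == 1024 block pointers, so every
 * additional level multiplies the span by 1024:
 * BP_SPAN(256, 17, 1) == 256 << (9 + 10) == 128M.
 */
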
static void byteswap_record(dmu_replay_record_t *drr);

struct send_thread_arg {
	bqueue_t q;
	dsl_dataset_t *ds;	/* Dataset to traverse */
	uint64_t fromtxg;	/* Traverse from this txg */
	int flags;		/* flags to pass to traverse_dataset */
	int error_code;
	boolean_t cancel;
	zbookmark_phys_t resume;
};

struct send_block_record {
	boolean_t eos_marker;	/* Marks the end of the stream */
	blkptr_t bp;
	zbookmark_phys_t zb;
	uint8_t indblkshift;
	uint16_t datablkszsec;
	bqueue_node_t ln;
};

typedef struct dump_bytes_io {
	dmu_sendarg_t	*dbi_dsp;
	void		*dbi_buf;
	int		dbi_len;
} dump_bytes_io_t;

static void
dump_bytes_cb(void *arg)
{
	dump_bytes_io_t *dbi = (dump_bytes_io_t *)arg;
	dmu_sendarg_t *dsp = dbi->dbi_dsp;
	dsl_dataset_t *ds = dmu_objset_ds(dsp->dsa_os);
	ssize_t resid; /* have to get resid to get detailed errno */

	/*
	 * The code does not rely on len being a multiple of 8. We keep
	 * this assertion because of the corresponding assertion in
	 * receive_read(). Keeping this assertion ensures that we do not
	 * inadvertently break backwards compatibility (causing the assertion
	 * in receive_read() to trigger on old software). Newer feature flags
	 * (such as raw send) may break this assertion since they were
	 * introduced after the requirement was made obsolete.
	 */

	ASSERT(dbi->dbi_len % 8 == 0 ||
	    (dsp->dsa_featureflags & DMU_BACKUP_FEATURE_RAW) != 0);

	dsp->dsa_err = vn_rdwr(UIO_WRITE, dsp->dsa_vp,
	    (caddr_t)dbi->dbi_buf, dbi->dbi_len,
	    0, UIO_SYSSPACE, FAPPEND, RLIM64_INFINITY, CRED(), &resid);

	mutex_enter(&ds->ds_sendstream_lock);
	*dsp->dsa_off += dbi->dbi_len;
	mutex_exit(&ds->ds_sendstream_lock);
}

static int
dump_bytes(dmu_sendarg_t *dsp, void *buf, int len)
{
	dump_bytes_io_t dbi;

	dbi.dbi_dsp = dsp;
	dbi.dbi_buf = buf;
	dbi.dbi_len = len;

#if defined(HAVE_LARGE_STACKS)
	dump_bytes_cb(&dbi);
#else
	/*
	 * The vn_rdwr() call is performed in a taskq to ensure that there is
	 * always enough stack space to write safely to the target filesystem.
	 * The ZIO_TYPE_FREE threads are used because there can be a lot of
	 * them and they are used in vdev_file.c for a similar purpose.
	 */
	spa_taskq_dispatch_sync(dmu_objset_spa(dsp->dsa_os), ZIO_TYPE_FREE,
	    ZIO_TASKQ_ISSUE, dump_bytes_cb, &dbi, TQ_SLEEP);
#endif /* HAVE_LARGE_STACKS */

	return (dsp->dsa_err);
}

/*
 * For all record types except BEGIN, fill in the checksum (overlaid in
 * drr_u.drr_checksum.drr_checksum). The checksum verifies everything
 * up to the start of the checksum itself.
 */
static int
dump_record(dmu_sendarg_t *dsp, void *payload, int payload_len)
{
	ASSERT3U(offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
	    ==, sizeof (dmu_replay_record_t) - sizeof (zio_cksum_t));
	(void) fletcher_4_incremental_native(dsp->dsa_drr,
	    offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
	    &dsp->dsa_zc);
	if (dsp->dsa_drr->drr_type == DRR_BEGIN) {
		dsp->dsa_sent_begin = B_TRUE;
	} else {
		ASSERT(ZIO_CHECKSUM_IS_ZERO(&dsp->dsa_drr->drr_u.
		    drr_checksum.drr_checksum));
		dsp->dsa_drr->drr_u.drr_checksum.drr_checksum = dsp->dsa_zc;
	}
	if (dsp->dsa_drr->drr_type == DRR_END) {
		dsp->dsa_sent_end = B_TRUE;
	}
	(void) fletcher_4_incremental_native(&dsp->dsa_drr->
	    drr_u.drr_checksum.drr_checksum,
	    sizeof (zio_cksum_t), &dsp->dsa_zc);
	if (dump_bytes(dsp, dsp->dsa_drr, sizeof (dmu_replay_record_t)) != 0)
		return (SET_ERROR(EINTR));
	if (payload_len != 0) {
		(void) fletcher_4_incremental_native(payload, payload_len,
		    &dsp->dsa_zc);
		if (dump_bytes(dsp, payload, payload_len) != 0)
			return (SET_ERROR(EINTR));
	}
	return (0);
}

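/*
 * Illustrative sketch of the receiving side of this checksum (hedged;
 * the actual verification lives in the receive path, not in this
 * function): a receiver runs the same incremental fletcher4 over the
 * stream and compares its running value against the checksum overlaid
 * in each non-BEGIN record, e.g.:
 *
 *	zio_cksum_t zc = { { 0 } };
 *	fletcher_4_incremental_native(drr, offsetof(dmu_replay_record_t,
 *	    drr_u.drr_checksum.drr_checksum), &zc);
 *	// zc should now match drr->drr_u.drr_checksum.drr_checksum
 */
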
/*
 * Fill in the drr_free struct, or perform aggregation if the previous record
 * is also a free record, and the two are adjacent.
 *
 * Note that we send free records even for a full send, because we want to be
 * able to receive a full send as a clone, which requires a list of all the
 * free and freeobject records that were generated on the source.
 */
static int
dump_free(dmu_sendarg_t *dsp, uint64_t object, uint64_t offset,
    uint64_t length)
{
	struct drr_free *drrf = &(dsp->dsa_drr->drr_u.drr_free);

	/*
	 * When we receive a free record, dbuf_free_range() assumes
	 * that the receiving system doesn't have any dbufs in the range
	 * being freed. This is always true because there is a one-record
	 * constraint: we only send one WRITE record for any given
	 * object,offset. We know that the one-record constraint is
	 * true because we always send data in increasing order by
	 * object,offset.
	 *
	 * If the increasing-order constraint ever changes, we should find
	 * another way to assert that the one-record constraint is still
	 * satisfied.
	 */
	ASSERT(object > dsp->dsa_last_data_object ||
	    (object == dsp->dsa_last_data_object &&
	    offset > dsp->dsa_last_data_offset));

	/*
	 * If there is a pending op, but it's not PENDING_FREE, push it out,
	 * since free block aggregation can only be done for blocks of the
	 * same type (i.e., DRR_FREE records can only be aggregated with
	 * other DRR_FREE records, and DRR_FREEOBJECTS records can only be
	 * aggregated with other DRR_FREEOBJECTS records).
	 */
	if (dsp->dsa_pending_op != PENDING_NONE &&
	    dsp->dsa_pending_op != PENDING_FREE) {
		if (dump_record(dsp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
		dsp->dsa_pending_op = PENDING_NONE;
	}

	if (dsp->dsa_pending_op == PENDING_FREE) {
		/*
		 * There should never be a PENDING_FREE if length is
		 * DMU_OBJECT_END (because dump_dnode is the only place where
		 * this function is called with a DMU_OBJECT_END, and only
		 * after flushing any pending record).
		 */
		ASSERT(length != DMU_OBJECT_END);
		/*
		 * Check to see whether this free block can be aggregated
		 * with the pending one.
		 */
		if (drrf->drr_object == object && drrf->drr_offset +
		    drrf->drr_length == offset) {
			if (offset + length < offset)
				drrf->drr_length = DMU_OBJECT_END;
			else
				drrf->drr_length += length;
			return (0);
		} else {
			/* not a continuation. Push out pending record */
			if (dump_record(dsp, NULL, 0) != 0)
				return (SET_ERROR(EINTR));
			dsp->dsa_pending_op = PENDING_NONE;
		}
	}
	/* create a FREE record and make it pending */
	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_FREE;
	drrf->drr_object = object;
	drrf->drr_offset = offset;
	if (offset + length < offset)
		drrf->drr_length = DMU_OBJECT_END;
	else
		drrf->drr_length = length;
	drrf->drr_toguid = dsp->dsa_toguid;
	if (length == DMU_OBJECT_END) {
		if (dump_record(dsp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
	} else {
		dsp->dsa_pending_op = PENDING_FREE;
	}

	return (0);
}

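/*
 * Aggregation example (hypothetical values): dump_free(dsp, 5, 0, 128K)
 * followed by dump_free(dsp, 5, 128K, 128K) leaves a single pending
 * DRR_FREE record covering object 5, offset 0, length 256K. The record
 * only reaches the stream once a non-adjacent free, a record of another
 * type, or a DMU_OBJECT_END-length free forces it out.
 */
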
static int
dump_write(dmu_sendarg_t *dsp, dmu_object_type_t type, uint64_t object,
    uint64_t offset, int lsize, int psize, const blkptr_t *bp, void *data)
{
	uint64_t payload_size;
	boolean_t raw = (dsp->dsa_featureflags & DMU_BACKUP_FEATURE_RAW);
	struct drr_write *drrw = &(dsp->dsa_drr->drr_u.drr_write);

	/*
	 * We send data in increasing object, offset order.
	 * See comment in dump_free() for details.
	 */
	ASSERT(object > dsp->dsa_last_data_object ||
	    (object == dsp->dsa_last_data_object &&
	    offset > dsp->dsa_last_data_offset));
	dsp->dsa_last_data_object = object;
	dsp->dsa_last_data_offset = offset + lsize - 1;

	/*
	 * If there is any kind of pending aggregation (currently either
	 * a grouping of free objects or free blocks), push it out to
	 * the stream, since aggregation can't be done across operations
	 * of different types.
	 */
	if (dsp->dsa_pending_op != PENDING_NONE) {
		if (dump_record(dsp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
		dsp->dsa_pending_op = PENDING_NONE;
	}
	/* write a WRITE record */
	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_WRITE;
	drrw->drr_object = object;
	drrw->drr_type = type;
	drrw->drr_offset = offset;
	drrw->drr_toguid = dsp->dsa_toguid;
	drrw->drr_logical_size = lsize;

	/* only set the compression fields if the buf is compressed or raw */
	if (raw || lsize != psize) {
		ASSERT(!BP_IS_EMBEDDED(bp));
		ASSERT3S(psize, >, 0);

		if (raw) {
			ASSERT(BP_IS_PROTECTED(bp));

			/*
			 * This is a raw protected block so we need to pass
			 * along everything the receiving side will need to
			 * interpret this block, including the byteswap, salt,
			 * IV, and MAC.
			 */
			if (BP_SHOULD_BYTESWAP(bp))
				drrw->drr_flags |= DRR_RAW_BYTESWAP;
			zio_crypt_decode_params_bp(bp, drrw->drr_salt,
			    drrw->drr_iv);
			zio_crypt_decode_mac_bp(bp, drrw->drr_mac);
		} else {
			/* this is a compressed block */
			ASSERT(dsp->dsa_featureflags &
			    DMU_BACKUP_FEATURE_COMPRESSED);
			ASSERT(!BP_SHOULD_BYTESWAP(bp));
			ASSERT(!DMU_OT_IS_METADATA(BP_GET_TYPE(bp)));
			ASSERT3U(BP_GET_COMPRESS(bp), !=, ZIO_COMPRESS_OFF);
			ASSERT3S(lsize, >=, psize);
		}

		/* set fields common to compressed and raw sends */
		drrw->drr_compressiontype = BP_GET_COMPRESS(bp);
		drrw->drr_compressed_size = psize;
		payload_size = drrw->drr_compressed_size;
	} else {
		payload_size = drrw->drr_logical_size;
	}

	if (bp == NULL || BP_IS_EMBEDDED(bp) || (BP_IS_PROTECTED(bp) && !raw)) {
		/*
		 * There's no pre-computed checksum for partial-block writes,
		 * embedded BP's, or encrypted BP's that are being sent as
		 * plaintext, so (like fletcher4-checksummed blocks) userland
		 * will have to compute a dedup-capable checksum itself.
		 */
		drrw->drr_checksumtype = ZIO_CHECKSUM_OFF;
	} else {
		drrw->drr_checksumtype = BP_GET_CHECKSUM(bp);
		if (zio_checksum_table[drrw->drr_checksumtype].ci_flags &
		    ZCHECKSUM_FLAG_DEDUP)
			drrw->drr_flags |= DRR_CHECKSUM_DEDUP;
		DDK_SET_LSIZE(&drrw->drr_key, BP_GET_LSIZE(bp));
		DDK_SET_PSIZE(&drrw->drr_key, BP_GET_PSIZE(bp));
		DDK_SET_COMPRESS(&drrw->drr_key, BP_GET_COMPRESS(bp));
		DDK_SET_CRYPT(&drrw->drr_key, BP_IS_PROTECTED(bp));
		drrw->drr_key.ddk_cksum = bp->blk_cksum;
	}

	if (dump_record(dsp, data, payload_size) != 0)
		return (SET_ERROR(EINTR));
	return (0);
}

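/*
 * Example of the payload sizing above (illustrative): an lz4 block with
 * lsize 128K and psize 16K sent with stream compression enabled sets
 * drr_logical_size = 128K and drr_compressed_size = 16K, and a 16K
 * payload follows the record. The same block sent without stream
 * compression is decompressed by the ARC read, so lsize == psize and a
 * full 128K payload is written.
 */
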
static int
dump_write_embedded(dmu_sendarg_t *dsp, uint64_t object, uint64_t offset,
    int blksz, const blkptr_t *bp)
{
	char buf[BPE_PAYLOAD_SIZE];
	struct drr_write_embedded *drrw =
	    &(dsp->dsa_drr->drr_u.drr_write_embedded);

	if (dsp->dsa_pending_op != PENDING_NONE) {
		if (dump_record(dsp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
		dsp->dsa_pending_op = PENDING_NONE;
	}

	ASSERT(BP_IS_EMBEDDED(bp));

	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_WRITE_EMBEDDED;
	drrw->drr_object = object;
	drrw->drr_offset = offset;
	drrw->drr_length = blksz;
	drrw->drr_toguid = dsp->dsa_toguid;
	drrw->drr_compression = BP_GET_COMPRESS(bp);
	drrw->drr_etype = BPE_GET_ETYPE(bp);
	drrw->drr_lsize = BPE_GET_LSIZE(bp);
	drrw->drr_psize = BPE_GET_PSIZE(bp);

	decode_embedded_bp_compressed(bp, buf);

	if (dump_record(dsp, buf, P2ROUNDUP(drrw->drr_psize, 8)) != 0)
		return (SET_ERROR(EINTR));
	return (0);
}

static int
dump_spill(dmu_sendarg_t *dsp, const blkptr_t *bp, uint64_t object, void *data)
{
	struct drr_spill *drrs = &(dsp->dsa_drr->drr_u.drr_spill);
	uint64_t blksz = BP_GET_LSIZE(bp);

	if (dsp->dsa_pending_op != PENDING_NONE) {
		if (dump_record(dsp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
		dsp->dsa_pending_op = PENDING_NONE;
	}

	/* write a SPILL record */
	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_SPILL;
	drrs->drr_object = object;
	drrs->drr_length = blksz;
	drrs->drr_toguid = dsp->dsa_toguid;

	/* handle raw send fields */
	if (dsp->dsa_featureflags & DMU_BACKUP_FEATURE_RAW) {
		ASSERT(BP_IS_PROTECTED(bp));

		if (BP_SHOULD_BYTESWAP(bp))
			drrs->drr_flags |= DRR_RAW_BYTESWAP;
		drrs->drr_compressiontype = BP_GET_COMPRESS(bp);
		drrs->drr_compressed_size = BP_GET_PSIZE(bp);
		zio_crypt_decode_params_bp(bp, drrs->drr_salt, drrs->drr_iv);
		zio_crypt_decode_mac_bp(bp, drrs->drr_mac);
	}

	if (dump_record(dsp, data, blksz) != 0)
		return (SET_ERROR(EINTR));
	return (0);
}

static int
dump_freeobjects(dmu_sendarg_t *dsp, uint64_t firstobj, uint64_t numobjs)
{
	struct drr_freeobjects *drrfo = &(dsp->dsa_drr->drr_u.drr_freeobjects);
	uint64_t maxobj = DNODES_PER_BLOCK *
	    (DMU_META_DNODE(dsp->dsa_os)->dn_maxblkid + 1);

	/*
	 * ZoL < 0.7 does not handle large FREEOBJECTS records correctly,
	 * leading to zfs recv never completing. To avoid this issue, don't
	 * send FREEOBJECTS records for object IDs which cannot exist on the
	 * receiving side.
	 */
	if (maxobj > 0) {
		if (maxobj < firstobj)
			return (0);

		if (maxobj < firstobj + numobjs)
			numobjs = maxobj - firstobj;
	}

	/*
	 * If there is a pending op, but it's not PENDING_FREEOBJECTS,
	 * push it out, since free block aggregation can only be done for
	 * blocks of the same type (i.e., DRR_FREE records can only be
	 * aggregated with other DRR_FREE records, and DRR_FREEOBJECTS
	 * records can only be aggregated with other DRR_FREEOBJECTS
	 * records).
	 */
	if (dsp->dsa_pending_op != PENDING_NONE &&
	    dsp->dsa_pending_op != PENDING_FREEOBJECTS) {
		if (dump_record(dsp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
		dsp->dsa_pending_op = PENDING_NONE;
	}
	if (dsp->dsa_pending_op == PENDING_FREEOBJECTS) {
		/*
		 * See whether this free object array can be aggregated
		 * with the pending one.
		 */
		if (drrfo->drr_firstobj + drrfo->drr_numobjs == firstobj) {
			drrfo->drr_numobjs += numobjs;
			return (0);
		} else {
			/* can't be aggregated. Push out pending record */
			if (dump_record(dsp, NULL, 0) != 0)
				return (SET_ERROR(EINTR));
			dsp->dsa_pending_op = PENDING_NONE;
		}
	}

	/* write a FREEOBJECTS record */
	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_FREEOBJECTS;
	drrfo->drr_firstobj = firstobj;
	drrfo->drr_numobjs = numobjs;
	drrfo->drr_toguid = dsp->dsa_toguid;

	dsp->dsa_pending_op = PENDING_FREEOBJECTS;

	return (0);
}

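/*
 * Example of the maxobj clamp above (illustrative): with a single
 * meta-dnode block (dn_maxblkid == 0) and 32 dnodes per block, maxobj
 * is 32, so dump_freeobjects(dsp, 20, 100) is trimmed to cover objects
 * [20, 32) and dump_freeobjects(dsp, 40, 10) is dropped entirely.
 */
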
static int
dump_dnode(dmu_sendarg_t *dsp, const blkptr_t *bp, uint64_t object,
    dnode_phys_t *dnp)
{
	struct drr_object *drro = &(dsp->dsa_drr->drr_u.drr_object);
	int bonuslen;

	if (object < dsp->dsa_resume_object) {
		/*
		 * Note: when resuming, we will visit all the dnodes in
		 * the block of dnodes that we are resuming from. In
		 * this case it's unnecessary to send the dnodes prior to
		 * the one we are resuming from. We should be at most one
		 * block's worth of dnodes behind the resume point.
		 */
		ASSERT3U(dsp->dsa_resume_object - object, <,
		    1 << (DNODE_BLOCK_SHIFT - DNODE_SHIFT));
		return (0);
	}

	if (dnp == NULL || dnp->dn_type == DMU_OT_NONE)
		return (dump_freeobjects(dsp, object, 1));

	if (dsp->dsa_pending_op != PENDING_NONE) {
		if (dump_record(dsp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
		dsp->dsa_pending_op = PENDING_NONE;
	}

	/* write an OBJECT record */
	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_OBJECT;
	drro->drr_object = object;
	drro->drr_type = dnp->dn_type;
	drro->drr_bonustype = dnp->dn_bonustype;
	drro->drr_blksz = dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT;
	drro->drr_bonuslen = dnp->dn_bonuslen;
	drro->drr_dn_slots = dnp->dn_extra_slots + 1;
	drro->drr_checksumtype = dnp->dn_checksum;
	drro->drr_compress = dnp->dn_compress;
	drro->drr_toguid = dsp->dsa_toguid;

	if (!(dsp->dsa_featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) &&
	    drro->drr_blksz > SPA_OLD_MAXBLOCKSIZE)
		drro->drr_blksz = SPA_OLD_MAXBLOCKSIZE;

	bonuslen = P2ROUNDUP(dnp->dn_bonuslen, 8);

	if ((dsp->dsa_featureflags & DMU_BACKUP_FEATURE_RAW)) {
		ASSERT(BP_IS_ENCRYPTED(bp));

		if (BP_SHOULD_BYTESWAP(bp))
			drro->drr_flags |= DRR_RAW_BYTESWAP;

		/* needed for reconstructing dnp on recv side */
		drro->drr_maxblkid = dnp->dn_maxblkid;
		drro->drr_indblkshift = dnp->dn_indblkshift;
		drro->drr_nlevels = dnp->dn_nlevels;
		drro->drr_nblkptr = dnp->dn_nblkptr;

		/*
		 * Since we encrypt the entire bonus area, the (raw) part
		 * beyond the bonuslen is actually nonzero, so we need
		 * to send it.
		 */
		if (bonuslen != 0) {
			drro->drr_raw_bonuslen = DN_MAX_BONUS_LEN(dnp);
			bonuslen = drro->drr_raw_bonuslen;
		}
	}

	if (dump_record(dsp, DN_BONUS(dnp), bonuslen) != 0)
		return (SET_ERROR(EINTR));

	/* Free anything past the end of the file. */
	if (dump_free(dsp, object, (dnp->dn_maxblkid + 1) *
	    (dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT), DMU_OBJECT_END) != 0)
		return (SET_ERROR(EINTR));
	if (dsp->dsa_err != 0)
		return (SET_ERROR(EINTR));
	return (0);
}

static int
dump_object_range(dmu_sendarg_t *dsp, const blkptr_t *bp, uint64_t firstobj,
    uint64_t numslots)
{
	struct drr_object_range *drror =
	    &(dsp->dsa_drr->drr_u.drr_object_range);

	/* we only use this record type for raw sends */
	ASSERT(BP_IS_PROTECTED(bp));
	ASSERT(dsp->dsa_featureflags & DMU_BACKUP_FEATURE_RAW);
	ASSERT3U(BP_GET_COMPRESS(bp), ==, ZIO_COMPRESS_OFF);
	ASSERT3U(BP_GET_TYPE(bp), ==, DMU_OT_DNODE);
	ASSERT0(BP_GET_LEVEL(bp));

	if (dsp->dsa_pending_op != PENDING_NONE) {
		if (dump_record(dsp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
		dsp->dsa_pending_op = PENDING_NONE;
	}

	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_OBJECT_RANGE;
	drror->drr_firstobj = firstobj;
	drror->drr_numslots = numslots;
	drror->drr_toguid = dsp->dsa_toguid;
	if (BP_SHOULD_BYTESWAP(bp))
		drror->drr_flags |= DRR_RAW_BYTESWAP;
	zio_crypt_decode_params_bp(bp, drror->drr_salt, drror->drr_iv);
	zio_crypt_decode_mac_bp(bp, drror->drr_mac);

	if (dump_record(dsp, NULL, 0) != 0)
		return (SET_ERROR(EINTR));
	return (0);
}

static boolean_t
backup_do_embed(dmu_sendarg_t *dsp, const blkptr_t *bp)
{
	if (!BP_IS_EMBEDDED(bp))
		return (B_FALSE);

	/*
	 * Compression function must be legacy, or explicitly enabled.
	 */
	if ((BP_GET_COMPRESS(bp) >= ZIO_COMPRESS_LEGACY_FUNCTIONS &&
	    !(dsp->dsa_featureflags & DMU_BACKUP_FEATURE_LZ4)))
		return (B_FALSE);

	/*
	 * Embed type must be explicitly enabled.
	 */
	switch (BPE_GET_ETYPE(bp)) {
	case BP_EMBEDDED_TYPE_DATA:
		if (dsp->dsa_featureflags & DMU_BACKUP_FEATURE_EMBED_DATA)
			return (B_TRUE);
		break;
	default:
		return (B_FALSE);
	}
	return (B_FALSE);
}

/*
 * This is the callback function for traverse_dataset that acts as the worker
 * thread for dmu_send_impl.
 */
/*ARGSUSED*/
static int
send_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_phys_t *zb, const struct dnode_phys *dnp, void *arg)
{
	struct send_thread_arg *sta = arg;
	struct send_block_record *record;
	uint64_t record_size;
	int err = 0;

	ASSERT(zb->zb_object == DMU_META_DNODE_OBJECT ||
	    zb->zb_object >= sta->resume.zb_object);
	ASSERT3P(sta->ds, !=, NULL);

	if (sta->cancel)
		return (SET_ERROR(EINTR));

	if (bp == NULL) {
		ASSERT3U(zb->zb_level, ==, ZB_DNODE_LEVEL);
		return (0);
	} else if (zb->zb_level < 0) {
		return (0);
	}

	record = kmem_zalloc(sizeof (struct send_block_record), KM_SLEEP);
	record->eos_marker = B_FALSE;
	record->bp = *bp;
	record->zb = *zb;
	record->indblkshift = dnp->dn_indblkshift;
	record->datablkszsec = dnp->dn_datablkszsec;
	record_size = dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT;
	bqueue_enqueue(&sta->q, record, record_size);

	return (err);
}

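/*
 * Note that record_size above is the object's data block size, used
 * only as the weight that bounds how much data the bqueue will buffer
 * ahead of the consumer (see zfs_send_queue_length); the stream record
 * itself is sized later, in do_dump().
 */
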
/*
 * This function kicks off the traverse_dataset. It also handles setting the
 * error code of the thread in case something goes wrong, and pushes the End
 * of Stream record when the traverse_dataset call has finished. If there is
 * no dataset to traverse, the thread immediately pushes the End of Stream
 * marker.
 */
static void
send_traverse_thread(void *arg)
{
	struct send_thread_arg *st_arg = arg;
	int err;
	struct send_block_record *data;
	fstrans_cookie_t cookie = spl_fstrans_mark();

	if (st_arg->ds != NULL) {
		err = traverse_dataset_resume(st_arg->ds,
		    st_arg->fromtxg, &st_arg->resume,
		    st_arg->flags, send_cb, st_arg);

		if (err != EINTR)
			st_arg->error_code = err;
	}
	data = kmem_zalloc(sizeof (*data), KM_SLEEP);
	data->eos_marker = B_TRUE;
	bqueue_enqueue(&st_arg->q, data, 1);
	spl_fstrans_unmark(cookie);
	thread_exit();
}

/*
 * This function actually handles figuring out what kind of record needs to be
 * dumped, reading the data (which has hopefully been prefetched), and calling
 * the appropriate helper function.
 */
static int
do_dump(dmu_sendarg_t *dsa, struct send_block_record *data)
{
	dsl_dataset_t *ds = dmu_objset_ds(dsa->dsa_os);
	const blkptr_t *bp = &data->bp;
	const zbookmark_phys_t *zb = &data->zb;
	uint8_t indblkshift = data->indblkshift;
	uint16_t dblkszsec = data->datablkszsec;
	spa_t *spa = ds->ds_dir->dd_pool->dp_spa;
	dmu_object_type_t type = bp ? BP_GET_TYPE(bp) : DMU_OT_NONE;
	int err = 0;

	ASSERT3U(zb->zb_level, >=, 0);

	ASSERT(zb->zb_object == DMU_META_DNODE_OBJECT ||
	    zb->zb_object >= dsa->dsa_resume_object);

	/*
	 * All bps of an encrypted os should have the encryption bit set.
	 * If this is not true it indicates tampering and we report an error.
	 */
	if (dsa->dsa_os->os_encrypted &&
	    !BP_IS_HOLE(bp) && !BP_USES_CRYPT(bp)) {
		spa_log_error(spa, zb);
		zfs_panic_recover("unencrypted block in encrypted "
		    "object set %llu", ds->ds_object);
		return (SET_ERROR(EIO));
	}

	if (zb->zb_object != DMU_META_DNODE_OBJECT &&
	    DMU_OBJECT_IS_SPECIAL(zb->zb_object)) {
		return (0);
	} else if (BP_IS_HOLE(bp) &&
	    zb->zb_object == DMU_META_DNODE_OBJECT) {
		uint64_t span = BP_SPAN(dblkszsec, indblkshift, zb->zb_level);
		uint64_t dnobj = (zb->zb_blkid * span) >> DNODE_SHIFT;
		err = dump_freeobjects(dsa, dnobj, span >> DNODE_SHIFT);
	} else if (BP_IS_HOLE(bp)) {
		uint64_t span = BP_SPAN(dblkszsec, indblkshift, zb->zb_level);
		uint64_t offset = zb->zb_blkid * span;
		/* Don't dump free records for offsets > DMU_OBJECT_END */
		if (zb->zb_blkid == 0 || span <= DMU_OBJECT_END / zb->zb_blkid)
			err = dump_free(dsa, zb->zb_object, offset, span);
	} else if (zb->zb_level > 0 || type == DMU_OT_OBJSET) {
		return (0);
	} else if (type == DMU_OT_DNODE) {
		int epb = BP_GET_LSIZE(bp) >> DNODE_SHIFT;
		arc_flags_t aflags = ARC_FLAG_WAIT;
		arc_buf_t *abuf;
		enum zio_flag zioflags = ZIO_FLAG_CANFAIL;

		if (dsa->dsa_featureflags & DMU_BACKUP_FEATURE_RAW) {
			ASSERT(BP_IS_ENCRYPTED(bp));
			ASSERT3U(BP_GET_COMPRESS(bp), ==, ZIO_COMPRESS_OFF);
			zioflags |= ZIO_FLAG_RAW;
		}

		ASSERT0(zb->zb_level);

		if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
		    ZIO_PRIORITY_ASYNC_READ, zioflags, &aflags, zb) != 0)
			return (SET_ERROR(EIO));

		dnode_phys_t *blk = abuf->b_data;
		uint64_t dnobj = zb->zb_blkid * epb;

		/*
		 * Raw sends require sending encryption parameters for the
		 * block of dnodes. Regular sends do not need to send this
		 * info.
		 */
		if (dsa->dsa_featureflags & DMU_BACKUP_FEATURE_RAW) {
			ASSERT(arc_is_encrypted(abuf));
			err = dump_object_range(dsa, bp, dnobj, epb);
		}

		if (err == 0) {
			for (int i = 0; i < epb;
			    i += blk[i].dn_extra_slots + 1) {
				err = dump_dnode(dsa, bp, dnobj + i, blk + i);
				if (err != 0)
					break;
			}
		}
		arc_buf_destroy(abuf, &abuf);
	} else if (type == DMU_OT_SA) {
		arc_flags_t aflags = ARC_FLAG_WAIT;
		arc_buf_t *abuf;
		enum zio_flag zioflags = ZIO_FLAG_CANFAIL;

		if (dsa->dsa_featureflags & DMU_BACKUP_FEATURE_RAW) {
			ASSERT(BP_IS_PROTECTED(bp));
			zioflags |= ZIO_FLAG_RAW;
		}

		if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
		    ZIO_PRIORITY_ASYNC_READ, zioflags, &aflags, zb) != 0)
			return (SET_ERROR(EIO));

		err = dump_spill(dsa, bp, zb->zb_object, abuf->b_data);
		arc_buf_destroy(abuf, &abuf);
	} else if (backup_do_embed(dsa, bp)) {
		/* it's an embedded level-0 block of a regular object */
		int blksz = dblkszsec << SPA_MINBLOCKSHIFT;
		ASSERT0(zb->zb_level);
		err = dump_write_embedded(dsa, zb->zb_object,
		    zb->zb_blkid * blksz, blksz, bp);
	} else {
		/* it's a level-0 block of a regular object */
		arc_flags_t aflags = ARC_FLAG_WAIT;
		arc_buf_t *abuf;
		int blksz = dblkszsec << SPA_MINBLOCKSHIFT;
		uint64_t offset;

		/*
		 * If we have large blocks stored on disk but the send flags
		 * don't allow us to send large blocks, we split the data from
		 * the arc buf into chunks.
		 */
		boolean_t split_large_blocks = blksz > SPA_OLD_MAXBLOCKSIZE &&
		    !(dsa->dsa_featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS);

		/*
		 * Raw sends require that we always get raw data as it exists
		 * on disk, so we assert that we are not splitting blocks here.
		 */
		boolean_t request_raw =
		    (dsa->dsa_featureflags & DMU_BACKUP_FEATURE_RAW) != 0;

		/*
		 * We should only request compressed data from the ARC if all
		 * the following are true:
		 *  - stream compression was requested
		 *  - we aren't splitting large blocks into smaller chunks
		 *  - the data won't need to be byteswapped before sending
		 *  - this isn't an embedded block
		 *  - this isn't metadata (if receiving on a different endian
		 *    system it can be byteswapped more easily)
		 */
		boolean_t request_compressed =
		    (dsa->dsa_featureflags & DMU_BACKUP_FEATURE_COMPRESSED) &&
		    !split_large_blocks && !BP_SHOULD_BYTESWAP(bp) &&
		    !BP_IS_EMBEDDED(bp) && !DMU_OT_IS_METADATA(BP_GET_TYPE(bp));

		IMPLY(request_raw, !split_large_blocks);
		IMPLY(request_raw, BP_IS_PROTECTED(bp));
		ASSERT0(zb->zb_level);
		ASSERT(zb->zb_object > dsa->dsa_resume_object ||
		    (zb->zb_object == dsa->dsa_resume_object &&
		    zb->zb_blkid * blksz >= dsa->dsa_resume_offset));

		ASSERT3U(blksz, ==, BP_GET_LSIZE(bp));

		enum zio_flag zioflags = ZIO_FLAG_CANFAIL;
		if (request_raw)
			zioflags |= ZIO_FLAG_RAW;
		else if (request_compressed)
			zioflags |= ZIO_FLAG_RAW_COMPRESS;

		if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
		    ZIO_PRIORITY_ASYNC_READ, zioflags, &aflags, zb) != 0) {
			if (zfs_send_corrupt_data) {
				/* Send a block filled with 0x"zfs badd bloc" */
				abuf = arc_alloc_buf(spa, &abuf, ARC_BUFC_DATA,
				    blksz);
				uint64_t *ptr;
				for (ptr = abuf->b_data;
				    (char *)ptr < (char *)abuf->b_data + blksz;
				    ptr++)
					*ptr = 0x2f5baddb10cULL;
			} else {
				return (SET_ERROR(EIO));
			}
		}

		offset = zb->zb_blkid * blksz;

		if (split_large_blocks) {
			ASSERT0(arc_is_encrypted(abuf));
			ASSERT3U(arc_get_compression(abuf), ==,
			    ZIO_COMPRESS_OFF);
			char *buf = abuf->b_data;
			while (blksz > 0 && err == 0) {
				int n = MIN(blksz, SPA_OLD_MAXBLOCKSIZE);
				err = dump_write(dsa, type, zb->zb_object,
				    offset, n, n, NULL, buf);
				offset += n;
				buf += n;
				blksz -= n;
			}
		} else {
			err = dump_write(dsa, type, zb->zb_object, offset,
			    blksz, arc_buf_size(abuf), bp, abuf->b_data);
		}
		arc_buf_destroy(abuf, &abuf);
	}

	ASSERT(err == 0 || err == EINTR);
	return (err);
}

/*
 * Pop the new data off the queue, and free the old data.
 */
static struct send_block_record *
get_next_record(bqueue_t *bq, struct send_block_record *data)
{
	struct send_block_record *tmp = bqueue_dequeue(bq);
	kmem_free(data, sizeof (*data));
	return (tmp);
}

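/*
 * Together, send_traverse_thread() and get_next_record() form a
 * single-producer/single-consumer pipeline: the traversal thread
 * enqueues send_block_records (terminated by an eos_marker) while the
 * main send thread dequeues and dumps them in traversal order, with
 * zfs_send_queue_length bounding how much data may be buffered between
 * the two.
 */
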
/*
 * Actually do the bulk of the work in a zfs send.
 *
 * Note: Releases dp using the specified tag.
 */
static int
dmu_send_impl(void *tag, dsl_pool_t *dp, dsl_dataset_t *to_ds,
    zfs_bookmark_phys_t *ancestor_zb, boolean_t is_clone,
    boolean_t embedok, boolean_t large_block_ok, boolean_t compressok,
    boolean_t rawok, int outfd, uint64_t resumeobj, uint64_t resumeoff,
    vnode_t *vp, offset_t *off)
{
	objset_t *os;
	dmu_replay_record_t *drr;
	dmu_sendarg_t *dsp;
	int err;
	uint64_t fromtxg = 0;
	uint64_t featureflags = 0;
	struct send_thread_arg to_arg;
	void *payload = NULL;
	size_t payload_len = 0;
	struct send_block_record *to_data;

	err = dmu_objset_from_ds(to_ds, &os);
	if (err != 0) {
		dsl_pool_rele(dp, tag);
		return (err);
	}

	/*
	 * If this is a non-raw send of an encrypted ds, we can ensure that
	 * the objset_phys_t is authenticated. This is safe because this is
	 * either a snapshot or we have owned the dataset, ensuring that
	 * it can't be modified.
	 */
	if (!rawok && os->os_encrypted &&
	    arc_is_unauthenticated(os->os_phys_buf)) {
		err = arc_untransform(os->os_phys_buf, os->os_spa,
		    to_ds->ds_object, B_FALSE);
		if (err != 0) {
			dsl_pool_rele(dp, tag);
			return (err);
		}

		ASSERT0(arc_is_unauthenticated(os->os_phys_buf));
	}

	drr = kmem_zalloc(sizeof (dmu_replay_record_t), KM_SLEEP);
	drr->drr_type = DRR_BEGIN;
	drr->drr_u.drr_begin.drr_magic = DMU_BACKUP_MAGIC;
	DMU_SET_STREAM_HDRTYPE(drr->drr_u.drr_begin.drr_versioninfo,
	    DMU_SUBSTREAM);

	bzero(&to_arg, sizeof (to_arg));

#ifdef _KERNEL
	if (dmu_objset_type(os) == DMU_OST_ZFS) {
		uint64_t version;
		if (zfs_get_zplprop(os, ZFS_PROP_VERSION, &version) != 0) {
			kmem_free(drr, sizeof (dmu_replay_record_t));
			dsl_pool_rele(dp, tag);
			return (SET_ERROR(EINVAL));
		}
		if (version >= ZPL_VERSION_SA) {
			featureflags |= DMU_BACKUP_FEATURE_SA_SPILL;
		}
	}
#endif

	/* raw sends imply large_block_ok */
	if ((large_block_ok || rawok) &&
	    to_ds->ds_feature_inuse[SPA_FEATURE_LARGE_BLOCKS])
		featureflags |= DMU_BACKUP_FEATURE_LARGE_BLOCKS;
	if (to_ds->ds_feature_inuse[SPA_FEATURE_LARGE_DNODE])
		featureflags |= DMU_BACKUP_FEATURE_LARGE_DNODE;

	/* encrypted datasets will not have embedded blocks */
	if ((embedok || rawok) && !os->os_encrypted &&
	    spa_feature_is_active(dp->dp_spa, SPA_FEATURE_EMBEDDED_DATA)) {
		featureflags |= DMU_BACKUP_FEATURE_EMBED_DATA;
	}

	/* raw send implies compressok */
	if (compressok || rawok)
		featureflags |= DMU_BACKUP_FEATURE_COMPRESSED;
	if (rawok && os->os_encrypted)
		featureflags |= DMU_BACKUP_FEATURE_RAW;

	if ((featureflags &
	    (DMU_BACKUP_FEATURE_EMBED_DATA | DMU_BACKUP_FEATURE_COMPRESSED |
	    DMU_BACKUP_FEATURE_RAW)) != 0 &&
	    spa_feature_is_active(dp->dp_spa, SPA_FEATURE_LZ4_COMPRESS)) {
		featureflags |= DMU_BACKUP_FEATURE_LZ4;
	}

	if (resumeobj != 0 || resumeoff != 0) {
		featureflags |= DMU_BACKUP_FEATURE_RESUMING;
	}

	DMU_SET_FEATUREFLAGS(drr->drr_u.drr_begin.drr_versioninfo,
	    featureflags);

	drr->drr_u.drr_begin.drr_creation_time =
	    dsl_dataset_phys(to_ds)->ds_creation_time;
	drr->drr_u.drr_begin.drr_type = dmu_objset_type(os);
	if (is_clone)
		drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_CLONE;
	drr->drr_u.drr_begin.drr_toguid = dsl_dataset_phys(to_ds)->ds_guid;
	if (dsl_dataset_phys(to_ds)->ds_flags & DS_FLAG_CI_DATASET)
		drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_CI_DATA;
	if (zfs_send_set_freerecords_bit)
		drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_FREERECORDS;

	if (ancestor_zb != NULL) {
		drr->drr_u.drr_begin.drr_fromguid =
		    ancestor_zb->zbm_guid;
		fromtxg = ancestor_zb->zbm_creation_txg;
	}
	dsl_dataset_name(to_ds, drr->drr_u.drr_begin.drr_toname);
	if (!to_ds->ds_is_snapshot) {
		(void) strlcat(drr->drr_u.drr_begin.drr_toname, "@--head--",
		    sizeof (drr->drr_u.drr_begin.drr_toname));
	}

	dsp = kmem_zalloc(sizeof (dmu_sendarg_t), KM_SLEEP);

	dsp->dsa_drr = drr;
	dsp->dsa_vp = vp;
	dsp->dsa_outfd = outfd;
	dsp->dsa_proc = curproc;
	dsp->dsa_os = os;
	dsp->dsa_off = off;
	dsp->dsa_toguid = dsl_dataset_phys(to_ds)->ds_guid;
	dsp->dsa_pending_op = PENDING_NONE;
	dsp->dsa_featureflags = featureflags;
	dsp->dsa_resume_object = resumeobj;
	dsp->dsa_resume_offset = resumeoff;

	mutex_enter(&to_ds->ds_sendstream_lock);
	list_insert_head(&to_ds->ds_sendstreams, dsp);
	mutex_exit(&to_ds->ds_sendstream_lock);

	dsl_dataset_long_hold(to_ds, FTAG);
	dsl_pool_rele(dp, tag);

	/* handle features that require a DRR_BEGIN payload */
	if (featureflags &
	    (DMU_BACKUP_FEATURE_RESUMING | DMU_BACKUP_FEATURE_RAW)) {
		nvlist_t *keynvl = NULL;
		nvlist_t *nvl = fnvlist_alloc();

		if (featureflags & DMU_BACKUP_FEATURE_RESUMING) {
			dmu_object_info_t to_doi;
			err = dmu_object_info(os, resumeobj, &to_doi);
			if (err != 0) {
				fnvlist_free(nvl);
				goto out;
			}

			SET_BOOKMARK(&to_arg.resume, to_ds->ds_object,
			    resumeobj, 0,
			    resumeoff / to_doi.doi_data_block_size);

			fnvlist_add_uint64(nvl, "resume_object", resumeobj);
			fnvlist_add_uint64(nvl, "resume_offset", resumeoff);
		}

		if (featureflags & DMU_BACKUP_FEATURE_RAW) {
			ASSERT(os->os_encrypted);

			err = dsl_crypto_populate_key_nvlist(to_ds, &keynvl);
			if (err != 0) {
				fnvlist_free(nvl);
				goto out;
			}

			fnvlist_add_nvlist(nvl, "crypt_keydata", keynvl);
		}

		payload = fnvlist_pack(nvl, &payload_len);
		drr->drr_payloadlen = payload_len;
		fnvlist_free(keynvl);
		fnvlist_free(nvl);
	}

	err = dump_record(dsp, payload, payload_len);
	fnvlist_pack_free(payload, payload_len);
	if (err != 0) {
		err = dsp->dsa_err;
		goto out;
	}

	err = bqueue_init(&to_arg.q, zfs_send_queue_length,
	    offsetof(struct send_block_record, ln));
	to_arg.error_code = 0;
	to_arg.cancel = B_FALSE;
	to_arg.ds = to_ds;
	to_arg.fromtxg = fromtxg;
	to_arg.flags = TRAVERSE_PRE | TRAVERSE_PREFETCH;
	if (rawok)
		to_arg.flags |= TRAVERSE_NO_DECRYPT;
	(void) thread_create(NULL, 0, send_traverse_thread, &to_arg, 0, curproc,
	    TS_RUN, minclsyspri);

	to_data = bqueue_dequeue(&to_arg.q);

	while (!to_data->eos_marker && err == 0) {
		err = do_dump(dsp, to_data);
		to_data = get_next_record(&to_arg.q, to_data);
		if (issig(JUSTLOOKING) && issig(FORREAL))
			err = EINTR;
	}

	if (err != 0) {
		to_arg.cancel = B_TRUE;
		while (!to_data->eos_marker) {
			to_data = get_next_record(&to_arg.q, to_data);
		}
	}
	kmem_free(to_data, sizeof (*to_data));

	bqueue_destroy(&to_arg.q);

	if (err == 0 && to_arg.error_code != 0)
		err = to_arg.error_code;

	if (err != 0)
		goto out;

	if (dsp->dsa_pending_op != PENDING_NONE)
		if (dump_record(dsp, NULL, 0) != 0)
			err = SET_ERROR(EINTR);

	if (err != 0) {
		if (err == EINTR && dsp->dsa_err != 0)
			err = dsp->dsa_err;
		goto out;
	}

	bzero(drr, sizeof (dmu_replay_record_t));
	drr->drr_type = DRR_END;
	drr->drr_u.drr_end.drr_checksum = dsp->dsa_zc;
	drr->drr_u.drr_end.drr_toguid = dsp->dsa_toguid;

	if (dump_record(dsp, NULL, 0) != 0)
		err = dsp->dsa_err;
out:
	mutex_enter(&to_ds->ds_sendstream_lock);
	list_remove(&to_ds->ds_sendstreams, dsp);
	mutex_exit(&to_ds->ds_sendstream_lock);

	VERIFY(err != 0 || (dsp->dsa_sent_begin && dsp->dsa_sent_end));

	kmem_free(drr, sizeof (dmu_replay_record_t));
	kmem_free(dsp, sizeof (dmu_sendarg_t));

	dsl_dataset_long_rele(to_ds, FTAG);

	return (err);
}

int
dmu_send_obj(const char *pool, uint64_t tosnap, uint64_t fromsnap,
    boolean_t embedok, boolean_t large_block_ok, boolean_t compressok,
    boolean_t rawok, int outfd, vnode_t *vp, offset_t *off)
{
	dsl_pool_t *dp;
	dsl_dataset_t *ds;
	dsl_dataset_t *fromds = NULL;
	ds_hold_flags_t dsflags = (rawok) ? 0 : DS_HOLD_FLAG_DECRYPT;
	int err;

	err = dsl_pool_hold(pool, FTAG, &dp);
	if (err != 0)
		return (err);

	err = dsl_dataset_hold_obj_flags(dp, tosnap, dsflags, FTAG, &ds);
	if (err != 0) {
		dsl_pool_rele(dp, FTAG);
		return (err);
	}

	if (fromsnap != 0) {
		zfs_bookmark_phys_t zb;
		boolean_t is_clone;

		err = dsl_dataset_hold_obj(dp, fromsnap, FTAG, &fromds);
		if (err != 0) {
			dsl_dataset_rele_flags(ds, dsflags, FTAG);
			dsl_pool_rele(dp, FTAG);
			return (err);
		}
		if (!dsl_dataset_is_before(ds, fromds, 0))
			err = SET_ERROR(EXDEV);
		zb.zbm_creation_time =
		    dsl_dataset_phys(fromds)->ds_creation_time;
		zb.zbm_creation_txg = dsl_dataset_phys(fromds)->ds_creation_txg;
		zb.zbm_guid = dsl_dataset_phys(fromds)->ds_guid;
		is_clone = (fromds->ds_dir != ds->ds_dir);
		dsl_dataset_rele(fromds, FTAG);
		err = dmu_send_impl(FTAG, dp, ds, &zb, is_clone,
		    embedok, large_block_ok, compressok, rawok, outfd,
		    0, 0, vp, off);
	} else {
		err = dmu_send_impl(FTAG, dp, ds, NULL, B_FALSE,
		    embedok, large_block_ok, compressok, rawok, outfd,
		    0, 0, vp, off);
	}
	dsl_dataset_rele_flags(ds, dsflags, FTAG);
	return (err);
}

int
dmu_send(const char *tosnap, const char *fromsnap, boolean_t embedok,
    boolean_t large_block_ok, boolean_t compressok, boolean_t rawok,
    int outfd, uint64_t resumeobj, uint64_t resumeoff, vnode_t *vp,
    offset_t *off)
{
	dsl_pool_t *dp;
	dsl_dataset_t *ds;
	int err;
	ds_hold_flags_t dsflags = (rawok) ? 0 : DS_HOLD_FLAG_DECRYPT;
	boolean_t owned = B_FALSE;

	if (fromsnap != NULL && strpbrk(fromsnap, "@#") == NULL)
		return (SET_ERROR(EINVAL));

	err = dsl_pool_hold(tosnap, FTAG, &dp);
	if (err != 0)
		return (err);

	if (strchr(tosnap, '@') == NULL && spa_writeable(dp->dp_spa)) {
		/*
		 * We are sending a filesystem or volume. Ensure
		 * that it doesn't change by owning the dataset.
		 */
		err = dsl_dataset_own(dp, tosnap, dsflags, FTAG, &ds);
		owned = B_TRUE;
	} else {
		err = dsl_dataset_hold_flags(dp, tosnap, dsflags, FTAG, &ds);
	}
	if (err != 0) {
		dsl_pool_rele(dp, FTAG);
		return (err);
	}

	if (fromsnap != NULL) {
		zfs_bookmark_phys_t zb;
		boolean_t is_clone = B_FALSE;
		int fsnamelen = strchr(tosnap, '@') - tosnap;

		/*
		 * If the fromsnap is in a different filesystem, then
		 * mark the send stream as a clone.
		 */
		if (strncmp(tosnap, fromsnap, fsnamelen) != 0 ||
		    (fromsnap[fsnamelen] != '@' &&
		    fromsnap[fsnamelen] != '#')) {
			is_clone = B_TRUE;
		}

		if (strchr(fromsnap, '@')) {
			dsl_dataset_t *fromds;
			err = dsl_dataset_hold(dp, fromsnap, FTAG, &fromds);
			if (err == 0) {
				if (!dsl_dataset_is_before(ds, fromds, 0))
					err = SET_ERROR(EXDEV);
				zb.zbm_creation_time =
				    dsl_dataset_phys(fromds)->ds_creation_time;
				zb.zbm_creation_txg =
				    dsl_dataset_phys(fromds)->ds_creation_txg;
				zb.zbm_guid = dsl_dataset_phys(fromds)->ds_guid;
				is_clone = (ds->ds_dir != fromds->ds_dir);
				dsl_dataset_rele(fromds, FTAG);
			}
		} else {
			err = dsl_bookmark_lookup(dp, fromsnap, ds, &zb);
		}
		if (err != 0) {
			if (owned)
				dsl_dataset_disown(ds, dsflags, FTAG);
			else
				dsl_dataset_rele_flags(ds, dsflags, FTAG);

			dsl_pool_rele(dp, FTAG);
			return (err);
		}
		err = dmu_send_impl(FTAG, dp, ds, &zb, is_clone,
		    embedok, large_block_ok, compressok, rawok,
		    outfd, resumeobj, resumeoff, vp, off);
	} else {
		err = dmu_send_impl(FTAG, dp, ds, NULL, B_FALSE,
		    embedok, large_block_ok, compressok, rawok,
		    outfd, resumeobj, resumeoff, vp, off);
	}
	if (owned)
		dsl_dataset_disown(ds, dsflags, FTAG);
	else
		dsl_dataset_rele_flags(ds, dsflags, FTAG);

	return (err);
}

static int
dmu_adjust_send_estimate_for_indirects(dsl_dataset_t *ds, uint64_t uncompressed,
    uint64_t compressed, boolean_t stream_compressed, uint64_t *sizep)
{
	int err;
	uint64_t size;
	/*
	 * Assume that space (both on-disk and in-stream) is dominated by
	 * data. We will adjust for indirect blocks and the copies property,
	 * but ignore per-object space used (e.g., dnodes and DRR_OBJECT
	 * records).
	 */

	uint64_t recordsize;
	uint64_t record_count;
	objset_t *os;
	VERIFY0(dmu_objset_from_ds(ds, &os));

	/* Assume all (uncompressed) blocks are recordsize. */
	if (os->os_phys->os_type == DMU_OST_ZVOL) {
		err = dsl_prop_get_int_ds(ds,
		    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &recordsize);
	} else {
		err = dsl_prop_get_int_ds(ds,
		    zfs_prop_to_name(ZFS_PROP_RECORDSIZE), &recordsize);
	}
	if (err != 0)
		return (err);
	record_count = uncompressed / recordsize;

	/*
	 * If we're estimating a send size for a compressed stream, use the
	 * compressed data size to estimate the stream size. Otherwise, use the
	 * uncompressed data size.
	 */
	size = stream_compressed ? compressed : uncompressed;

	/*
	 * Subtract out approximate space used by indirect blocks.
	 * Assume most space is used by data blocks (non-indirect, non-dnode).
	 * Assume no ditto blocks or internal fragmentation.
	 *
	 * Therefore, space used by indirect blocks is sizeof(blkptr_t) per
	 * block.
	 */
	size -= record_count * sizeof (blkptr_t);

	/* Add in the space for the record associated with each block. */
	size += record_count * sizeof (dmu_replay_record_t);

	*sizep = size;

	return (0);
}

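/*
 * Worked example of the adjustment above (illustrative, assuming a
 * 128-byte blkptr_t and a 312-byte dmu_replay_record_t): estimating an
 * uncompressed send of 1 GiB of data with a 128K recordsize gives
 * record_count == 8192, so the estimate is
 * 1 GiB - 8192 * 128 + 8192 * 312, i.e. roughly 1 GiB + 1.4 MiB of
 * stream.
 */
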
int
dmu_send_estimate(dsl_dataset_t *ds, dsl_dataset_t *fromds,
    boolean_t stream_compressed, uint64_t *sizep)
{
	int err;
	uint64_t uncomp, comp;

	ASSERT(dsl_pool_config_held(ds->ds_dir->dd_pool));

	/* tosnap must be a snapshot */
	if (!ds->ds_is_snapshot)
		return (SET_ERROR(EINVAL));

	/* fromsnap, if provided, must be a snapshot */
	if (fromds != NULL && !fromds->ds_is_snapshot)
		return (SET_ERROR(EINVAL));

	/*
	 * fromsnap must be an earlier snapshot from the same fs as tosnap,
	 * or the origin's fs.
	 */
	if (fromds != NULL && !dsl_dataset_is_before(ds, fromds, 0))
		return (SET_ERROR(EXDEV));

	/* Get compressed and uncompressed size estimates of changed data. */
	if (fromds == NULL) {
		uncomp = dsl_dataset_phys(ds)->ds_uncompressed_bytes;
		comp = dsl_dataset_phys(ds)->ds_compressed_bytes;
	} else {
		uint64_t used;
		err = dsl_dataset_space_written(fromds, ds,
		    &used, &comp, &uncomp);
		if (err != 0)
			return (err);
	}

	err = dmu_adjust_send_estimate_for_indirects(ds, uncomp, comp,
	    stream_compressed, sizep);
	/*
	 * Add the size of the BEGIN and END records to the estimate.
	 */
	*sizep += 2 * sizeof (dmu_replay_record_t);
	return (err);
}

struct calculate_send_arg {
	uint64_t uncompressed;
	uint64_t compressed;
};

/*
 * Simple callback used to traverse the blocks of a snapshot and sum their
 * uncompressed and compressed sizes.
 */
/* ARGSUSED */
static int
dmu_calculate_send_traversal(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
{
	struct calculate_send_arg *space = arg;
	if (bp != NULL && !BP_IS_HOLE(bp)) {
		space->uncompressed += BP_GET_UCSIZE(bp);
		space->compressed += BP_GET_PSIZE(bp);
	}
	return (0);
}

/*
 * Given a destination snapshot and a TXG, calculate the approximate size of a
 * send stream sent from that TXG. from_txg may be zero, indicating that the
 * whole snapshot will be sent.
 */
int
dmu_send_estimate_from_txg(dsl_dataset_t *ds, uint64_t from_txg,
    boolean_t stream_compressed, uint64_t *sizep)
{
	int err;
	struct calculate_send_arg size = { 0 };

	ASSERT(dsl_pool_config_held(ds->ds_dir->dd_pool));

	/* tosnap must be a snapshot */
	if (!dsl_dataset_is_snapshot(ds))
		return (SET_ERROR(EINVAL));

	/* verify that from_txg is before the provided snapshot was taken */
	if (from_txg >= dsl_dataset_phys(ds)->ds_creation_txg) {
		return (SET_ERROR(EXDEV));
	}
	/*
	 * traverse the blocks of the snapshot with birth times after
	 * from_txg, summing their uncompressed and compressed sizes
	 */
	err = traverse_dataset(ds, from_txg,
	    TRAVERSE_POST | TRAVERSE_NO_DECRYPT,
	    dmu_calculate_send_traversal, &size);

	if (err)
		return (err);

	err = dmu_adjust_send_estimate_for_indirects(ds, size.uncompressed,
	    size.compressed, stream_compressed, sizep);
	return (err);
}

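/*
 * Usage sketch (hypothetical caller; the pool config lock must already
 * be held, as asserted above):
 *
 *	uint64_t size;
 *	int err = dmu_send_estimate_from_txg(ds, 0, B_FALSE, &size);
 *
 * Passing a from_txg of zero estimates a full send of the snapshot;
 * passing an earlier snapshot's creation txg approximates the
 * corresponding incremental stream.
 */
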
typedef struct dmu_recv_begin_arg {
	const char *drba_origin;
	dmu_recv_cookie_t *drba_cookie;
	cred_t *drba_cred;
	uint64_t drba_snapobj;
} dmu_recv_begin_arg_t;

static int
recv_begin_check_existing_impl(dmu_recv_begin_arg_t *drba, dsl_dataset_t *ds,
    uint64_t fromguid)
{
	uint64_t val;
	int error;
	dsl_pool_t *dp = ds->ds_dir->dd_pool;

	/* temporary clone name must not exist */
	error = zap_lookup(dp->dp_meta_objset,
	    dsl_dir_phys(ds->ds_dir)->dd_child_dir_zapobj, recv_clone_name,
	    8, 1, &val);
	if (error != ENOENT)
		return (error == 0 ? EBUSY : error);

	/* new snapshot name must not exist */
	error = zap_lookup(dp->dp_meta_objset,
	    dsl_dataset_phys(ds)->ds_snapnames_zapobj,
	    drba->drba_cookie->drc_tosnap, 8, 1, &val);
	if (error != ENOENT)
		return (error == 0 ? EEXIST : error);

	/*
	 * Check snapshot limit before receiving. We'll recheck again at the
	 * end, but might as well abort before receiving if we're already over
	 * the limit.
	 *
	 * Note that we do not check the file system limit with
	 * dsl_dir_fscount_check because the temporary %clones don't count
	 * against that limit.
	 */
	error = dsl_fs_ss_limit_check(ds->ds_dir, 1, ZFS_PROP_SNAPSHOT_LIMIT,
	    NULL, drba->drba_cred);
	if (error != 0)
		return (error);

	if (fromguid != 0) {
		dsl_dataset_t *snap;
		uint64_t obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;

		/* Find snapshot in this dir that matches fromguid. */
		while (obj != 0) {
			error = dsl_dataset_hold_obj(dp, obj, FTAG,
			    &snap);
			if (error != 0)
				return (SET_ERROR(ENODEV));
			if (snap->ds_dir != ds->ds_dir) {
				dsl_dataset_rele(snap, FTAG);
				return (SET_ERROR(ENODEV));
			}
			if (dsl_dataset_phys(snap)->ds_guid == fromguid)
				break;
			obj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
			dsl_dataset_rele(snap, FTAG);
		}
		if (obj == 0)
			return (SET_ERROR(ENODEV));

		if (drba->drba_cookie->drc_force) {
			drba->drba_snapobj = obj;
		} else {
			/*
			 * If we are not forcing, there must be no
			 * changes since fromsnap.
			 */
			if (dsl_dataset_modified_since_snap(ds, snap)) {
				dsl_dataset_rele(snap, FTAG);
				return (SET_ERROR(ETXTBSY));
			}
			drba->drba_snapobj = ds->ds_prev->ds_object;
		}

		dsl_dataset_rele(snap, FTAG);
	} else {
		/* if full, then must be forced */
		if (!drba->drba_cookie->drc_force)
			return (SET_ERROR(EEXIST));

		/*
		 * We don't support using zfs recv -F to blow away
		 * encrypted filesystems. This would require the
		 * dsl dir to point to the old encryption key and
		 * the new one at the same time during the receive.
		 */
		if (ds->ds_dir->dd_crypto_obj != 0)
			return (SET_ERROR(EINVAL));

		drba->drba_snapobj = 0;
	}

	return (0);
}

static int
dmu_recv_begin_check(void *arg, dmu_tx_t *tx)
{
	dmu_recv_begin_arg_t *drba = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
	uint64_t fromguid = drrb->drr_fromguid;
	int flags = drrb->drr_flags;
	ds_hold_flags_t dsflags = 0;
	int error;
	uint64_t featureflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo);
	dsl_dataset_t *ds;
	const char *tofs = drba->drba_cookie->drc_tofs;

	/* already checked */
	ASSERT3U(drrb->drr_magic, ==, DMU_BACKUP_MAGIC);
	ASSERT(!(featureflags & DMU_BACKUP_FEATURE_RESUMING));

	if (DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) ==
	    DMU_COMPOUNDSTREAM ||
	    drrb->drr_type >= DMU_OST_NUMTYPES ||
	    ((flags & DRR_FLAG_CLONE) && drba->drba_origin == NULL))
		return (SET_ERROR(EINVAL));

	/* Verify pool version supports SA if SA_SPILL feature set */
	if ((featureflags & DMU_BACKUP_FEATURE_SA_SPILL) &&
	    spa_version(dp->dp_spa) < SPA_VERSION_SA)
		return (SET_ERROR(ENOTSUP));

	if (drba->drba_cookie->drc_resumable &&
	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_EXTENSIBLE_DATASET))
		return (SET_ERROR(ENOTSUP));

	/*
	 * The receiving code doesn't know how to translate a WRITE_EMBEDDED
	 * record to a plain WRITE record, so the pool must have the
	 * EMBEDDED_DATA feature enabled if the stream has WRITE_EMBEDDED
	 * records. Same with WRITE_EMBEDDED records that use LZ4 compression.
	 */
	if ((featureflags & DMU_BACKUP_FEATURE_EMBED_DATA) &&
	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_EMBEDDED_DATA))
		return (SET_ERROR(ENOTSUP));
	if ((featureflags & DMU_BACKUP_FEATURE_LZ4) &&
	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LZ4_COMPRESS))
		return (SET_ERROR(ENOTSUP));

	/*
	 * The receiving code doesn't know how to translate large blocks
	 * to smaller ones, so the pool must have the LARGE_BLOCKS
	 * feature enabled if the stream has LARGE_BLOCKS. Same with
	 * large dnodes.
	 */
	if ((featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) &&
	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LARGE_BLOCKS))
		return (SET_ERROR(ENOTSUP));
	if ((featureflags & DMU_BACKUP_FEATURE_LARGE_DNODE) &&
	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LARGE_DNODE))
		return (SET_ERROR(ENOTSUP));

	if ((featureflags & DMU_BACKUP_FEATURE_RAW)) {
		/* raw receives require the encryption feature */
		if (!spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_ENCRYPTION))
			return (SET_ERROR(ENOTSUP));
	} else {
		dsflags |= DS_HOLD_FLAG_DECRYPT;
	}

	error = dsl_dataset_hold_flags(dp, tofs, dsflags, FTAG, &ds);
	if (error == 0) {
		/* target fs already exists; recv into temp clone */

		/* Can't recv a clone into an existing fs */
		if (flags & DRR_FLAG_CLONE || drba->drba_origin) {
			dsl_dataset_rele_flags(ds, dsflags, FTAG);
			return (SET_ERROR(EINVAL));
		}

		error = recv_begin_check_existing_impl(drba, ds, fromguid);
		dsl_dataset_rele_flags(ds, dsflags, FTAG);
	} else if (error == ENOENT) {
		/* target fs does not exist; must be a full backup or clone */
		char buf[ZFS_MAX_DATASET_NAME_LEN];

		/*
		 * If it's a non-clone incremental, we are missing the
		 * target fs, so fail the recv.
		 */
		if (fromguid != 0 && !(flags & DRR_FLAG_CLONE ||
		    drba->drba_origin))
			return (SET_ERROR(ENOENT));

		/*
		 * If we're receiving a full send as a clone, and it doesn't
		 * contain all the necessary free records and freeobject
		 * records, reject it.
		 */
		if (fromguid == 0 && drba->drba_origin &&
		    !(flags & DRR_FLAG_FREERECORDS))
			return (SET_ERROR(EINVAL));

		/* Open the parent of tofs */
		ASSERT3U(strlen(tofs), <, sizeof (buf));
		(void) strlcpy(buf, tofs, strrchr(tofs, '/') - tofs + 1);
		error = dsl_dataset_hold_flags(dp, buf, dsflags, FTAG, &ds);
		if (error != 0)
			return (error);

		/*
		 * Check filesystem and snapshot limits before receiving. We'll
		 * recheck snapshot limits again at the end (we create the
		 * filesystems and increment those counts during begin_sync).
		 */
		error = dsl_fs_ss_limit_check(ds->ds_dir, 1,
		    ZFS_PROP_FILESYSTEM_LIMIT, NULL, drba->drba_cred);
		if (error != 0) {
			dsl_dataset_rele_flags(ds, dsflags, FTAG);
			return (error);
		}

		error = dsl_fs_ss_limit_check(ds->ds_dir, 1,
		    ZFS_PROP_SNAPSHOT_LIMIT, NULL, drba->drba_cred);
		if (error != 0) {
			dsl_dataset_rele_flags(ds, dsflags, FTAG);
			return (error);
		}

		if (drba->drba_origin != NULL) {
			dsl_dataset_t *origin;

			error = dsl_dataset_hold_flags(dp, drba->drba_origin,
			    dsflags, FTAG, &origin);
			if (error != 0) {
				dsl_dataset_rele_flags(ds, dsflags, FTAG);
				return (error);
			}
			if (!origin->ds_is_snapshot) {
				dsl_dataset_rele_flags(origin, dsflags, FTAG);
				dsl_dataset_rele_flags(ds, dsflags, FTAG);
				return (SET_ERROR(EINVAL));
			}
			if (dsl_dataset_phys(origin)->ds_guid != fromguid &&
			    fromguid != 0) {
				dsl_dataset_rele_flags(origin, dsflags, FTAG);
				dsl_dataset_rele_flags(ds, dsflags, FTAG);
				return (SET_ERROR(ENODEV));
			}
			dsl_dataset_rele_flags(origin,
			    dsflags, FTAG);
		}
		dsl_dataset_rele_flags(ds, dsflags, FTAG);
		error = 0;
	}
	return (error);
}

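/*
 * Called in syncing context after dmu_recv_begin_check() has passed. Creates
 * either a temporary %recv clone of the existing target filesystem or a new
 * (possibly cloned) dataset, marks it inconsistent, and, for resumable
 * receives, records the initial resume state in the dataset's ZAP.
 */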
34dc7c2f 1767static void
13fe0198 1768dmu_recv_begin_sync(void *arg, dmu_tx_t *tx)
34dc7c2f 1769{
13fe0198
MA
1770 dmu_recv_begin_arg_t *drba = arg;
1771 dsl_pool_t *dp = dmu_tx_pool(tx);
47dfff3b 1772 objset_t *mos = dp->dp_meta_objset;
13fe0198
MA
1773 struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
1774 const char *tofs = drba->drba_cookie->drc_tofs;
b5256303 1775 uint64_t featureflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo);
13fe0198 1776 dsl_dataset_t *ds, *newds;
b5256303 1777 objset_t *os;
34dc7c2f 1778 uint64_t dsobj;
b5256303 1779 ds_hold_flags_t dsflags = 0;
13fe0198 1780 int error;
47dfff3b 1781 uint64_t crflags = 0;
b5256303
TC
1782 dsl_crypto_params_t *dcpp = NULL;
1783 dsl_crypto_params_t dcp = { 0 };
13fe0198 1784
47dfff3b
MA
1785 if (drrb->drr_flags & DRR_FLAG_CI_DATA)
1786 crflags |= DS_FLAG_CI_DATASET;
b5256303
TC
1787 if ((featureflags & DMU_BACKUP_FEATURE_RAW) == 0) {
1788 dsflags |= DS_HOLD_FLAG_DECRYPT;
1789 } else {
1790 dcp.cp_cmd = DCP_CMD_RAW_RECV;
1791 }
34dc7c2f 1792
b5256303 1793 error = dsl_dataset_hold_flags(dp, tofs, dsflags, FTAG, &ds);
13fe0198
MA
1794 if (error == 0) {
1795 /* create temporary clone */
19580676 1796 dsl_dataset_t *snap = NULL;
b5256303 1797
19580676
MA
1798 if (drba->drba_snapobj != 0) {
1799 VERIFY0(dsl_dataset_hold_obj(dp,
1800 drba->drba_snapobj, FTAG, &snap));
b5256303
TC
1801 } else {
1802 /* we use the dcp whenever we are not making a clone */
1803 dcpp = &dcp;
19580676 1804 }
b5256303 1805
13fe0198 1806 dsobj = dsl_dataset_create_sync(ds->ds_dir, recv_clone_name,
b5256303 1807 snap, crflags, drba->drba_cred, dcpp, tx);
6b42ea85
PD
1808 if (drba->drba_snapobj != 0)
1809 dsl_dataset_rele(snap, FTAG);
b5256303 1810 dsl_dataset_rele_flags(ds, dsflags, FTAG);
13fe0198
MA
1811 } else {
1812 dsl_dir_t *dd;
1813 const char *tail;
1814 dsl_dataset_t *origin = NULL;
1815
1816 VERIFY0(dsl_dir_hold(dp, tofs, FTAG, &dd, &tail));
1817
1818 if (drba->drba_origin != NULL) {
1819 VERIFY0(dsl_dataset_hold(dp, drba->drba_origin,
1820 FTAG, &origin));
b5256303
TC
1821 } else {
1822 /* we use the dcp whenever we are not making a clone */
1823 dcpp = &dcp;
13fe0198
MA
1824 }
1825
1826 /* Create new dataset. */
b5256303
TC
1827 dsobj = dsl_dataset_create_sync(dd, strrchr(tofs, '/') + 1,
1828 origin, crflags, drba->drba_cred, dcpp, tx);
13fe0198
MA
1829 if (origin != NULL)
1830 dsl_dataset_rele(origin, FTAG);
1831 dsl_dir_rele(dd, FTAG);
1832 drba->drba_cookie->drc_newfs = B_TRUE;
1833 }
b5256303
TC
1834 VERIFY0(dsl_dataset_own_obj(dp, dsobj, dsflags, dmu_recv_tag, &newds));
1835 VERIFY0(dmu_objset_from_ds(newds, &os));
13fe0198 1836
47dfff3b 1837 if (drba->drba_cookie->drc_resumable) {
47dfff3b
MA
1838 dsl_dataset_zapify(newds, tx);
1839 if (drrb->drr_fromguid != 0) {
1840 VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_FROMGUID,
1841 8, 1, &drrb->drr_fromguid, tx));
1842 }
1843 VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_TOGUID,
1844 8, 1, &drrb->drr_toguid, tx));
1845 VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_TONAME,
1846 1, strlen(drrb->drr_toname) + 1, drrb->drr_toname, tx));
1c27024e
DB
1847 uint64_t one = 1;
1848 uint64_t zero = 0;
47dfff3b
MA
1849 VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_OBJECT,
1850 8, 1, &one, tx));
1851 VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_OFFSET,
1852 8, 1, &zero, tx));
1853 VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_BYTES,
1854 8, 1, &zero, tx));
b5256303 1855 if (featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) {
2aa34383 1856 VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_LARGEBLOCK,
02730c33 1857 8, 1, &one, tx));
2aa34383 1858 }
b5256303 1859 if (featureflags & DMU_BACKUP_FEATURE_EMBED_DATA) {
47dfff3b
MA
1860 VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_EMBEDOK,
1861 8, 1, &one, tx));
1862 }
b5256303 1863 if (featureflags & DMU_BACKUP_FEATURE_COMPRESSED) {
2aa34383
DK
1864 VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_COMPRESSOK,
1865 8, 1, &one, tx));
1866 }
b5256303
TC
1867 if (featureflags & DMU_BACKUP_FEATURE_RAW) {
1868 VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_RAWOK,
1869 8, 1, &one, tx));
1870 }
1871 }
1872
1873 /*
1874 * Usually the os->os_encrypted value is tied to the presence of a
1875 * DSL Crypto Key object in the dd. However, that will not be received
1876 * until dmu_recv_stream(), so we set the value manually for now.
1877 */
1878 if (featureflags & DMU_BACKUP_FEATURE_RAW) {
1879 os->os_encrypted = B_TRUE;
1880 drba->drba_cookie->drc_raw = B_TRUE;
47dfff3b
MA
1881 }
1882
13fe0198 1883 dmu_buf_will_dirty(newds->ds_dbuf, tx);
d683ddbb 1884 dsl_dataset_phys(newds)->ds_flags |= DS_FLAG_INCONSISTENT;
34dc7c2f 1885
428870ff 1886 /*
b5256303
TC
1887 * If we actually created a non-clone, we need to create the objset
1888 * in our new dataset. If this is a raw send we postpone this until
1889 * dmu_recv_stream() so that we can allocate the metadnode with the
1890 * properties from the DRR_BEGIN payload.
428870ff 1891 */
cc9bb3e5 1892 rrw_enter(&newds->ds_bp_rwlock, RW_READER, FTAG);
b5256303
TC
1893 if (BP_IS_HOLE(dsl_dataset_get_blkptr(newds)) &&
1894 (featureflags & DMU_BACKUP_FEATURE_RAW) == 0) {
428870ff 1895 (void) dmu_objset_create_impl(dp->dp_spa,
13fe0198 1896 newds, dsl_dataset_get_blkptr(newds), drrb->drr_type, tx);
428870ff 1897 }
cc9bb3e5 1898 rrw_exit(&newds->ds_bp_rwlock, FTAG);
34dc7c2f 1899
13fe0198 1900 drba->drba_cookie->drc_ds = newds;
428870ff 1901
13fe0198 1902 spa_history_log_internal_ds(newds, "receive", tx, "");
34dc7c2f
BB
1903}
1904
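/*
 * Validate a resumed receive: the partially received dataset (either the
 * %recv clone or tofs itself) must exist, be marked inconsistent and
 * unowned, and carry resume state whose toguid and fromguid match the
 * stream being resumed.
 */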
static int
dmu_recv_resume_begin_check(void *arg, dmu_tx_t *tx)
{
	dmu_recv_begin_arg_t *drba = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
	int error;
	ds_hold_flags_t dsflags = 0;
	uint64_t featureflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo);
	dsl_dataset_t *ds;
	const char *tofs = drba->drba_cookie->drc_tofs;

	/* already checked */
	ASSERT3U(drrb->drr_magic, ==, DMU_BACKUP_MAGIC);
	ASSERT(featureflags & DMU_BACKUP_FEATURE_RESUMING);

	if (DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) ==
	    DMU_COMPOUNDSTREAM ||
	    drrb->drr_type >= DMU_OST_NUMTYPES)
		return (SET_ERROR(EINVAL));

	/* Verify pool version supports SA if SA_SPILL feature set */
	if ((featureflags & DMU_BACKUP_FEATURE_SA_SPILL) &&
	    spa_version(dp->dp_spa) < SPA_VERSION_SA)
		return (SET_ERROR(ENOTSUP));

	/*
	 * The receiving code doesn't know how to translate a WRITE_EMBEDDED
	 * record to a plain WRITE record, so the pool must have the
	 * EMBEDDED_DATA feature enabled if the stream has WRITE_EMBEDDED
	 * records. Same with WRITE_EMBEDDED records that use LZ4 compression.
	 */
	if ((featureflags & DMU_BACKUP_FEATURE_EMBED_DATA) &&
	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_EMBEDDED_DATA))
		return (SET_ERROR(ENOTSUP));
	if ((featureflags & DMU_BACKUP_FEATURE_LZ4) &&
	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LZ4_COMPRESS))
		return (SET_ERROR(ENOTSUP));

	/*
	 * The receiving code doesn't know how to translate large blocks
	 * to smaller ones, so the pool must have the LARGE_BLOCKS
	 * feature enabled if the stream has LARGE_BLOCKS. Same with
	 * large dnodes.
	 */
	if ((featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) &&
	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LARGE_BLOCKS))
		return (SET_ERROR(ENOTSUP));
	if ((featureflags & DMU_BACKUP_FEATURE_LARGE_DNODE) &&
	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LARGE_DNODE))
		return (SET_ERROR(ENOTSUP));

	/* 6 extra bytes for /%recv */
	char recvname[ZFS_MAX_DATASET_NAME_LEN + 6];
	(void) snprintf(recvname, sizeof (recvname), "%s/%s",
	    tofs, recv_clone_name);

	if ((featureflags & DMU_BACKUP_FEATURE_RAW) == 0)
		dsflags |= DS_HOLD_FLAG_DECRYPT;

	if (dsl_dataset_hold_flags(dp, recvname, dsflags, FTAG, &ds) != 0) {
		/* %recv does not exist; continue in tofs */
		error = dsl_dataset_hold_flags(dp, tofs, dsflags, FTAG, &ds);
		if (error != 0)
			return (error);
	}

	/* check that ds is marked inconsistent */
	if (!DS_IS_INCONSISTENT(ds)) {
		dsl_dataset_rele_flags(ds, dsflags, FTAG);
		return (SET_ERROR(EINVAL));
	}

	/* check that there is resuming data, and that the toguid matches */
	if (!dsl_dataset_is_zapified(ds)) {
		dsl_dataset_rele_flags(ds, dsflags, FTAG);
		return (SET_ERROR(EINVAL));
	}
	uint64_t val;
	error = zap_lookup(dp->dp_meta_objset, ds->ds_object,
	    DS_FIELD_RESUME_TOGUID, sizeof (val), 1, &val);
	if (error != 0 || drrb->drr_toguid != val) {
		dsl_dataset_rele_flags(ds, dsflags, FTAG);
		return (SET_ERROR(EINVAL));
	}

	/*
	 * Check if the receive is still running. If so, it will be owned.
	 * Note that nothing else can own the dataset (e.g. after the receive
	 * fails) because it will be marked inconsistent.
	 */
	if (dsl_dataset_has_owner(ds)) {
		dsl_dataset_rele_flags(ds, dsflags, FTAG);
		return (SET_ERROR(EBUSY));
	}

	/* There should not be any snapshots of this fs yet. */
	if (ds->ds_prev != NULL && ds->ds_prev->ds_dir == ds->ds_dir) {
		dsl_dataset_rele_flags(ds, dsflags, FTAG);
		return (SET_ERROR(EINVAL));
	}

	/*
	 * Note: resume point will be checked when we process the first WRITE
	 * record.
	 */

	/* check that the origin matches */
	val = 0;
	(void) zap_lookup(dp->dp_meta_objset, ds->ds_object,
	    DS_FIELD_RESUME_FROMGUID, sizeof (val), 1, &val);
	if (drrb->drr_fromguid != val) {
		dsl_dataset_rele_flags(ds, dsflags, FTAG);
		return (SET_ERROR(EINVAL));
	}

	dsl_dataset_rele_flags(ds, dsflags, FTAG);
	return (0);
}

static void
dmu_recv_resume_begin_sync(void *arg, dmu_tx_t *tx)
{
	dmu_recv_begin_arg_t *drba = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	const char *tofs = drba->drba_cookie->drc_tofs;
	struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
	uint64_t featureflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo);
	dsl_dataset_t *ds;
	objset_t *os;
	ds_hold_flags_t dsflags = 0;
	uint64_t dsobj;
	/* 6 extra bytes for /%recv */
	char recvname[ZFS_MAX_DATASET_NAME_LEN + 6];

	(void) snprintf(recvname, sizeof (recvname), "%s/%s",
	    tofs, recv_clone_name);

	if (featureflags & DMU_BACKUP_FEATURE_RAW) {
		drba->drba_cookie->drc_raw = B_TRUE;
	} else {
		dsflags |= DS_HOLD_FLAG_DECRYPT;
	}

	if (dsl_dataset_hold_flags(dp, recvname, dsflags, FTAG, &ds) != 0) {
		/* %recv does not exist; continue in tofs */
		VERIFY0(dsl_dataset_hold_flags(dp, tofs, dsflags, FTAG, &ds));
		drba->drba_cookie->drc_newfs = B_TRUE;
	}

	/* clear the inconsistent flag so that we can own it */
	ASSERT(DS_IS_INCONSISTENT(ds));
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	dsl_dataset_phys(ds)->ds_flags &= ~DS_FLAG_INCONSISTENT;
	dsobj = ds->ds_object;
	dsl_dataset_rele_flags(ds, dsflags, FTAG);

	VERIFY0(dsl_dataset_own_obj(dp, dsobj, dsflags, dmu_recv_tag, &ds));
	VERIFY0(dmu_objset_from_ds(ds, &os));

	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	dsl_dataset_phys(ds)->ds_flags |= DS_FLAG_INCONSISTENT;

	rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
	ASSERT(!BP_IS_HOLE(dsl_dataset_get_blkptr(ds)) ||
	    drba->drba_cookie->drc_raw);
	rrw_exit(&ds->ds_bp_rwlock, FTAG);

	drba->drba_cookie->drc_ds = ds;

	spa_history_log_internal_ds(ds, "resume receive", tx, "");
}

/*
 * NB: callers *MUST* call dmu_recv_stream() if dmu_recv_begin()
 * succeeds; otherwise we will leak the holds on the datasets.
 */
int
dmu_recv_begin(char *tofs, char *tosnap, dmu_replay_record_t *drr_begin,
    boolean_t force, boolean_t resumable, char *origin, dmu_recv_cookie_t *drc)
{
	dmu_recv_begin_arg_t drba = { 0 };

	bzero(drc, sizeof (dmu_recv_cookie_t));
	drc->drc_drr_begin = drr_begin;
	drc->drc_drrb = &drr_begin->drr_u.drr_begin;
	drc->drc_tosnap = tosnap;
	drc->drc_tofs = tofs;
	drc->drc_force = force;
	drc->drc_resumable = resumable;
	drc->drc_cred = CRED();
	drc->drc_clone = (origin != NULL);

	if (drc->drc_drrb->drr_magic == BSWAP_64(DMU_BACKUP_MAGIC)) {
		drc->drc_byteswap = B_TRUE;
		(void) fletcher_4_incremental_byteswap(drr_begin,
		    sizeof (dmu_replay_record_t), &drc->drc_cksum);
		byteswap_record(drr_begin);
	} else if (drc->drc_drrb->drr_magic == DMU_BACKUP_MAGIC) {
		(void) fletcher_4_incremental_native(drr_begin,
		    sizeof (dmu_replay_record_t), &drc->drc_cksum);
	} else {
		return (SET_ERROR(EINVAL));
	}

	drba.drba_origin = origin;
	drba.drba_cookie = drc;
	drba.drba_cred = CRED();

	if (DMU_GET_FEATUREFLAGS(drc->drc_drrb->drr_versioninfo) &
	    DMU_BACKUP_FEATURE_RESUMING) {
		return (dsl_sync_task(tofs,
		    dmu_recv_resume_begin_check, dmu_recv_resume_begin_sync,
		    &drba, 5, ZFS_SPACE_CHECK_NORMAL));
	} else {
		return (dsl_sync_task(tofs,
		    dmu_recv_begin_check, dmu_recv_begin_sync,
		    &drba, 5, ZFS_SPACE_CHECK_NORMAL));
	}
}

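/*
 * The receive path is split between a reader thread, which reads records
 * from the stream into receive_record_arg structures, and a writer thread,
 * which applies them to the objset; records flow between the two through
 * the bqueue in receive_writer_arg.
 */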
struct receive_record_arg {
	dmu_replay_record_t header;
	void *payload; /* Pointer to a buffer containing the payload */
	/*
	 * If the record is a write, pointer to the arc_buf_t containing the
	 * payload.
	 */
	arc_buf_t *arc_buf;
	int payload_size;
	uint64_t bytes_read; /* bytes read from stream when record created */
	boolean_t eos_marker; /* Marks the end of the stream */
	bqueue_node_t node;
};

struct receive_writer_arg {
	objset_t *os;
	boolean_t byteswap;
	bqueue_t q;

	/*
	 * These three args are used to signal to the main thread that we're
	 * done.
	 */
	kmutex_t mutex;
	kcondvar_t cv;
	boolean_t done;

	int err;
	/* A map from guid to dataset to help handle dedup'd streams. */
	avl_tree_t *guid_to_ds_map;
	boolean_t resumable;
	boolean_t raw;
	uint64_t last_object;
	uint64_t last_offset;
	uint64_t max_object; /* highest object ID referenced in stream */
	uint64_t bytes_read; /* bytes read when current record created */
};

struct objlist {
	list_t list; /* List of struct receive_objnode. */
	/*
	 * Last object looked up. Used to assert that objects are being looked
	 * up in ascending order.
	 */
	uint64_t last_lookup;
};

struct receive_objnode {
	list_node_t node;
	uint64_t object;
};

struct receive_arg {
	objset_t *os;
	vnode_t *vp; /* The vnode to read the stream from */
	uint64_t voff; /* The current offset in the stream */
	uint64_t bytes_read;
	/*
	 * A record that has had its payload read in, but hasn't yet been handed
	 * off to the worker thread.
	 */
	struct receive_record_arg *rrd;
	/* A record that has had its header read in, but not its payload. */
	struct receive_record_arg *next_rrd;
	zio_cksum_t cksum;
	zio_cksum_t prev_cksum;
	int err;
	boolean_t byteswap;
	boolean_t raw;
	uint64_t featureflags;
	/* Sorted list of objects not to issue prefetches for. */
	struct objlist ignore_objlist;
};

typedef struct guid_map_entry {
	uint64_t guid;
	boolean_t raw;
	dsl_dataset_t *gme_ds;
	avl_node_t avlnode;
} guid_map_entry_t;

static int
guid_compare(const void *arg1, const void *arg2)
{
	const guid_map_entry_t *gmep1 = (const guid_map_entry_t *)arg1;
	const guid_map_entry_t *gmep2 = (const guid_map_entry_t *)arg2;

	return (AVL_CMP(gmep1->guid, gmep2->guid));
}

static void
free_guid_map_onexit(void *arg)
{
	avl_tree_t *ca = arg;
	void *cookie = NULL;
	guid_map_entry_t *gmep;

	while ((gmep = avl_destroy_nodes(ca, &cookie)) != NULL) {
		dsl_dataset_long_rele(gmep->gme_ds, gmep);
		dsl_dataset_rele_flags(gmep->gme_ds,
		    (gmep->raw) ? 0 : DS_HOLD_FLAG_DECRYPT, gmep);
		kmem_free(gmep, sizeof (guid_map_entry_t));
	}
	avl_destroy(ca);
	kmem_free(ca, sizeof (avl_tree_t));
}

37f8a883 2233static int
fcff0f35 2234receive_read(struct receive_arg *ra, int len, void *buf)
34dc7c2f 2235{
34dc7c2f
BB
2236 int done = 0;
2237
f8866f8a
ER
2238 /*
2239 * The code doesn't rely on this (lengths being multiples of 8). See
2240 * comment in dump_bytes.
2241 */
b5256303
TC
2242 ASSERT(len % 8 == 0 ||
2243 (ra->featureflags & DMU_BACKUP_FEATURE_RAW) != 0);
34dc7c2f
BB
2244
2245 while (done < len) {
2246 ssize_t resid;
2247
2248 ra->err = vn_rdwr(UIO_READ, ra->vp,
37f8a883 2249 (char *)buf + done, len - done,
34dc7c2f
BB
2250 ra->voff, UIO_SYSSPACE, FAPPEND,
2251 RLIM64_INFINITY, CRED(), &resid);
2252
47dfff3b
MA
2253 if (resid == len - done) {
2254 /*
2255 * Note: ECKSUM indicates that the receive
2256 * was interrupted and can potentially be resumed.
2257 */
2258 ra->err = SET_ERROR(ECKSUM);
2259 }
34dc7c2f
BB
2260 ra->voff += len - done - resid;
2261 done = len - resid;
13fe0198 2262 if (ra->err != 0)
37f8a883 2263 return (ra->err);
34dc7c2f
BB
2264 }
2265
47dfff3b
MA
2266 ra->bytes_read += len;
2267
34dc7c2f 2268 ASSERT3U(done, ==, len);
37f8a883 2269 return (0);
34dc7c2f
BB
2270}
2271
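/*
 * Byteswap the fixed-width fields of a replay record in place. This is needed
 * when the stream was generated on a host of the opposite endianness, as
 * indicated by a byteswapped DMU_BACKUP_MAGIC in the BEGIN record.
 */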
60948de1 2272noinline static void
37f8a883 2273byteswap_record(dmu_replay_record_t *drr)
34dc7c2f
BB
2274{
2275#define DO64(X) (drr->drr_u.X = BSWAP_64(drr->drr_u.X))
2276#define DO32(X) (drr->drr_u.X = BSWAP_32(drr->drr_u.X))
2277 drr->drr_type = BSWAP_32(drr->drr_type);
2278 drr->drr_payloadlen = BSWAP_32(drr->drr_payloadlen);
37f8a883 2279
34dc7c2f
BB
2280 switch (drr->drr_type) {
2281 case DRR_BEGIN:
2282 DO64(drr_begin.drr_magic);
428870ff 2283 DO64(drr_begin.drr_versioninfo);
34dc7c2f
BB
2284 DO64(drr_begin.drr_creation_time);
2285 DO32(drr_begin.drr_type);
2286 DO32(drr_begin.drr_flags);
2287 DO64(drr_begin.drr_toguid);
2288 DO64(drr_begin.drr_fromguid);
2289 break;
2290 case DRR_OBJECT:
2291 DO64(drr_object.drr_object);
34dc7c2f
BB
2292 DO32(drr_object.drr_type);
2293 DO32(drr_object.drr_bonustype);
2294 DO32(drr_object.drr_blksz);
2295 DO32(drr_object.drr_bonuslen);
b5256303 2296 DO32(drr_object.drr_raw_bonuslen);
428870ff 2297 DO64(drr_object.drr_toguid);
ae76f45c 2298 DO64(drr_object.drr_maxblkid);
34dc7c2f
BB
2299 break;
2300 case DRR_FREEOBJECTS:
2301 DO64(drr_freeobjects.drr_firstobj);
2302 DO64(drr_freeobjects.drr_numobjs);
428870ff 2303 DO64(drr_freeobjects.drr_toguid);
34dc7c2f
BB
2304 break;
2305 case DRR_WRITE:
2306 DO64(drr_write.drr_object);
2307 DO32(drr_write.drr_type);
2308 DO64(drr_write.drr_offset);
2aa34383 2309 DO64(drr_write.drr_logical_size);
428870ff 2310 DO64(drr_write.drr_toguid);
37f8a883 2311 ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_write.drr_key.ddk_cksum);
428870ff 2312 DO64(drr_write.drr_key.ddk_prop);
2aa34383 2313 DO64(drr_write.drr_compressed_size);
428870ff
BB
2314 break;
2315 case DRR_WRITE_BYREF:
2316 DO64(drr_write_byref.drr_object);
2317 DO64(drr_write_byref.drr_offset);
2318 DO64(drr_write_byref.drr_length);
2319 DO64(drr_write_byref.drr_toguid);
2320 DO64(drr_write_byref.drr_refguid);
2321 DO64(drr_write_byref.drr_refobject);
2322 DO64(drr_write_byref.drr_refoffset);
37f8a883
MA
2323 ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_write_byref.
2324 drr_key.ddk_cksum);
428870ff 2325 DO64(drr_write_byref.drr_key.ddk_prop);
34dc7c2f 2326 break;
9b67f605
MA
2327 case DRR_WRITE_EMBEDDED:
2328 DO64(drr_write_embedded.drr_object);
2329 DO64(drr_write_embedded.drr_offset);
2330 DO64(drr_write_embedded.drr_length);
2331 DO64(drr_write_embedded.drr_toguid);
2332 DO32(drr_write_embedded.drr_lsize);
2333 DO32(drr_write_embedded.drr_psize);
2334 break;
34dc7c2f
BB
2335 case DRR_FREE:
2336 DO64(drr_free.drr_object);
2337 DO64(drr_free.drr_offset);
2338 DO64(drr_free.drr_length);
428870ff
BB
2339 DO64(drr_free.drr_toguid);
2340 break;
2341 case DRR_SPILL:
2342 DO64(drr_spill.drr_object);
2343 DO64(drr_spill.drr_length);
2344 DO64(drr_spill.drr_toguid);
b5256303
TC
2345 DO64(drr_spill.drr_compressed_size);
2346 DO32(drr_spill.drr_type);
2347 break;
2348 case DRR_OBJECT_RANGE:
2349 DO64(drr_object_range.drr_firstobj);
2350 DO64(drr_object_range.drr_numslots);
2351 DO64(drr_object_range.drr_toguid);
34dc7c2f
BB
2352 break;
2353 case DRR_END:
428870ff 2354 DO64(drr_end.drr_toguid);
37f8a883 2355 ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_end.drr_checksum);
34dc7c2f 2356 break;
e75c13c3
BB
2357 default:
2358 break;
34dc7c2f 2359 }
37f8a883
MA
2360
2361 if (drr->drr_type != DRR_BEGIN) {
2362 ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_checksum.drr_checksum);
2363 }
2364
34dc7c2f
BB
2365#undef DO64
2366#undef DO32
2367}
2368
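/*
 * Determine how many block pointers a dnode with the given bonus type and
 * size will have: SA dnodes always use a single block pointer, while other
 * types turn any bonus space beyond the bonus buffer into additional block
 * pointers.
 */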
static inline uint8_t
deduce_nblkptr(dmu_object_type_t bonus_type, uint64_t bonus_size)
{
	if (bonus_type == DMU_OT_SA) {
		return (1);
	} else {
		return (1 +
		    ((DN_OLD_MAX_BONUSLEN -
		    MIN(DN_OLD_MAX_BONUSLEN, bonus_size)) >> SPA_BLKPTRSHIFT));
	}
}

static void
save_resume_state(struct receive_writer_arg *rwa,
    uint64_t object, uint64_t offset, dmu_tx_t *tx)
{
	int txgoff = dmu_tx_get_txg(tx) & TXG_MASK;

	if (!rwa->resumable)
		return;

	/*
	 * We use ds_resume_bytes[] != 0 to indicate that we need to
	 * update this on disk, so it must not be 0.
	 */
	ASSERT(rwa->bytes_read != 0);

	/*
	 * We only resume from write records, which have a valid
	 * (non-meta-dnode) object number.
	 */
	ASSERT(object != 0);

	/*
	 * For resuming to work correctly, we must receive records in order,
	 * sorted by object,offset. This is checked by the callers, but
	 * assert it here for good measure.
	 */
	ASSERT3U(object, >=, rwa->os->os_dsl_dataset->ds_resume_object[txgoff]);
	ASSERT(object != rwa->os->os_dsl_dataset->ds_resume_object[txgoff] ||
	    offset >= rwa->os->os_dsl_dataset->ds_resume_offset[txgoff]);
	ASSERT3U(rwa->bytes_read, >=,
	    rwa->os->os_dsl_dataset->ds_resume_bytes[txgoff]);

	rwa->os->os_dsl_dataset->ds_resume_object[txgoff] = object;
	rwa->os->os_dsl_dataset->ds_resume_offset[txgoff] = offset;
	rwa->os->os_dsl_dataset->ds_resume_bytes[txgoff] = rwa->bytes_read;
}

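/*
 * Apply a DRR_OBJECT record: validate it against the stream's feature flags,
 * free or reclaim any existing object whose on-disk structure is incompatible
 * with the incoming dnode, then (re)allocate the object and install its bonus
 * buffer.
 */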
60948de1 2418noinline static int
fcff0f35 2419receive_object(struct receive_writer_arg *rwa, struct drr_object *drro,
4ea3f864 2420 void *data)
34dc7c2f 2421{
6c59307a 2422 dmu_object_info_t doi;
34dc7c2f 2423 dmu_tx_t *tx;
6c59307a
MA
2424 uint64_t object;
2425 int err;
34dc7c2f 2426
34dc7c2f 2427 if (drro->drr_type == DMU_OT_NONE ||
9ae529ec
CS
2428 !DMU_OT_IS_VALID(drro->drr_type) ||
2429 !DMU_OT_IS_VALID(drro->drr_bonustype) ||
428870ff 2430 drro->drr_checksumtype >= ZIO_CHECKSUM_FUNCTIONS ||
34dc7c2f
BB
2431 drro->drr_compress >= ZIO_COMPRESS_FUNCTIONS ||
2432 P2PHASE(drro->drr_blksz, SPA_MINBLOCKSIZE) ||
2433 drro->drr_blksz < SPA_MINBLOCKSIZE ||
fcff0f35 2434 drro->drr_blksz > spa_maxblocksize(dmu_objset_spa(rwa->os)) ||
50c957f7 2435 drro->drr_bonuslen >
73aac4aa
NB
2436 DN_BONUS_SIZE(spa_maxdnodesize(dmu_objset_spa(rwa->os))) ||
2437 drro->drr_dn_slots >
2438 (spa_maxdnodesize(dmu_objset_spa(rwa->os)) >> DNODE_SHIFT)) {
2e528b49 2439 return (SET_ERROR(EINVAL));
34dc7c2f
BB
2440 }
2441
9b840763 2442 if (rwa->raw) {
b5256303
TC
2443 if (drro->drr_raw_bonuslen < drro->drr_bonuslen ||
2444 drro->drr_indblkshift > SPA_MAXBLOCKSHIFT ||
2445 drro->drr_nlevels > DN_MAX_LEVELS ||
2446 drro->drr_nblkptr > DN_MAX_NBLKPTR ||
2447 DN_SLOTS_TO_BONUSLEN(drro->drr_dn_slots) <
2448 drro->drr_raw_bonuslen)
2449 return (SET_ERROR(EINVAL));
2450 } else {
2451 if (drro->drr_flags != 0 || drro->drr_raw_bonuslen != 0 ||
2452 drro->drr_indblkshift != 0 || drro->drr_nlevels != 0 ||
2453 drro->drr_nblkptr != 0)
2454 return (SET_ERROR(EINVAL));
2455 }
2456
fcff0f35 2457 err = dmu_object_info(rwa->os, drro->drr_object, &doi);
047116ac 2458 if (err != 0 && err != ENOENT && err != EEXIST)
2e528b49 2459 return (SET_ERROR(EINVAL));
9babb374 2460
48fbb9dd
FG
2461 if (drro->drr_object > rwa->max_object)
2462 rwa->max_object = drro->drr_object;
2463
6c59307a
MA
2464 /*
2465 * If we are losing blkptrs or changing the block size this must
2466 * be a new file instance. We must clear out the previous file
2467 * contents before we can change this type of metadata in the dnode.
b5256303
TC
2468 * Raw receives will also check that the indirect structure of the
2469 * dnode hasn't changed.
6c59307a
MA
2470 */
2471 if (err == 0) {
b5256303
TC
2472 uint32_t indblksz = drro->drr_indblkshift ?
2473 1ULL << drro->drr_indblkshift : 0;
2474 int nblkptr = deduce_nblkptr(drro->drr_bonustype,
6c59307a
MA
2475 drro->drr_bonuslen);
2476
047116ac
TC
2477 object = drro->drr_object;
2478
b5256303 2479 /* nblkptr will be bounded by the bonus size and type */
9b840763 2480 if (rwa->raw && nblkptr != drro->drr_nblkptr)
b5256303
TC
2481 return (SET_ERROR(EINVAL));
2482
ae76f45c
TC
2483 if (rwa->raw &&
2484 (drro->drr_blksz != doi.doi_data_block_size ||
b5256303 2485 nblkptr < doi.doi_nblkptr ||
ae76f45c 2486 indblksz != doi.doi_metadata_block_size ||
047116ac
TC
2487 drro->drr_nlevels < doi.doi_indirection ||
2488 drro->drr_dn_slots != doi.doi_dnodesize >> DNODE_SHIFT)) {
ae76f45c
TC
2489 err = dmu_free_long_range_raw(rwa->os,
2490 drro->drr_object, 0, DMU_OBJECT_END);
2491 if (err != 0)
2492 return (SET_ERROR(EINVAL));
2493 } else if (drro->drr_blksz != doi.doi_data_block_size ||
047116ac
TC
2494 nblkptr < doi.doi_nblkptr ||
2495 drro->drr_dn_slots != doi.doi_dnodesize >> DNODE_SHIFT) {
fcff0f35 2496 err = dmu_free_long_range(rwa->os, drro->drr_object,
6c59307a
MA
2497 0, DMU_OBJECT_END);
2498 if (err != 0)
2499 return (SET_ERROR(EINVAL));
34dc7c2f 2500 }
047116ac
TC
2501
2502 /*
2503 * The dmu does not currently support decreasing nlevels
2504 * on an object. For non-raw sends, this does not matter
2505 * and the new object can just use the previous one's nlevels.
2506 * For raw sends, however, the structure of the received dnode
2507 * (including nlevels) must match that of the send side.
2508 * Therefore, instead of using dmu_object_reclaim(), we must
2509 * free the object completely and call dmu_object_claim_dnsize()
2510 * instead.
2511 */
2512 if ((rwa->raw && drro->drr_nlevels < doi.doi_indirection) ||
2513 drro->drr_dn_slots != doi.doi_dnodesize >> DNODE_SHIFT) {
2514 if (rwa->raw) {
2515 err = dmu_free_long_object_raw(rwa->os,
2516 drro->drr_object);
2517 } else {
2518 err = dmu_free_long_object(rwa->os,
2519 drro->drr_object);
2520 }
2521 if (err != 0)
2522 return (SET_ERROR(EINVAL));
2523
2524 txg_wait_synced(dmu_objset_pool(rwa->os), 0);
2525 object = DMU_NEW_OBJECT;
2526 }
2527 } else if (err == EEXIST) {
2528 /*
2529 * The object requested is currently an interior slot of a
2530 * multi-slot dnode. This will be resolved when the next txg
2531 * is synced out, since the send stream will have told us
2532 * to free this slot when we freed the associated dnode
2533 * earlier in the stream.
2534 */
2535 txg_wait_synced(dmu_objset_pool(rwa->os), 0);
2536 object = drro->drr_object;
2537 } else {
2538 /* object is free and we are about to allocate a new one */
2539 object = DMU_NEW_OBJECT;
2540 }
2541
2542 /*
2543 * If this is a multi-slot dnode there is a chance that this
2544 * object will expand into a slot that is already used by
2545 * another object from the previous snapshot. We must free
2546 * these objects before we attempt to allocate the new dnode.
2547 */
2548 if (drro->drr_dn_slots > 1) {
5121c4fb
TC
2549 boolean_t need_sync = B_FALSE;
2550
047116ac
TC
2551 for (uint64_t slot = drro->drr_object + 1;
2552 slot < drro->drr_object + drro->drr_dn_slots;
2553 slot++) {
2554 dmu_object_info_t slot_doi;
2555
2556 err = dmu_object_info(rwa->os, slot, &slot_doi);
2557 if (err == ENOENT || err == EEXIST)
2558 continue;
2559 else if (err != 0)
2560 return (err);
2561
2562 if (rwa->raw)
2563 err = dmu_free_long_object_raw(rwa->os, slot);
2564 else
2565 err = dmu_free_long_object(rwa->os, slot);
2566
2567 if (err != 0)
2568 return (err);
5121c4fb
TC
2569
2570 need_sync = B_TRUE;
047116ac
TC
2571 }
2572
5121c4fb
TC
2573 if (need_sync)
2574 txg_wait_synced(dmu_objset_pool(rwa->os), 0);
6c59307a
MA
2575 }
2576
fcff0f35 2577 tx = dmu_tx_create(rwa->os);
6c59307a 2578 dmu_tx_hold_bonus(tx, object);
b5256303 2579 dmu_tx_hold_write(tx, object, 0, 0);
6c59307a
MA
2580 err = dmu_tx_assign(tx, TXG_WAIT);
2581 if (err != 0) {
2582 dmu_tx_abort(tx);
2583 return (err);
2584 }
2585
2586 if (object == DMU_NEW_OBJECT) {
2587 /* currently free, want to be allocated */
50c957f7 2588 err = dmu_object_claim_dnsize(rwa->os, drro->drr_object,
34dc7c2f 2589 drro->drr_type, drro->drr_blksz,
50c957f7
NB
2590 drro->drr_bonustype, drro->drr_bonuslen,
2591 drro->drr_dn_slots << DNODE_SHIFT, tx);
6c59307a
MA
2592 } else if (drro->drr_type != doi.doi_type ||
2593 drro->drr_blksz != doi.doi_data_block_size ||
2594 drro->drr_bonustype != doi.doi_bonus_type ||
e0dd0a32 2595 drro->drr_bonuslen != doi.doi_bonus_size) {
6c59307a 2596 /* currently allocated, but with different properties */
e0dd0a32 2597 err = dmu_object_reclaim(rwa->os, drro->drr_object,
34dc7c2f 2598 drro->drr_type, drro->drr_blksz,
e0dd0a32 2599 drro->drr_bonustype, drro->drr_bonuslen, tx);
34dc7c2f 2600 }
13fe0198 2601 if (err != 0) {
9b840763 2602 dmu_tx_commit(tx);
2e528b49 2603 return (SET_ERROR(EINVAL));
428870ff 2604 }
9babb374 2605
9b840763
TC
2606 if (rwa->raw)
2607 VERIFY0(dmu_object_dirty_raw(rwa->os, drro->drr_object, tx));
2608
fcff0f35 2609 dmu_object_set_checksum(rwa->os, drro->drr_object,
37f8a883 2610 drro->drr_checksumtype, tx);
fcff0f35 2611 dmu_object_set_compress(rwa->os, drro->drr_object,
37f8a883 2612 drro->drr_compress, tx);
34dc7c2f 2613
b5256303 2614 /* handle more restrictive dnode structuring for raw recvs */
9b840763 2615 if (rwa->raw) {
b5256303
TC
2616 /*
2617 * Set the indirect block shift and nlevels. This will not fail
2618 * because we ensured all of the blocks were free earlier if
2619 * this is a new object.
2620 */
2621 VERIFY0(dmu_object_set_blocksize(rwa->os, drro->drr_object,
2622 drro->drr_blksz, drro->drr_indblkshift, tx));
2623 VERIFY0(dmu_object_set_nlevels(rwa->os, drro->drr_object,
2624 drro->drr_nlevels, tx));
ae76f45c
TC
2625 VERIFY0(dmu_object_set_maxblkid(rwa->os, drro->drr_object,
2626 drro->drr_maxblkid, tx));
b5256303
TC
2627 }
2628
b128c09f 2629 if (data != NULL) {
34dc7c2f 2630 dmu_buf_t *db;
b5256303 2631 uint32_t flags = DMU_READ_NO_PREFETCH;
b128c09f 2632
9b840763 2633 if (rwa->raw)
b5256303
TC
2634 flags |= DMU_READ_NO_DECRYPT;
2635
2636 VERIFY0(dmu_bonus_hold_impl(rwa->os, drro->drr_object,
2637 FTAG, flags, &db));
34dc7c2f
BB
2638 dmu_buf_will_dirty(db, tx);
2639
2640 ASSERT3U(db->db_size, >=, drro->drr_bonuslen);
b5256303
TC
2641 bcopy(data, db->db_data, DRR_OBJECT_PAYLOAD_SIZE(drro));
2642
2643 /*
2644 * Raw bonus buffers have their byteorder determined by the
2645 * DRR_OBJECT_RANGE record.
2646 */
9b840763 2647 if (rwa->byteswap && !rwa->raw) {
9ae529ec
CS
2648 dmu_object_byteswap_t byteswap =
2649 DMU_OT_BYTESWAP(drro->drr_bonustype);
2650 dmu_ot_byteswap[byteswap].ob_func(db->db_data,
b5256303 2651 DRR_OBJECT_PAYLOAD_SIZE(drro));
34dc7c2f
BB
2652 }
2653 dmu_buf_rele(db, FTAG);
2654 }
2655 dmu_tx_commit(tx);
47dfff3b 2656
34dc7c2f
BB
2657 return (0);
2658}
2659
/* ARGSUSED */
noinline static int
receive_freeobjects(struct receive_writer_arg *rwa,
    struct drr_freeobjects *drrfo)
{
	uint64_t obj;
	int next_err = 0;

	if (drrfo->drr_firstobj + drrfo->drr_numobjs < drrfo->drr_firstobj)
		return (SET_ERROR(EINVAL));

	for (obj = drrfo->drr_firstobj == 0 ? 1 : drrfo->drr_firstobj;
	    obj < drrfo->drr_firstobj + drrfo->drr_numobjs && next_err == 0;
	    next_err = dmu_object_next(rwa->os, &obj, FALSE, 0)) {
		dmu_object_info_t doi;
		int err;

		err = dmu_object_info(rwa->os, obj, &doi);
		if (err == ENOENT)
			continue;
		else if (err != 0)
			return (err);

		if (rwa->raw)
			err = dmu_free_long_object_raw(rwa->os, obj);
		else
			err = dmu_free_long_object(rwa->os, obj);

		if (err != 0)
			return (err);

		if (obj > rwa->max_object)
			rwa->max_object = obj;
	}
	if (next_err != ESRCH)
		return (next_err);
	return (0);
}

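/*
 * Apply a DRR_WRITE record. Write records must arrive in increasing
 * (object, offset) order; this is what allows an interrupted receive to be
 * resumed from the last successfully received record.
 */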
60948de1 2699noinline static int
fcff0f35 2700receive_write(struct receive_writer_arg *rwa, struct drr_write *drrw,
4ea3f864 2701 arc_buf_t *abuf)
34dc7c2f 2702{
34dc7c2f 2703 int err;
440a3eb9
TC
2704 dmu_tx_t *tx;
2705 dnode_t *dn;
34dc7c2f 2706
2aa34383 2707 if (drrw->drr_offset + drrw->drr_logical_size < drrw->drr_offset ||
9ae529ec 2708 !DMU_OT_IS_VALID(drrw->drr_type))
2e528b49 2709 return (SET_ERROR(EINVAL));
34dc7c2f 2710
47dfff3b
MA
2711 /*
2712 * For resuming to work, records must be in increasing order
2713 * by (object, offset).
2714 */
2715 if (drrw->drr_object < rwa->last_object ||
2716 (drrw->drr_object == rwa->last_object &&
2717 drrw->drr_offset < rwa->last_offset)) {
2718 return (SET_ERROR(EINVAL));
2719 }
2720 rwa->last_object = drrw->drr_object;
2721 rwa->last_offset = drrw->drr_offset;
2722
48fbb9dd
FG
2723 if (rwa->last_object > rwa->max_object)
2724 rwa->max_object = rwa->last_object;
2725
fcff0f35 2726 if (dmu_object_info(rwa->os, drrw->drr_object, NULL) != 0)
88904bb3
MA
2727 return (SET_ERROR(EINVAL));
2728
fcff0f35 2729 tx = dmu_tx_create(rwa->os);
34dc7c2f 2730 dmu_tx_hold_write(tx, drrw->drr_object,
2aa34383 2731 drrw->drr_offset, drrw->drr_logical_size);
34dc7c2f 2732 err = dmu_tx_assign(tx, TXG_WAIT);
13fe0198 2733 if (err != 0) {
34dc7c2f
BB
2734 dmu_tx_abort(tx);
2735 return (err);
2736 }
9b840763
TC
2737
2738 if (rwa->raw)
2739 VERIFY0(dmu_object_dirty_raw(rwa->os, drrw->drr_object, tx));
2740
b5256303
TC
2741 if (rwa->byteswap && !arc_is_encrypted(abuf) &&
2742 arc_get_compression(abuf) == ZIO_COMPRESS_OFF) {
9ae529ec
CS
2743 dmu_object_byteswap_t byteswap =
2744 DMU_OT_BYTESWAP(drrw->drr_type);
37f8a883 2745 dmu_ot_byteswap[byteswap].ob_func(abuf->b_data,
2aa34383 2746 DRR_WRITE_PAYLOAD_SIZE(drrw));
9ae529ec 2747 }
37f8a883 2748
440a3eb9
TC
2749 VERIFY0(dnode_hold(rwa->os, drrw->drr_object, FTAG, &dn));
2750 dmu_assign_arcbuf_by_dnode(dn, drrw->drr_offset, abuf, tx);
2751 dnode_rele(dn, FTAG);
47dfff3b
MA
2752
2753 /*
2754 * Note: If the receive fails, we want the resume stream to start
2755 * with the same record that we last successfully received (as opposed
2756 * to the next record), so that we can verify that we are
2757 * resuming from the correct location.
2758 */
2759 save_resume_state(rwa, drrw->drr_object, drrw->drr_offset, tx);
34dc7c2f 2760 dmu_tx_commit(tx);
47dfff3b 2761
34dc7c2f
BB
2762 return (0);
2763}
2764
/*
 * Handle a DRR_WRITE_BYREF record. This record is used in dedup'ed
 * streams to refer to a copy of the data that is already on the
 * system because it came in earlier in the stream. This function
 * finds the earlier copy of the data, and uses that copy instead of
 * data from the stream to fulfill this write.
 */
static int
receive_write_byref(struct receive_writer_arg *rwa,
    struct drr_write_byref *drrwbr)
{
	dmu_tx_t *tx;
	int err;
	guid_map_entry_t gmesrch;
	guid_map_entry_t *gmep;
	avl_index_t where;
	objset_t *ref_os = NULL;
	int flags = DMU_READ_PREFETCH;
	dmu_buf_t *dbp;

	if (drrwbr->drr_offset + drrwbr->drr_length < drrwbr->drr_offset)
		return (SET_ERROR(EINVAL));

	/*
	 * If the GUID of the referenced dataset is different from the
	 * GUID of the target dataset, find the referenced dataset.
	 */
	if (drrwbr->drr_toguid != drrwbr->drr_refguid) {
		gmesrch.guid = drrwbr->drr_refguid;
		if ((gmep = avl_find(rwa->guid_to_ds_map, &gmesrch,
		    &where)) == NULL) {
			return (SET_ERROR(EINVAL));
		}
		if (dmu_objset_from_ds(gmep->gme_ds, &ref_os))
			return (SET_ERROR(EINVAL));
	} else {
		ref_os = rwa->os;
	}

	if (drrwbr->drr_object > rwa->max_object)
		rwa->max_object = drrwbr->drr_object;

	if (rwa->raw)
		flags |= DMU_READ_NO_DECRYPT;

	/* may return either a regular db or an encrypted one */
	err = dmu_buf_hold(ref_os, drrwbr->drr_refobject,
	    drrwbr->drr_refoffset, FTAG, &dbp, flags);
	if (err != 0)
		return (err);

	tx = dmu_tx_create(rwa->os);

	dmu_tx_hold_write(tx, drrwbr->drr_object,
	    drrwbr->drr_offset, drrwbr->drr_length);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err != 0) {
		dmu_tx_abort(tx);
		return (err);
	}

	if (rwa->raw) {
		VERIFY0(dmu_object_dirty_raw(rwa->os, drrwbr->drr_object, tx));
		dmu_copy_from_buf(rwa->os, drrwbr->drr_object,
		    drrwbr->drr_offset, dbp, tx);
	} else {
		dmu_write(rwa->os, drrwbr->drr_object,
		    drrwbr->drr_offset, drrwbr->drr_length, dbp->db_data, tx);
	}
	dmu_buf_rele(dbp, FTAG);

	/* See comment in receive_write. */
	save_resume_state(rwa, drrwbr->drr_object, drrwbr->drr_offset, tx);
	dmu_tx_commit(tx);
	return (0);
}

9b67f605 2842static int
fcff0f35 2843receive_write_embedded(struct receive_writer_arg *rwa,
47dfff3b 2844 struct drr_write_embedded *drrwe, void *data)
9b67f605
MA
2845{
2846 dmu_tx_t *tx;
2847 int err;
9b67f605 2848
47dfff3b 2849 if (drrwe->drr_offset + drrwe->drr_length < drrwe->drr_offset)
ecb2b7dc 2850 return (SET_ERROR(EINVAL));
9b67f605 2851
47dfff3b 2852 if (drrwe->drr_psize > BPE_PAYLOAD_SIZE)
ecb2b7dc 2853 return (SET_ERROR(EINVAL));
9b67f605 2854
47dfff3b 2855 if (drrwe->drr_etype >= NUM_BP_EMBEDDED_TYPES)
ecb2b7dc 2856 return (SET_ERROR(EINVAL));
47dfff3b 2857 if (drrwe->drr_compression >= ZIO_COMPRESS_FUNCTIONS)
ecb2b7dc 2858 return (SET_ERROR(EINVAL));
440a3eb9
TC
2859 if (rwa->raw)
2860 return (SET_ERROR(EINVAL));
9b67f605 2861
48fbb9dd
FG
2862 if (drrwe->drr_object > rwa->max_object)
2863 rwa->max_object = drrwe->drr_object;
2864
fcff0f35 2865 tx = dmu_tx_create(rwa->os);
9b67f605 2866
47dfff3b
MA
2867 dmu_tx_hold_write(tx, drrwe->drr_object,
2868 drrwe->drr_offset, drrwe->drr_length);
9b67f605
MA
2869 err = dmu_tx_assign(tx, TXG_WAIT);
2870 if (err != 0) {
2871 dmu_tx_abort(tx);
2872 return (err);
2873 }
2874
47dfff3b
MA
2875 dmu_write_embedded(rwa->os, drrwe->drr_object,
2876 drrwe->drr_offset, data, drrwe->drr_etype,
2877 drrwe->drr_compression, drrwe->drr_lsize, drrwe->drr_psize,
fcff0f35 2878 rwa->byteswap ^ ZFS_HOST_BYTEORDER, tx);
9b67f605 2879
47dfff3b
MA
2880 /* See comment in restore_write. */
2881 save_resume_state(rwa, drrwe->drr_object, drrwe->drr_offset, tx);
9b67f605
MA
2882 dmu_tx_commit(tx);
2883 return (0);
2884}
2885
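/*
 * Apply a DRR_SPILL record by copying its payload into the target object's
 * spill block, growing the spill block first if it is too small.
 */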
428870ff 2886static int
fcff0f35 2887receive_spill(struct receive_writer_arg *rwa, struct drr_spill *drrs,
b5256303 2888 arc_buf_t *abuf)
428870ff
BB
2889{
2890 dmu_tx_t *tx;
428870ff
BB
2891 dmu_buf_t *db, *db_spill;
2892 int err;
2893
2894 if (drrs->drr_length < SPA_MINBLOCKSIZE ||
fcff0f35 2895 drrs->drr_length > spa_maxblocksize(dmu_objset_spa(rwa->os)))
2e528b49 2896 return (SET_ERROR(EINVAL));
428870ff 2897
9b840763 2898 if (rwa->raw) {
b5256303
TC
2899 if (!DMU_OT_IS_VALID(drrs->drr_type) ||
2900 drrs->drr_compressiontype >= ZIO_COMPRESS_FUNCTIONS ||
2901 drrs->drr_compressed_size == 0)
2902 return (SET_ERROR(EINVAL));
2903 }
2904
fcff0f35 2905 if (dmu_object_info(rwa->os, drrs->drr_object, NULL) != 0)
2e528b49 2906 return (SET_ERROR(EINVAL));
428870ff 2907
48fbb9dd
FG
2908 if (drrs->drr_object > rwa->max_object)
2909 rwa->max_object = drrs->drr_object;
2910
fcff0f35 2911 VERIFY0(dmu_bonus_hold(rwa->os, drrs->drr_object, FTAG, &db));
428870ff
BB
2912 if ((err = dmu_spill_hold_by_bonus(db, FTAG, &db_spill)) != 0) {
2913 dmu_buf_rele(db, FTAG);
2914 return (err);
2915 }
2916
fcff0f35 2917 tx = dmu_tx_create(rwa->os);
428870ff
BB
2918
2919 dmu_tx_hold_spill(tx, db->db_object);
2920
2921 err = dmu_tx_assign(tx, TXG_WAIT);
13fe0198 2922 if (err != 0) {
428870ff
BB
2923 dmu_buf_rele(db, FTAG);
2924 dmu_buf_rele(db_spill, FTAG);
2925 dmu_tx_abort(tx);
2926 return (err);
2927 }
047116ac 2928
ae76f45c 2929 if (rwa->raw) {
9b840763 2930 VERIFY0(dmu_object_dirty_raw(rwa->os, drrs->drr_object, tx));
ae76f45c
TC
2931 dmu_buf_will_change_crypt_params(db_spill, tx);
2932 } else {
2933 dmu_buf_will_dirty(db_spill, tx);
2934 }
428870ff
BB
2935
2936 if (db_spill->db_size < drrs->drr_length)
2937 VERIFY(0 == dbuf_spill_set_blksz(db_spill,
2938 drrs->drr_length, tx));
440a3eb9 2939 dbuf_assign_arcbuf((dmu_buf_impl_t *)db_spill, abuf, tx);
428870ff
BB
2940
2941 dmu_buf_rele(db, FTAG);
2942 dmu_buf_rele(db_spill, FTAG);
2943
2944 dmu_tx_commit(tx);
2945 return (0);
2946}
2947
34dc7c2f 2948/* ARGSUSED */
60948de1 2949noinline static int
fcff0f35 2950receive_free(struct receive_writer_arg *rwa, struct drr_free *drrf)
34dc7c2f 2951{
34dc7c2f
BB
2952 int err;
2953
ee45fbd8 2954 if (drrf->drr_length != DMU_OBJECT_END &&
34dc7c2f 2955 drrf->drr_offset + drrf->drr_length < drrf->drr_offset)
2e528b49 2956 return (SET_ERROR(EINVAL));
34dc7c2f 2957
fcff0f35 2958 if (dmu_object_info(rwa->os, drrf->drr_object, NULL) != 0)
2e528b49 2959 return (SET_ERROR(EINVAL));
34dc7c2f 2960
48fbb9dd
FG
2961 if (drrf->drr_object > rwa->max_object)
2962 rwa->max_object = drrf->drr_object;
2963
440a3eb9
TC
2964 if (rwa->raw) {
2965 err = dmu_free_long_range_raw(rwa->os, drrf->drr_object,
2966 drrf->drr_offset, drrf->drr_length);
2967 } else {
2968 err = dmu_free_long_range(rwa->os, drrf->drr_object,
2969 drrf->drr_offset, drrf->drr_length);
2970 }
fcff0f35 2971
34dc7c2f
BB
2972 return (err);
2973}
2974
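/*
 * Handle a DRR_OBJECT_RANGE record, which carries the encryption parameters
 * (salt, IV, and MAC) that protect one dnode block of a raw send stream.
 */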
static int
receive_object_range(struct receive_writer_arg *rwa,
    struct drr_object_range *drror)
{
	int ret;
	dmu_tx_t *tx;
	dnode_t *mdn = NULL;
	dmu_buf_t *db = NULL;
	uint64_t offset;

	/*
	 * By default, we assume this block is in our native format
	 * (ZFS_HOST_BYTEORDER). We then take into account whether
	 * the send stream is byteswapped (rwa->byteswap). Finally,
	 * we need to byteswap again if this particular block was
	 * in non-native format on the send side.
	 */
	boolean_t byteorder = ZFS_HOST_BYTEORDER ^ rwa->byteswap ^
	    !!DRR_IS_RAW_BYTESWAPPED(drror->drr_flags);

	/*
	 * Since dnode block sizes are constant, we should not need to worry
	 * about making sure that the dnode block size is the same on the
	 * sending and receiving sides for the time being. For non-raw sends,
	 * this does not matter (and in fact we do not send a DRR_OBJECT_RANGE
	 * record at all). Raw sends require this record type because the
	 * encryption parameters are used to protect an entire block of bonus
	 * buffers. If the size of dnode blocks ever becomes variable,
	 * handling will need to be added to ensure that dnode block sizes
	 * match on the sending and receiving side.
	 */
	if (drror->drr_numslots != DNODES_PER_BLOCK ||
	    P2PHASE(drror->drr_firstobj, DNODES_PER_BLOCK) != 0 ||
	    !rwa->raw)
		return (SET_ERROR(EINVAL));

	if (drror->drr_firstobj > rwa->max_object)
		rwa->max_object = drror->drr_firstobj;

	offset = drror->drr_firstobj * sizeof (dnode_phys_t);
	mdn = DMU_META_DNODE(rwa->os);

	tx = dmu_tx_create(rwa->os);
	ret = dmu_tx_assign(tx, TXG_WAIT);
	if (ret != 0) {
		dmu_tx_abort(tx);
		return (ret);
	}

	ret = dmu_buf_hold_by_dnode(mdn, offset, FTAG, &db,
	    DMU_READ_PREFETCH | DMU_READ_NO_DECRYPT);
	if (ret != 0) {
		dmu_tx_commit(tx);
		return (ret);
	}

	/*
	 * Convert the buffer associated with this range of dnodes to a
	 * raw buffer. This ensures that it will be written out as a raw
	 * buffer when we fill in the dnode objects in future records.
	 * Since we are committing this tx now, it is technically possible
	 * for the dnode block to end up on-disk with the incorrect MAC.
	 * Despite this, the dataset is marked as inconsistent so no other
	 * code paths (apart from scrubs) will attempt to read this data.
	 * Scrubs will not be affected by this either since scrubs only
	 * read raw data and do not attempt to check the MAC.
	 */
	dmu_convert_to_raw(db, byteorder, drror->drr_salt, drror->drr_iv,
	    drror->drr_mac, tx);
	dmu_buf_rele(db, FTAG);
	dmu_tx_commit(tx);
	return (0);
}

/* used to destroy the drc_ds on error */
static void
dmu_recv_cleanup_ds(dmu_recv_cookie_t *drc)
{
	dsl_dataset_t *ds = drc->drc_ds;
	ds_hold_flags_t dsflags = (drc->drc_raw) ? 0 : DS_HOLD_FLAG_DECRYPT;

	/*
	 * Wait for the txg sync before cleaning up the receive. For
	 * resumable receives, this ensures that our resume state has
	 * been written out to disk. For raw receives, this ensures
	 * that the user accounting code will not attempt to do anything
	 * after we stopped receiving the dataset.
	 */
	txg_wait_synced(ds->ds_dir->dd_pool, 0);

	rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
	if (drc->drc_resumable && !BP_IS_HOLE(dsl_dataset_get_blkptr(ds))) {
		rrw_exit(&ds->ds_bp_rwlock, FTAG);
		dsl_dataset_disown(ds, dsflags, dmu_recv_tag);
	} else {
		char name[ZFS_MAX_DATASET_NAME_LEN];
		rrw_exit(&ds->ds_bp_rwlock, FTAG);
		dsl_dataset_name(ds, name);
		dsl_dataset_disown(ds, dsflags, dmu_recv_tag);
		(void) dsl_destroy_head(name);
	}
}

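/*
 * Fold len bytes of the stream into the running fletcher-4 checksum,
 * using the byteswapping variant when the stream is in the opposite
 * byte order.
 */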
37f8a883 3078static void
fcff0f35 3079receive_cksum(struct receive_arg *ra, int len, void *buf)
37f8a883
MA
3080{
3081 if (ra->byteswap) {
a6255b7f 3082 (void) fletcher_4_incremental_byteswap(buf, len, &ra->cksum);
37f8a883 3083 } else {
a6255b7f 3084 (void) fletcher_4_incremental_native(buf, len, &ra->cksum);
37f8a883
MA
3085 }
3086}
3087
/*
 * Read the payload into a buffer of size len, and update the current record's
 * payload field.
 * Allocate ra->next_rrd and read the next record's header into
 * ra->next_rrd->header.
 * Verify checksum of payload and next record.
 */
static int
receive_read_payload_and_next_header(struct receive_arg *ra, int len, void *buf)
{
	int err;
	zio_cksum_t cksum_orig;
	zio_cksum_t *cksump;

	if (len != 0) {
		ASSERT3U(len, <=, SPA_MAXBLOCKSIZE);
		err = receive_read(ra, len, buf);
		if (err != 0)
			return (err);
		receive_cksum(ra, len, buf);

		/* note: rrd is NULL when reading the begin record's payload */
		if (ra->rrd != NULL) {
			ra->rrd->payload = buf;
			ra->rrd->payload_size = len;
			ra->rrd->bytes_read = ra->bytes_read;
		}
	}

	ra->prev_cksum = ra->cksum;

	ra->next_rrd = kmem_zalloc(sizeof (*ra->next_rrd), KM_SLEEP);
	err = receive_read(ra, sizeof (ra->next_rrd->header),
	    &ra->next_rrd->header);
	ra->next_rrd->bytes_read = ra->bytes_read;

	if (err != 0) {
		kmem_free(ra->next_rrd, sizeof (*ra->next_rrd));
		ra->next_rrd = NULL;
		return (err);
	}
	if (ra->next_rrd->header.drr_type == DRR_BEGIN) {
		kmem_free(ra->next_rrd, sizeof (*ra->next_rrd));
		ra->next_rrd = NULL;
		return (SET_ERROR(EINVAL));
	}

	/*
	 * Note: checksum is of everything up to but not including the
	 * checksum itself.
	 */
	ASSERT3U(offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
	    ==, sizeof (dmu_replay_record_t) - sizeof (zio_cksum_t));
	receive_cksum(ra,
	    offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
	    &ra->next_rrd->header);

	cksum_orig = ra->next_rrd->header.drr_u.drr_checksum.drr_checksum;
	cksump = &ra->next_rrd->header.drr_u.drr_checksum.drr_checksum;

	if (ra->byteswap)
		byteswap_record(&ra->next_rrd->header);

	if ((!ZIO_CHECKSUM_IS_ZERO(cksump)) &&
	    !ZIO_CHECKSUM_EQUAL(ra->cksum, *cksump)) {
		kmem_free(ra->next_rrd, sizeof (*ra->next_rrd));
		ra->next_rrd = NULL;
		return (SET_ERROR(ECKSUM));
	}

	receive_cksum(ra, sizeof (cksum_orig), &cksum_orig);

	return (0);
}

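/*
 * The objlist helpers below maintain a trimmed list of object numbers.
 * A minimal usage sketch (illustrative values only; as described at
 * objlist_exists() and objlist_insert() below, both insertions and lookups
 * must be made in ascending object-number order):
 *
 *	struct objlist list;
 *	objlist_create(&list);
 *	objlist_insert(&list, 5);
 *	objlist_insert(&list, 93);
 *	ASSERT(objlist_exists(&list, 5));	(B_TRUE; list unchanged)
 *	ASSERT(objlist_exists(&list, 93));	(B_TRUE; node 5 trimmed)
 *	objlist_destroy(&list);
 */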
static void
objlist_create(struct objlist *list)
{
	list_create(&list->list, sizeof (struct receive_objnode),
	    offsetof(struct receive_objnode, node));
	list->last_lookup = 0;
}

static void
objlist_destroy(struct objlist *list)
{
	for (struct receive_objnode *n = list_remove_head(&list->list);
	    n != NULL; n = list_remove_head(&list->list)) {
		kmem_free(n, sizeof (*n));
	}
	list_destroy(&list->list);
}

/*
 * This function looks through the objlist to see if the specified object number
 * is contained in the objlist. In the process, it will remove all object
 * numbers in the list that are smaller than the specified object number. Thus,
 * any lookup of an object number smaller than a previously looked up object
 * number will always return false; therefore, all lookups should be done in
 * ascending order.
 */
static boolean_t
objlist_exists(struct objlist *list, uint64_t object)
{
	struct receive_objnode *node = list_head(&list->list);
	ASSERT3U(object, >=, list->last_lookup);
	list->last_lookup = object;
	while (node != NULL && node->object < object) {
		VERIFY3P(node, ==, list_remove_head(&list->list));
		kmem_free(node, sizeof (*node));
		node = list_head(&list->list);
	}
	return (node != NULL && node->object == object);
}

/*
 * The objlist is a list of object numbers stored in ascending order. However,
 * the insertion of new object numbers does not seek out the correct location
 * to store a new object number; instead, it appends the number to the list
 * for simplicity. Thus, callers must take care to insert new object numbers
 * only in ascending order.
 */
static void
objlist_insert(struct objlist *list, uint64_t object)
{
	struct receive_objnode *node = kmem_zalloc(sizeof (*node), KM_SLEEP);
	node->object = object;
#ifdef ZFS_DEBUG
	{
		struct receive_objnode *last_object = list_tail(&list->list);
		uint64_t last_objnum = (last_object != NULL ? last_object->object : 0);
		ASSERT3U(node->object, >, last_objnum);
	}
#endif
	list_insert_tail(&list->list, node);
}

/*
 * Issue the prefetch reads for any necessary indirect blocks.
 *
 * We use the object ignore list to tell us whether or not to issue prefetches
 * for a given object. We do this for both correctness (in case the blocksize
 * of an object has changed) and performance (if the object doesn't exist, don't
 * needlessly try to issue prefetches). We also trim the list as we go through
 * the stream to prevent it from growing to an unbounded size.
 *
 * The object numbers within will always be in sorted order, and any write
 * records we see will also be in sorted order, but they're not sorted with
 * respect to each other (i.e. we can get several object records before
 * receiving each object's write records). As a result, once we've reached a
 * given object number, we can safely remove any reference to lower object
 * numbers in the ignore list. In practice, we receive up to 32 object records
 * before receiving write records, so the list can have up to 32 nodes in it.
 */
/* ARGSUSED */
static void
receive_read_prefetch(struct receive_arg *ra,
    uint64_t object, uint64_t offset, uint64_t length)
{
	if (!objlist_exists(&ra->ignore_objlist, object)) {
		dmu_prefetch(ra->os, object, 1, offset, length,
		    ZIO_PRIORITY_SYNC_READ);
	}
}

/*
 * Read records off the stream, issuing any necessary prefetches.
 */
static int
receive_read_record(struct receive_arg *ra)
{
	int err;

	switch (ra->rrd->header.drr_type) {
	case DRR_OBJECT:
	{
		struct drr_object *drro = &ra->rrd->header.drr_u.drr_object;
		uint32_t size = DRR_OBJECT_PAYLOAD_SIZE(drro);
		void *buf = kmem_zalloc(size, KM_SLEEP);
		dmu_object_info_t doi;

		err = receive_read_payload_and_next_header(ra, size, buf);
		if (err != 0) {
			kmem_free(buf, size);
			return (err);
		}
		err = dmu_object_info(ra->os, drro->drr_object, &doi);
		/*
		 * See receive_read_prefetch for an explanation why we're
		 * storing this object in the ignore_obj_list.
		 */
		if (err == ENOENT || err == EEXIST ||
		    (err == 0 && doi.doi_data_block_size != drro->drr_blksz)) {
			objlist_insert(&ra->ignore_objlist, drro->drr_object);
			err = 0;
		}
		return (err);
	}
	case DRR_FREEOBJECTS:
	{
		err = receive_read_payload_and_next_header(ra, 0, NULL);
		return (err);
	}
	case DRR_WRITE:
	{
		struct drr_write *drrw = &ra->rrd->header.drr_u.drr_write;
		arc_buf_t *abuf;
		boolean_t is_meta = DMU_OT_IS_METADATA(drrw->drr_type);

		if (ra->raw) {
			boolean_t byteorder = ZFS_HOST_BYTEORDER ^
			    !!DRR_IS_RAW_BYTESWAPPED(drrw->drr_flags) ^
			    ra->byteswap;

			abuf = arc_loan_raw_buf(dmu_objset_spa(ra->os),
			    drrw->drr_object, byteorder, drrw->drr_salt,
			    drrw->drr_iv, drrw->drr_mac, drrw->drr_type,
			    drrw->drr_compressed_size, drrw->drr_logical_size,
			    drrw->drr_compressiontype);
		} else if (DRR_WRITE_COMPRESSED(drrw)) {
			ASSERT3U(drrw->drr_compressed_size, >, 0);
			ASSERT3U(drrw->drr_logical_size, >=,
			    drrw->drr_compressed_size);
			ASSERT(!is_meta);
			abuf = arc_loan_compressed_buf(
			    dmu_objset_spa(ra->os),
			    drrw->drr_compressed_size, drrw->drr_logical_size,
			    drrw->drr_compressiontype);
		} else {
			abuf = arc_loan_buf(dmu_objset_spa(ra->os),
			    is_meta, drrw->drr_logical_size);
		}

		err = receive_read_payload_and_next_header(ra,
		    DRR_WRITE_PAYLOAD_SIZE(drrw), abuf->b_data);
		if (err != 0) {
			dmu_return_arcbuf(abuf);
			return (err);
		}
		ra->rrd->arc_buf = abuf;
		receive_read_prefetch(ra, drrw->drr_object, drrw->drr_offset,
		    drrw->drr_logical_size);
		return (err);
	}
	case DRR_WRITE_BYREF:
	{
		struct drr_write_byref *drrwb =
		    &ra->rrd->header.drr_u.drr_write_byref;
		err = receive_read_payload_and_next_header(ra, 0, NULL);
		receive_read_prefetch(ra, drrwb->drr_object, drrwb->drr_offset,
		    drrwb->drr_length);
		return (err);
	}
	case DRR_WRITE_EMBEDDED:
	{
		struct drr_write_embedded *drrwe =
		    &ra->rrd->header.drr_u.drr_write_embedded;
		uint32_t size = P2ROUNDUP(drrwe->drr_psize, 8);
		void *buf = kmem_zalloc(size, KM_SLEEP);

		err = receive_read_payload_and_next_header(ra, size, buf);
		if (err != 0) {
			kmem_free(buf, size);
			return (err);
		}

		receive_read_prefetch(ra, drrwe->drr_object, drrwe->drr_offset,
		    drrwe->drr_length);
		return (err);
	}
	case DRR_FREE:
	{
		/*
		 * It might be beneficial to prefetch indirect blocks here, but
		 * we don't really have the data to decide for sure.
		 */
		err = receive_read_payload_and_next_header(ra, 0, NULL);
		return (err);
	}
	case DRR_END:
	{
		struct drr_end *drre = &ra->rrd->header.drr_u.drr_end;
		if (!ZIO_CHECKSUM_EQUAL(ra->prev_cksum, drre->drr_checksum))
			return (SET_ERROR(ECKSUM));
		return (0);
	}
	case DRR_SPILL:
	{
		struct drr_spill *drrs = &ra->rrd->header.drr_u.drr_spill;
		arc_buf_t *abuf;
		int len = DRR_SPILL_PAYLOAD_SIZE(drrs);

		/* DRR_SPILL records are either raw or uncompressed */
		if (ra->raw) {
			boolean_t byteorder = ZFS_HOST_BYTEORDER ^
			    !!DRR_IS_RAW_BYTESWAPPED(drrs->drr_flags) ^
			    ra->byteswap;

			abuf = arc_loan_raw_buf(dmu_objset_spa(ra->os),
			    drrs->drr_object, byteorder, drrs->drr_salt,
			    drrs->drr_iv, drrs->drr_mac, drrs->drr_type,
			    drrs->drr_compressed_size, drrs->drr_length,
			    drrs->drr_compressiontype);
		} else {
			abuf = arc_loan_buf(dmu_objset_spa(ra->os),
			    DMU_OT_IS_METADATA(drrs->drr_type),
			    drrs->drr_length);
		}

		err = receive_read_payload_and_next_header(ra, len,
		    abuf->b_data);
		if (err != 0) {
			dmu_return_arcbuf(abuf);
			return (err);
		}
		ra->rrd->arc_buf = abuf;
		return (err);
	}
	case DRR_OBJECT_RANGE:
	{
		err = receive_read_payload_and_next_header(ra, 0, NULL);
		return (err);
	}
	default:
		return (SET_ERROR(EINVAL));
	}
}

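/*
 * Log the interesting fields of the given record, along with the error it
 * produced, to the debug log; a no-op for record types not handled below.
 */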
static void
dprintf_drr(struct receive_record_arg *rrd, int err)
{
	switch (rrd->header.drr_type) {
	case DRR_OBJECT:
	{
		struct drr_object *drro = &rrd->header.drr_u.drr_object;
		dprintf("drr_type = OBJECT obj = %llu type = %u "
		    "bonustype = %u blksz = %u bonuslen = %u cksumtype = %u "
		    "compress = %u dn_slots = %u err = %d\n",
		    drro->drr_object, drro->drr_type, drro->drr_bonustype,
		    drro->drr_blksz, drro->drr_bonuslen,
		    drro->drr_checksumtype, drro->drr_compress,
		    drro->drr_dn_slots, err);
		break;
	}
	case DRR_FREEOBJECTS:
	{
		struct drr_freeobjects *drrfo =
		    &rrd->header.drr_u.drr_freeobjects;
		dprintf("drr_type = FREEOBJECTS firstobj = %llu "
		    "numobjs = %llu err = %d\n",
		    drrfo->drr_firstobj, drrfo->drr_numobjs, err);
		break;
	}
	case DRR_WRITE:
	{
		struct drr_write *drrw = &rrd->header.drr_u.drr_write;
		dprintf("drr_type = WRITE obj = %llu type = %u offset = %llu "
		    "lsize = %llu cksumtype = %u cksumflags = %u "
		    "compress = %u psize = %llu err = %d\n",
		    drrw->drr_object, drrw->drr_type, drrw->drr_offset,
		    drrw->drr_logical_size, drrw->drr_checksumtype,
		    drrw->drr_flags, drrw->drr_compressiontype,
		    drrw->drr_compressed_size, err);
		break;
	}
	case DRR_WRITE_BYREF:
	{
		struct drr_write_byref *drrwbr =
		    &rrd->header.drr_u.drr_write_byref;
		dprintf("drr_type = WRITE_BYREF obj = %llu offset = %llu "
		    "length = %llu toguid = %llx refguid = %llx "
		    "refobject = %llu refoffset = %llu cksumtype = %u "
		    "cksumflags = %u err = %d\n",
		    drrwbr->drr_object, drrwbr->drr_offset,
		    drrwbr->drr_length, drrwbr->drr_toguid,
		    drrwbr->drr_refguid, drrwbr->drr_refobject,
		    drrwbr->drr_refoffset, drrwbr->drr_checksumtype,
		    drrwbr->drr_flags, err);
		break;
	}
	case DRR_WRITE_EMBEDDED:
	{
		struct drr_write_embedded *drrwe =
		    &rrd->header.drr_u.drr_write_embedded;
		dprintf("drr_type = WRITE_EMBEDDED obj = %llu offset = %llu "
		    "length = %llu compress = %u etype = %u lsize = %u "
		    "psize = %u err = %d\n",
		    drrwe->drr_object, drrwe->drr_offset, drrwe->drr_length,
		    drrwe->drr_compression, drrwe->drr_etype,
		    drrwe->drr_lsize, drrwe->drr_psize, err);
		break;
	}
	case DRR_FREE:
	{
		struct drr_free *drrf = &rrd->header.drr_u.drr_free;
		dprintf("drr_type = FREE obj = %llu offset = %llu "
		    "length = %lld err = %d\n",
		    drrf->drr_object, drrf->drr_offset, drrf->drr_length,
		    err);
		break;
	}
	case DRR_SPILL:
	{
		struct drr_spill *drrs = &rrd->header.drr_u.drr_spill;
		dprintf("drr_type = SPILL obj = %llu length = %llu "
		    "err = %d\n", drrs->drr_object, drrs->drr_length, err);
		break;
	}
	default:
		return;
	}
}

/*
 * Commit the records to the pool.
 */
static int
receive_process_record(struct receive_writer_arg *rwa,
    struct receive_record_arg *rrd)
{
	int err;

	/* Processing in order, therefore bytes_read should be increasing. */
	ASSERT3U(rrd->bytes_read, >=, rwa->bytes_read);
	rwa->bytes_read = rrd->bytes_read;

	switch (rrd->header.drr_type) {
	case DRR_OBJECT:
	{
		struct drr_object *drro = &rrd->header.drr_u.drr_object;
		err = receive_object(rwa, drro, rrd->payload);
		kmem_free(rrd->payload, rrd->payload_size);
		rrd->payload = NULL;
		break;
	}
	case DRR_FREEOBJECTS:
	{
		struct drr_freeobjects *drrfo =
		    &rrd->header.drr_u.drr_freeobjects;
		err = receive_freeobjects(rwa, drrfo);
		break;
	}
	case DRR_WRITE:
	{
		struct drr_write *drrw = &rrd->header.drr_u.drr_write;
		err = receive_write(rwa, drrw, rrd->arc_buf);
		/* if receive_write() is successful, it consumes the arc_buf */
		if (err != 0)
			dmu_return_arcbuf(rrd->arc_buf);
		rrd->arc_buf = NULL;
		rrd->payload = NULL;
		break;
	}
	case DRR_WRITE_BYREF:
	{
		struct drr_write_byref *drrwbr =
		    &rrd->header.drr_u.drr_write_byref;
		err = receive_write_byref(rwa, drrwbr);
		break;
	}
	case DRR_WRITE_EMBEDDED:
	{
		struct drr_write_embedded *drrwe =
		    &rrd->header.drr_u.drr_write_embedded;
		err = receive_write_embedded(rwa, drrwe, rrd->payload);
		kmem_free(rrd->payload, rrd->payload_size);
		rrd->payload = NULL;
		break;
	}
	case DRR_FREE:
	{
		struct drr_free *drrf = &rrd->header.drr_u.drr_free;
		err = receive_free(rwa, drrf);
		break;
	}
	case DRR_SPILL:
	{
		struct drr_spill *drrs = &rrd->header.drr_u.drr_spill;
		err = receive_spill(rwa, drrs, rrd->arc_buf);
		/* if receive_spill() is successful, it consumes the arc_buf */
		if (err != 0)
			dmu_return_arcbuf(rrd->arc_buf);
		rrd->arc_buf = NULL;
		rrd->payload = NULL;
		break;
	}
	case DRR_OBJECT_RANGE:
	{
		struct drr_object_range *drror =
		    &rrd->header.drr_u.drr_object_range;
		return (receive_object_range(rwa, drror));
	}
	default:
		return (SET_ERROR(EINVAL));
	}

	if (err != 0)
		dprintf_drr(rrd, err);

	return (err);
}

/*
 * dmu_recv_stream's worker thread; pull records off the queue, and then call
 * receive_process_record. When we're done, signal the main thread and exit.
 */
static void
receive_writer_thread(void *arg)
{
	struct receive_writer_arg *rwa = arg;
	struct receive_record_arg *rrd;
	fstrans_cookie_t cookie = spl_fstrans_mark();

	for (rrd = bqueue_dequeue(&rwa->q); !rrd->eos_marker;
	    rrd = bqueue_dequeue(&rwa->q)) {
		/*
		 * If there's an error, the main thread will stop putting things
		 * on the queue, but we need to clear everything in it before we
		 * can exit.
		 */
		if (rwa->err == 0) {
			rwa->err = receive_process_record(rwa, rrd);
		} else if (rrd->arc_buf != NULL) {
			dmu_return_arcbuf(rrd->arc_buf);
			rrd->arc_buf = NULL;
			rrd->payload = NULL;
		} else if (rrd->payload != NULL) {
			kmem_free(rrd->payload, rrd->payload_size);
			rrd->payload = NULL;
		}
		kmem_free(rrd, sizeof (*rrd));
	}
	kmem_free(rrd, sizeof (*rrd));
	mutex_enter(&rwa->mutex);
	rwa->done = B_TRUE;
	cv_signal(&rwa->cv);
	mutex_exit(&rwa->mutex);
	spl_fstrans_unmark(cookie);
	thread_exit();
}

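/*
 * Verify that the "resume_object"/"resume_offset" pair in the BEGIN record's
 * payload matches the resume state recorded in the dataset's ZAP; a mismatch
 * means this stream does not continue the partially received dataset.
 */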
static int
resume_check(struct receive_arg *ra, nvlist_t *begin_nvl)
{
	uint64_t val;
	objset_t *mos = dmu_objset_pool(ra->os)->dp_meta_objset;
	uint64_t dsobj = dmu_objset_id(ra->os);
	uint64_t resume_obj, resume_off;

	if (nvlist_lookup_uint64(begin_nvl,
	    "resume_object", &resume_obj) != 0 ||
	    nvlist_lookup_uint64(begin_nvl,
	    "resume_offset", &resume_off) != 0) {
		return (SET_ERROR(EINVAL));
	}
	VERIFY0(zap_lookup(mos, dsobj,
	    DS_FIELD_RESUME_OBJECT, sizeof (val), 1, &val));
	if (resume_obj != val)
		return (SET_ERROR(EINVAL));
	VERIFY0(zap_lookup(mos, dsobj,
	    DS_FIELD_RESUME_OFFSET, sizeof (val), 1, &val));
	if (resume_off != val)
		return (SET_ERROR(EINVAL));

	return (0);
}

/*
 * Read in the stream's records, one by one, and apply them to the pool. There
 * are two threads involved; the thread that calls this function will spin up a
 * worker thread, read the records off the stream one by one, and issue
 * prefetches for any necessary indirect blocks. It will then push the records
 * onto an internal blocking queue. The worker thread will pull the records off
 * the queue, and actually write the data into the DMU. This way, the worker
 * thread doesn't have to wait for reads to complete, since everything it needs
 * (the indirect blocks) will be prefetched.
 *
 * NB: callers *must* call dmu_recv_end() if this succeeds.
 */
int
dmu_recv_stream(dmu_recv_cookie_t *drc, vnode_t *vp, offset_t *voffp,
    int cleanup_fd, uint64_t *action_handlep)
{
	int err = 0;
	struct receive_arg *ra;
	struct receive_writer_arg *rwa;
	int featureflags;
	uint32_t payloadlen;
	void *payload;
	nvlist_t *begin_nvl = NULL;

	ra = kmem_zalloc(sizeof (*ra), KM_SLEEP);
	rwa = kmem_zalloc(sizeof (*rwa), KM_SLEEP);

	ra->byteswap = drc->drc_byteswap;
	ra->raw = drc->drc_raw;
	ra->cksum = drc->drc_cksum;
	ra->vp = vp;
	ra->voff = *voffp;

	if (dsl_dataset_is_zapified(drc->drc_ds)) {
		(void) zap_lookup(drc->drc_ds->ds_dir->dd_pool->dp_meta_objset,
		    drc->drc_ds->ds_object, DS_FIELD_RESUME_BYTES,
		    sizeof (ra->bytes_read), 1, &ra->bytes_read);
	}

	objlist_create(&ra->ignore_objlist);

	/* these were verified in dmu_recv_begin */
	ASSERT3U(DMU_GET_STREAM_HDRTYPE(drc->drc_drrb->drr_versioninfo), ==,
	    DMU_SUBSTREAM);
	ASSERT3U(drc->drc_drrb->drr_type, <, DMU_OST_NUMTYPES);

	/*
	 * Open the objset we are modifying.
	 */
	VERIFY0(dmu_objset_from_ds(drc->drc_ds, &ra->os));

	ASSERT(dsl_dataset_phys(drc->drc_ds)->ds_flags & DS_FLAG_INCONSISTENT);

	featureflags = DMU_GET_FEATUREFLAGS(drc->drc_drrb->drr_versioninfo);
	ra->featureflags = featureflags;

	/* embedded data is incompatible with encrypted datasets */
	if (ra->os->os_encrypted &&
	    (featureflags & DMU_BACKUP_FEATURE_EMBED_DATA)) {
		err = SET_ERROR(EINVAL);
		goto out;
	}

	/* if this stream is dedup'ed, set up the avl tree for guid mapping */
	if (featureflags & DMU_BACKUP_FEATURE_DEDUP) {
		minor_t minor;

		if (cleanup_fd == -1) {
			err = SET_ERROR(EBADF);
			goto out;
		}
		err = zfs_onexit_fd_hold(cleanup_fd, &minor);
		if (err != 0) {
			cleanup_fd = -1;
			goto out;
		}

		if (*action_handlep == 0) {
			rwa->guid_to_ds_map =
			    kmem_alloc(sizeof (avl_tree_t), KM_SLEEP);
			avl_create(rwa->guid_to_ds_map, guid_compare,
			    sizeof (guid_map_entry_t),
			    offsetof(guid_map_entry_t, avlnode));
			err = zfs_onexit_add_cb(minor,
			    free_guid_map_onexit, rwa->guid_to_ds_map,
			    action_handlep);
			if (err != 0)
				goto out;
		} else {
			err = zfs_onexit_cb_data(minor, *action_handlep,
			    (void **)&rwa->guid_to_ds_map);
			if (err != 0)
				goto out;
		}

		drc->drc_guid_to_ds_map = rwa->guid_to_ds_map;
	}

	payloadlen = drc->drc_drr_begin->drr_payloadlen;
	payload = NULL;
	if (payloadlen != 0)
		payload = kmem_alloc(payloadlen, KM_SLEEP);

	err = receive_read_payload_and_next_header(ra, payloadlen, payload);
	if (err != 0) {
		if (payloadlen != 0)
			kmem_free(payload, payloadlen);
		goto out;
	}
	if (payloadlen != 0) {
		err = nvlist_unpack(payload, payloadlen, &begin_nvl, KM_SLEEP);
		kmem_free(payload, payloadlen);
		if (err != 0)
			goto out;
	}

	/* handle DSL encryption key payload */
	if (featureflags & DMU_BACKUP_FEATURE_RAW) {
		nvlist_t *keynvl = NULL;

		ASSERT(ra->os->os_encrypted);
		ASSERT(drc->drc_raw);

		err = nvlist_lookup_nvlist(begin_nvl, "crypt_keydata", &keynvl);
		if (err != 0)
			goto out;

		err = dsl_crypto_recv_key(spa_name(ra->os->os_spa),
		    drc->drc_ds->ds_object, drc->drc_drrb->drr_type,
		    keynvl);
		if (err != 0)
			goto out;
	}

	if (featureflags & DMU_BACKUP_FEATURE_RESUMING) {
		err = resume_check(ra, begin_nvl);
		if (err != 0)
			goto out;
	}

	(void) bqueue_init(&rwa->q, zfs_recv_queue_length,
	    offsetof(struct receive_record_arg, node));
	cv_init(&rwa->cv, NULL, CV_DEFAULT, NULL);
	mutex_init(&rwa->mutex, NULL, MUTEX_DEFAULT, NULL);
	rwa->os = ra->os;
	rwa->byteswap = drc->drc_byteswap;
	rwa->resumable = drc->drc_resumable;
	rwa->raw = drc->drc_raw;

	(void) thread_create(NULL, 0, receive_writer_thread, rwa, 0, curproc,
	    TS_RUN, minclsyspri);
	/*
	 * We're reading rwa->err without locks, which is safe since we are the
	 * only reader, and the worker thread is the only writer. It's ok if we
	 * miss a write for an iteration or two of the loop, since the writer
	 * thread will keep freeing records we send it until we send it an eos
	 * marker.
	 *
	 * We can leave this loop in 3 ways: First, if rwa->err is
	 * non-zero. In that case, the writer thread will free the rrd we just
	 * pushed. Second, if we're interrupted; in that case, either it's the
	 * first loop and ra->rrd was never allocated, or it's later and ra->rrd
	 * has been handed off to the writer thread who will free it. Finally,
	 * if receive_read_record fails or we're at the end of the stream, then
	 * we free ra->rrd and exit.
	 */
	while (rwa->err == 0) {
		if (issig(JUSTLOOKING) && issig(FORREAL)) {
			err = SET_ERROR(EINTR);
			break;
		}

		ASSERT3P(ra->rrd, ==, NULL);
		ra->rrd = ra->next_rrd;
		ra->next_rrd = NULL;
		/* Allocates and loads header into ra->next_rrd */
		err = receive_read_record(ra);

		if (ra->rrd->header.drr_type == DRR_END || err != 0) {
			kmem_free(ra->rrd, sizeof (*ra->rrd));
			ra->rrd = NULL;
			break;
		}

		bqueue_enqueue(&rwa->q, ra->rrd,
		    sizeof (struct receive_record_arg) + ra->rrd->payload_size);
		ra->rrd = NULL;
	}
	if (ra->next_rrd == NULL)
		ra->next_rrd = kmem_zalloc(sizeof (*ra->next_rrd), KM_SLEEP);
	ra->next_rrd->eos_marker = B_TRUE;
	bqueue_enqueue(&rwa->q, ra->next_rrd, 1);

	mutex_enter(&rwa->mutex);
	while (!rwa->done) {
		cv_wait(&rwa->cv, &rwa->mutex);
	}
	mutex_exit(&rwa->mutex);

	/*
	 * If we are receiving a full stream as a clone, all object IDs which
	 * are greater than the maximum ID referenced in the stream are
	 * by definition unused and must be freed.
	 */
	if (drc->drc_clone && drc->drc_drrb->drr_fromguid == 0) {
		uint64_t obj = rwa->max_object + 1;
		int free_err = 0;
		int next_err = 0;

		while (next_err == 0) {
			if (drc->drc_raw) {
				free_err = dmu_free_long_object_raw(rwa->os,
				    obj);
			} else {
				free_err = dmu_free_long_object(rwa->os, obj);
			}
			if (free_err != 0 && free_err != ENOENT)
				break;

			next_err = dmu_object_next(rwa->os, &obj, FALSE, 0);
		}

		if (err == 0) {
			if (free_err != 0 && free_err != ENOENT)
				err = free_err;
			else if (next_err != ESRCH)
				err = next_err;
		}
	}

	cv_destroy(&rwa->cv);
	mutex_destroy(&rwa->mutex);
	bqueue_destroy(&rwa->q);
	if (err == 0)
		err = rwa->err;

out:
	nvlist_free(begin_nvl);
	if ((featureflags & DMU_BACKUP_FEATURE_DEDUP) && (cleanup_fd != -1))
		zfs_onexit_fd_rele(cleanup_fd);

	if (err != 0) {
		/*
		 * Clean up references. If receive is not resumable,
		 * destroy what we created, so we don't leave it in
		 * an inconsistent state.
		 */
		dmu_recv_cleanup_ds(drc);
	}

	*voffp = ra->voff;
	objlist_destroy(&ra->ignore_objlist);
	kmem_free(ra, sizeof (*ra));
	kmem_free(rwa, sizeof (*rwa));
	return (err);
}

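/*
 * Check phase of the dmu_recv_end() sync task: verify that the received
 * snapshot can be committed, including (for a forced receive into an
 * existing fs) that every snapshot of the target newer than the origin
 * can be destroyed.
 */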
static int
dmu_recv_end_check(void *arg, dmu_tx_t *tx)
{
	dmu_recv_cookie_t *drc = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	int error;

	ASSERT3P(drc->drc_ds->ds_owner, ==, dmu_recv_tag);

	if (!drc->drc_newfs) {
		dsl_dataset_t *origin_head;

		error = dsl_dataset_hold(dp, drc->drc_tofs, FTAG, &origin_head);
		if (error != 0)
			return (error);
		if (drc->drc_force) {
			/*
			 * We will destroy any snapshots in tofs (i.e. before
			 * origin_head) that are after the origin (which is
			 * the snap before drc_ds, because drc_ds cannot
			 * have any snaps of its own).
			 */
			uint64_t obj;

			obj = dsl_dataset_phys(origin_head)->ds_prev_snap_obj;
			while (obj !=
			    dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj) {
				dsl_dataset_t *snap;
				error = dsl_dataset_hold_obj(dp, obj, FTAG,
				    &snap);
				if (error != 0)
					break;
				if (snap->ds_dir != origin_head->ds_dir)
					error = SET_ERROR(EINVAL);
				if (error == 0) {
					error = dsl_destroy_snapshot_check_impl(
					    snap, B_FALSE);
				}
				obj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
				dsl_dataset_rele(snap, FTAG);
				if (error != 0)
					break;
			}
			if (error != 0) {
				dsl_dataset_rele(origin_head, FTAG);
				return (error);
			}
		}
		error = dsl_dataset_clone_swap_check_impl(drc->drc_ds,
		    origin_head, drc->drc_force, drc->drc_owner, tx);
		if (error != 0) {
			dsl_dataset_rele(origin_head, FTAG);
			return (error);
		}
		error = dsl_dataset_snapshot_check_impl(origin_head,
		    drc->drc_tosnap, tx, B_TRUE, 1, drc->drc_cred);
		dsl_dataset_rele(origin_head, FTAG);
		if (error != 0)
			return (error);

		error = dsl_destroy_head_check_impl(drc->drc_ds, 1);
	} else {
		error = dsl_dataset_snapshot_check_impl(drc->drc_ds,
		    drc->drc_tosnap, tx, B_TRUE, 1, drc->drc_cred);
	}
	return (error);
}

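/*
 * Sync phase of the dmu_recv_end() sync task: swap the received clone into
 * place (or snapshot a newly created fs), clear the inconsistent flag, and
 * release the dataset.
 */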
static void
dmu_recv_end_sync(void *arg, dmu_tx_t *tx)
{
	dmu_recv_cookie_t *drc = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	boolean_t encrypted = drc->drc_ds->ds_dir->dd_crypto_obj != 0;

	spa_history_log_internal_ds(drc->drc_ds, "finish receiving",
	    tx, "snap=%s", drc->drc_tosnap);

	if (!drc->drc_newfs) {
		dsl_dataset_t *origin_head;

		VERIFY0(dsl_dataset_hold(dp, drc->drc_tofs, FTAG,
		    &origin_head));

		if (drc->drc_force) {
			/*
			 * Destroy any snapshots of drc_tofs (origin_head)
			 * after the origin (the snap before drc_ds).
			 */
			uint64_t obj;

			obj = dsl_dataset_phys(origin_head)->ds_prev_snap_obj;
			while (obj !=
			    dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj) {
				dsl_dataset_t *snap;
				VERIFY0(dsl_dataset_hold_obj(dp, obj, FTAG,
				    &snap));
				ASSERT3P(snap->ds_dir, ==, origin_head->ds_dir);
				obj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
				dsl_destroy_snapshot_sync_impl(snap,
				    B_FALSE, tx);
				dsl_dataset_rele(snap, FTAG);
			}
		}
		VERIFY3P(drc->drc_ds->ds_prev, ==,
		    origin_head->ds_prev);

		dsl_dataset_clone_swap_sync_impl(drc->drc_ds,
		    origin_head, tx);
		dsl_dataset_snapshot_sync_impl(origin_head,
		    drc->drc_tosnap, tx);

		/* set snapshot's creation time and guid */
		dmu_buf_will_dirty(origin_head->ds_prev->ds_dbuf, tx);
		dsl_dataset_phys(origin_head->ds_prev)->ds_creation_time =
		    drc->drc_drrb->drr_creation_time;
		dsl_dataset_phys(origin_head->ds_prev)->ds_guid =
		    drc->drc_drrb->drr_toguid;
		dsl_dataset_phys(origin_head->ds_prev)->ds_flags &=
		    ~DS_FLAG_INCONSISTENT;

		dmu_buf_will_dirty(origin_head->ds_dbuf, tx);
		dsl_dataset_phys(origin_head)->ds_flags &=
		    ~DS_FLAG_INCONSISTENT;

		drc->drc_newsnapobj =
		    dsl_dataset_phys(origin_head)->ds_prev_snap_obj;

		dsl_dataset_rele(origin_head, FTAG);
		dsl_destroy_head_sync_impl(drc->drc_ds, tx);

		if (drc->drc_owner != NULL)
			VERIFY3P(origin_head->ds_owner, ==, drc->drc_owner);
	} else {
		dsl_dataset_t *ds = drc->drc_ds;

		dsl_dataset_snapshot_sync_impl(ds, drc->drc_tosnap, tx);

		/* set snapshot's creation time and guid */
		dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
		dsl_dataset_phys(ds->ds_prev)->ds_creation_time =
		    drc->drc_drrb->drr_creation_time;
		dsl_dataset_phys(ds->ds_prev)->ds_guid =
		    drc->drc_drrb->drr_toguid;
		dsl_dataset_phys(ds->ds_prev)->ds_flags &=
		    ~DS_FLAG_INCONSISTENT;

		dmu_buf_will_dirty(ds->ds_dbuf, tx);
		dsl_dataset_phys(ds)->ds_flags &= ~DS_FLAG_INCONSISTENT;
		if (dsl_dataset_has_resume_receive_state(ds)) {
			(void) zap_remove(dp->dp_meta_objset, ds->ds_object,
			    DS_FIELD_RESUME_FROMGUID, tx);
			(void) zap_remove(dp->dp_meta_objset, ds->ds_object,
			    DS_FIELD_RESUME_OBJECT, tx);
			(void) zap_remove(dp->dp_meta_objset, ds->ds_object,
			    DS_FIELD_RESUME_OFFSET, tx);
			(void) zap_remove(dp->dp_meta_objset, ds->ds_object,
			    DS_FIELD_RESUME_BYTES, tx);
			(void) zap_remove(dp->dp_meta_objset, ds->ds_object,
			    DS_FIELD_RESUME_TOGUID, tx);
			(void) zap_remove(dp->dp_meta_objset, ds->ds_object,
			    DS_FIELD_RESUME_TONAME, tx);
		}
		drc->drc_newsnapobj =
		    dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj;
	}
	zvol_create_minors(dp->dp_spa, drc->drc_tofs, B_TRUE);

	/*
	 * Release the hold from dmu_recv_begin. This must be done before
	 * we return to open context, so that when we free the dataset's dnode
	 * we can evict its bonus buffer. Since the dataset may be destroyed
	 * at this point (and therefore won't have a valid pointer to the spa)
	 * we release the key mapping manually here while we do have a valid
	 * pointer, if it exists.
	 */
	if (!drc->drc_raw && encrypted) {
		(void) spa_keystore_remove_mapping(dmu_tx_pool(tx)->dp_spa,
		    drc->drc_ds->ds_object, drc->drc_ds);
	}
	dsl_dataset_disown(drc->drc_ds, 0, dmu_recv_tag);
	drc->drc_ds = NULL;
}

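/*
 * Add the snapshot identified by snapobj to the stream's GUID-to-dataset
 * map, taking a long hold on it so the mapping remains valid for the life
 * of the map.
 */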
static int
add_ds_to_guidmap(const char *name, avl_tree_t *guid_map, uint64_t snapobj,
    boolean_t raw)
{
	dsl_pool_t *dp;
	dsl_dataset_t *snapds;
	guid_map_entry_t *gmep;
	ds_hold_flags_t dsflags = (raw) ? 0 : DS_HOLD_FLAG_DECRYPT;
	int err;

	ASSERT(guid_map != NULL);

	err = dsl_pool_hold(name, FTAG, &dp);
	if (err != 0)
		return (err);
	gmep = kmem_alloc(sizeof (*gmep), KM_SLEEP);
	err = dsl_dataset_hold_obj_flags(dp, snapobj, dsflags, gmep, &snapds);
	if (err == 0) {
		gmep->guid = dsl_dataset_phys(snapds)->ds_guid;
		gmep->raw = raw;
		gmep->gme_ds = snapds;
		avl_add(guid_map, gmep);
		dsl_dataset_long_hold(snapds, gmep);
	} else {
		kmem_free(gmep, sizeof (*gmep));
	}

	dsl_pool_rele(dp, FTAG);
	return (err);
}

static int dmu_recv_end_modified_blocks = 3;

static int
dmu_recv_existing_end(dmu_recv_cookie_t *drc)
{
#ifdef _KERNEL
	/*
	 * We will be destroying the ds; make sure its origin is unmounted if
	 * necessary.
	 */
	char name[ZFS_MAX_DATASET_NAME_LEN];
	dsl_dataset_name(drc->drc_ds, name);
	zfs_destroy_unmount_origin(name);
#endif

	return (dsl_sync_task(drc->drc_tofs,
	    dmu_recv_end_check, dmu_recv_end_sync, drc,
	    dmu_recv_end_modified_blocks, ZFS_SPACE_CHECK_NORMAL));
}

static int
dmu_recv_new_end(dmu_recv_cookie_t *drc)
{
	return (dsl_sync_task(drc->drc_tofs,
	    dmu_recv_end_check, dmu_recv_end_sync, drc,
	    dmu_recv_end_modified_blocks, ZFS_SPACE_CHECK_NORMAL));
}

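/*
 * Complete the receive: run the end sync task to commit the received data,
 * then either clean up the dataset on failure or, for deduplicated streams,
 * record the new snapshot in the GUID map.
 */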
int
dmu_recv_end(dmu_recv_cookie_t *drc, void *owner)
{
	int error;

	drc->drc_owner = owner;

	if (drc->drc_newfs)
		error = dmu_recv_new_end(drc);
	else
		error = dmu_recv_existing_end(drc);

	if (error != 0) {
		dmu_recv_cleanup_ds(drc);
	} else if (drc->drc_guid_to_ds_map != NULL) {
		(void) add_ds_to_guidmap(drc->drc_tofs, drc->drc_guid_to_ds_map,
		    drc->drc_newsnapobj, drc->drc_raw);
	}
	return (error);
}

/*
 * Return TRUE if this objset is currently being received into.
 */
boolean_t
dmu_objset_is_receiving(objset_t *os)
{
	return (os->os_dsl_dataset != NULL &&
	    os->os_dsl_dataset->ds_owner == dmu_recv_tag);
}

#if defined(_KERNEL)
module_param(zfs_send_corrupt_data, int, 0644);
MODULE_PARM_DESC(zfs_send_corrupt_data, "Allow sending corrupt data");
#endif