1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
24 * Copyright (c) 2011, 2015 by Delphix. All rights reserved.
25 * Copyright (c) 2014, Joyent, Inc. All rights reserved.
26 * Copyright 2014 HybridCluster. All rights reserved.
27 * Copyright 2016 RackTop Systems.
28 * Copyright (c) 2016 Actifio, Inc. All rights reserved.
29 */
30
31 #include <sys/dmu.h>
32 #include <sys/dmu_impl.h>
33 #include <sys/dmu_tx.h>
34 #include <sys/dbuf.h>
35 #include <sys/dnode.h>
36 #include <sys/zfs_context.h>
37 #include <sys/dmu_objset.h>
38 #include <sys/dmu_traverse.h>
39 #include <sys/dsl_dataset.h>
40 #include <sys/dsl_dir.h>
41 #include <sys/dsl_prop.h>
42 #include <sys/dsl_pool.h>
43 #include <sys/dsl_synctask.h>
44 #include <sys/spa_impl.h>
45 #include <sys/zfs_ioctl.h>
46 #include <sys/zap.h>
47 #include <sys/zio_checksum.h>
48 #include <sys/zfs_znode.h>
49 #include <zfs_fletcher.h>
50 #include <sys/avl.h>
51 #include <sys/ddt.h>
52 #include <sys/zfs_onexit.h>
53 #include <sys/dmu_send.h>
54 #include <sys/dsl_destroy.h>
55 #include <sys/blkptr.h>
56 #include <sys/dsl_bookmark.h>
57 #include <sys/zfeature.h>
58 #include <sys/bqueue.h>
59 #include <sys/zvol.h>
60 #include <sys/policy.h>
61
62 /* Set this tunable to TRUE to replace corrupt data with 0x2f5baddb10c */
63 int zfs_send_corrupt_data = B_FALSE;
64 int zfs_send_queue_length = 16 * 1024 * 1024;
65 int zfs_recv_queue_length = 16 * 1024 * 1024;
66 /* Set this tunable to FALSE to disable setting of DRR_FLAG_FREERECORDS */
67 int zfs_send_set_freerecords_bit = B_TRUE;
68
69 static char *dmu_recv_tag = "dmu_recv_tag";
70 const char *recv_clone_name = "%recv";
71
72 #define BP_SPAN(datablkszsec, indblkshift, level) \
73 (((uint64_t)datablkszsec) << (SPA_MINBLOCKSHIFT + \
74 (level) * (indblkshift - SPA_BLKPTRSHIFT)))
75
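/*
 * Worked example (illustrative only, not part of the original source):
 * with 128K data blocks (datablkszsec = 256, since 256 << SPA_MINBLOCKSHIFT
 * = 128K) and 128K indirect blocks (indblkshift = 17), each indirect
 * block holds 1 << (17 - SPA_BLKPTRSHIFT) = 1024 block pointers, so:
 *
 *	BP_SPAN(256, 17, 0) = 256 << 9        = 128K
 *	BP_SPAN(256, 17, 1) = 256 << (9 + 10) = 128M
 *	BP_SPAN(256, 17, 2) = 256 << (9 + 20) = 128G
 *
 * That is, a level-N block pointer covers datablksz * 1024^N bytes of
 * the object, which is how do_dump() below turns a hole at any level
 * into a single DRR_FREE covering the whole span.
 */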
76 static void byteswap_record(dmu_replay_record_t *drr);
77
78 struct send_thread_arg {
79 bqueue_t q;
80 dsl_dataset_t *ds; /* Dataset to traverse */
81 uint64_t fromtxg; /* Traverse from this txg */
82 int flags; /* flags to pass to traverse_dataset */
83 int error_code;
84 boolean_t cancel;
85 zbookmark_phys_t resume;
86 };
87
88 struct send_block_record {
89 boolean_t eos_marker; /* Marks the end of the stream */
90 blkptr_t bp;
91 zbookmark_phys_t zb;
92 uint8_t indblkshift;
93 uint16_t datablkszsec;
94 bqueue_node_t ln;
95 };
96
97 typedef struct dump_bytes_io {
98 dmu_sendarg_t *dbi_dsp;
99 void *dbi_buf;
100 int dbi_len;
101 } dump_bytes_io_t;
102
103 static void
104 dump_bytes_cb(void *arg)
105 {
106 dump_bytes_io_t *dbi = (dump_bytes_io_t *)arg;
107 dmu_sendarg_t *dsp = dbi->dbi_dsp;
108 dsl_dataset_t *ds = dmu_objset_ds(dsp->dsa_os);
109 ssize_t resid; /* have to get resid to get detailed errno */
110
111 /*
112 * The code does not rely on len being a multiple of 8. We keep
113 * this assertion because of the corresponding assertion in
114 * receive_read(). Keeping this assertion ensures that we do not
115 * inadvertently break backwards compatibility (causing the assertion
116 * in receive_read() to trigger on old software). Newer feature flags
117 * (such as raw send) may break this assertion since they were
118 * introduced after the requirement was made obsolete.
119 */
120
121 ASSERT(dbi->dbi_len % 8 == 0 ||
122 (dsp->dsa_featureflags & DMU_BACKUP_FEATURE_RAW) != 0);
123
124 dsp->dsa_err = vn_rdwr(UIO_WRITE, dsp->dsa_vp,
125 (caddr_t)dbi->dbi_buf, dbi->dbi_len,
126 0, UIO_SYSSPACE, FAPPEND, RLIM64_INFINITY, CRED(), &resid);
127
128 mutex_enter(&ds->ds_sendstream_lock);
129 *dsp->dsa_off += dbi->dbi_len;
130 mutex_exit(&ds->ds_sendstream_lock);
131 }
132
133 static int
134 dump_bytes(dmu_sendarg_t *dsp, void *buf, int len)
135 {
136 dump_bytes_io_t dbi;
137
138 dbi.dbi_dsp = dsp;
139 dbi.dbi_buf = buf;
140 dbi.dbi_len = len;
141
142 #if defined(HAVE_LARGE_STACKS)
143 dump_bytes_cb(&dbi);
144 #else
145 /*
146 * The vn_rdwr() call is performed in a taskq to ensure that there is
147 * always enough stack space to write safely to the target filesystem.
148 * The ZIO_TYPE_FREE threads are used because there can be a lot of
149 * them and they are used in vdev_file.c for a similar purpose.
150 */
151 spa_taskq_dispatch_sync(dmu_objset_spa(dsp->dsa_os), ZIO_TYPE_FREE,
152 ZIO_TASKQ_ISSUE, dump_bytes_cb, &dbi, TQ_SLEEP);
153 #endif /* HAVE_LARGE_STACKS */
154
155 return (dsp->dsa_err);
156 }
157
158 /*
159 * For all record types except BEGIN, fill in the checksum (overlaid in
160 * drr_u.drr_checksum.drr_checksum). The checksum verifies everything
161 * up to the start of the checksum itself.
162 */
163 static int
164 dump_record(dmu_sendarg_t *dsp, void *payload, int payload_len)
165 {
166 ASSERT3U(offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
167 ==, sizeof (dmu_replay_record_t) - sizeof (zio_cksum_t));
168 (void) fletcher_4_incremental_native(dsp->dsa_drr,
169 offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
170 &dsp->dsa_zc);
171 if (dsp->dsa_drr->drr_type == DRR_BEGIN) {
172 dsp->dsa_sent_begin = B_TRUE;
173 } else {
174 ASSERT(ZIO_CHECKSUM_IS_ZERO(&dsp->dsa_drr->drr_u.
175 drr_checksum.drr_checksum));
176 dsp->dsa_drr->drr_u.drr_checksum.drr_checksum = dsp->dsa_zc;
177 }
178 if (dsp->dsa_drr->drr_type == DRR_END) {
179 dsp->dsa_sent_end = B_TRUE;
180 }
181 (void) fletcher_4_incremental_native(&dsp->dsa_drr->
182 drr_u.drr_checksum.drr_checksum,
183 sizeof (zio_cksum_t), &dsp->dsa_zc);
184 if (dump_bytes(dsp, dsp->dsa_drr, sizeof (dmu_replay_record_t)) != 0)
185 return (SET_ERROR(EINTR));
186 if (payload_len != 0) {
187 (void) fletcher_4_incremental_native(payload, payload_len,
188 &dsp->dsa_zc);
189 if (dump_bytes(dsp, payload, payload_len) != 0)
190 return (SET_ERROR(EINTR));
191 }
192 return (0);
193 }
194
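/*
 * Receiver-side view of the scheme above, as a minimal sketch for
 * illustration only (the variable names here are hypothetical, not
 * taken from the actual receive path):
 *
 *	zio_cksum_t running;		// zeroed before the first record
 *	dmu_replay_record_t rec;	// one record read off the stream
 *
 *	// checksum everything up to (not including) the stored checksum
 *	(void) fletcher_4_incremental_native(&rec,
 *	    offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
 *	    &running);
 *	if (rec.drr_type != DRR_BEGIN &&
 *	    !ZIO_CHECKSUM_EQUAL(running,
 *	    rec.drr_u.drr_checksum.drr_checksum))
 *		return (SET_ERROR(ECKSUM));	// corrupt or truncated
 *
 * The stored checksum (and any payload) is then folded into "running"
 * so that the next record's checksum covers it, mirroring the two
 * fletcher_4_incremental_native() calls above.
 */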
195 /*
196 * Fill in the drr_free struct, or perform aggregation if the previous record is
197 * also a free record, and the two are adjacent.
198 *
199 * Note that we send free records even for a full send, because we want to be
200 * able to receive a full send as a clone, which requires a list of all the free
201 * and freeobject records that were generated on the source.
202 */
203 static int
204 dump_free(dmu_sendarg_t *dsp, uint64_t object, uint64_t offset,
205 uint64_t length)
206 {
207 struct drr_free *drrf = &(dsp->dsa_drr->drr_u.drr_free);
208
209 /*
210 * When we receive a free record, dbuf_free_range() assumes
211 * that the receiving system doesn't have any dbufs in the range
212 * being freed. This is always true because there is a one-record
213 * constraint: we only send one WRITE record for any given
214 * object,offset. We know that the one-record constraint is
215 * true because we always send data in increasing order by
216 * object,offset.
217 *
218 * If the increasing-order constraint ever changes, we should find
219 * another way to assert that the one-record constraint is still
220 * satisfied.
221 */
222 ASSERT(object > dsp->dsa_last_data_object ||
223 (object == dsp->dsa_last_data_object &&
224 offset > dsp->dsa_last_data_offset));
225
226 if (length != -1ULL && offset + length < offset)
227 length = -1ULL;
228
229 /*
230 * If there is a pending op, but it's not PENDING_FREE, push it out,
231 * since free block aggregation can only be done for blocks of the
232 * same type (i.e., DRR_FREE records can only be aggregated with
233 * other DRR_FREE records; DRR_FREEOBJECTS records can only be
234 * aggregated with other DRR_FREEOBJECTS records).
235 */
236 if (dsp->dsa_pending_op != PENDING_NONE &&
237 dsp->dsa_pending_op != PENDING_FREE) {
238 if (dump_record(dsp, NULL, 0) != 0)
239 return (SET_ERROR(EINTR));
240 dsp->dsa_pending_op = PENDING_NONE;
241 }
242
243 if (dsp->dsa_pending_op == PENDING_FREE) {
244 /*
245 * There should never be a PENDING_FREE if length is -1
246 * (because dump_dnode is the only place where this
247 * function is called with a -1, and only after flushing
248 * any pending record).
249 */
250 ASSERT(length != -1ULL);
251 /*
252 * Check to see whether this free block can be aggregated
253 * with the pending one.
254 */
255 if (drrf->drr_object == object && drrf->drr_offset +
256 drrf->drr_length == offset) {
257 drrf->drr_length += length;
258 return (0);
259 } else {
260 /* not a continuation. Push out pending record */
261 if (dump_record(dsp, NULL, 0) != 0)
262 return (SET_ERROR(EINTR));
263 dsp->dsa_pending_op = PENDING_NONE;
264 }
265 }
266 /* create a FREE record and make it pending */
267 bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
268 dsp->dsa_drr->drr_type = DRR_FREE;
269 drrf->drr_object = object;
270 drrf->drr_offset = offset;
271 drrf->drr_length = length;
272 drrf->drr_toguid = dsp->dsa_toguid;
273 if (length == -1ULL) {
274 if (dump_record(dsp, NULL, 0) != 0)
275 return (SET_ERROR(EINTR));
276 } else {
277 dsp->dsa_pending_op = PENDING_FREE;
278 }
279
280 return (0);
281 }
282
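/*
 * Aggregation example (illustrative only): three adjacent 8K holes in
 * object 5 arrive as
 *
 *	dump_free(dsp, 5, 0, 8192);	// becomes the pending DRR_FREE
 *	dump_free(dsp, 5, 8192, 8192);	// adjacent: drr_length -> 16384
 *	dump_free(dsp, 5, 16384, 8192);	// adjacent: drr_length -> 24576
 *
 * and only one DRR_FREE record (object 5, offset 0, length 24576) is
 * emitted, once the pending op is pushed out by a record of another
 * type or by the length == -1ULL "free to end of object" case.
 */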
283 static int
284 dump_write(dmu_sendarg_t *dsp, dmu_object_type_t type, uint64_t object,
285 uint64_t offset, int lsize, int psize, const blkptr_t *bp, void *data)
286 {
287 uint64_t payload_size;
288 boolean_t raw = (dsp->dsa_featureflags & DMU_BACKUP_FEATURE_RAW);
289 struct drr_write *drrw = &(dsp->dsa_drr->drr_u.drr_write);
290
291 /*
292 * We send data in increasing object, offset order.
293 * See comment in dump_free() for details.
294 */
295 ASSERT(object > dsp->dsa_last_data_object ||
296 (object == dsp->dsa_last_data_object &&
297 offset > dsp->dsa_last_data_offset));
298 dsp->dsa_last_data_object = object;
299 dsp->dsa_last_data_offset = offset + lsize - 1;
300
301 /*
302 * If there is any kind of pending aggregation (currently either
303 * a grouping of free objects or free blocks), push it out to
304 * the stream, since aggregation can't be done across operations
305 * of different types.
306 */
307 if (dsp->dsa_pending_op != PENDING_NONE) {
308 if (dump_record(dsp, NULL, 0) != 0)
309 return (SET_ERROR(EINTR));
310 dsp->dsa_pending_op = PENDING_NONE;
311 }
312 /* write a WRITE record */
313 bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
314 dsp->dsa_drr->drr_type = DRR_WRITE;
315 drrw->drr_object = object;
316 drrw->drr_type = type;
317 drrw->drr_offset = offset;
318 drrw->drr_toguid = dsp->dsa_toguid;
319 drrw->drr_logical_size = lsize;
320
321 /* only set the compression fields if the buf is compressed or raw */
322 if (raw || lsize != psize) {
323 ASSERT(!BP_IS_EMBEDDED(bp));
324 ASSERT3S(psize, >, 0);
325
326 if (raw) {
327 ASSERT(BP_IS_PROTECTED(bp));
328
329 /*
330 * This is a raw protected block so we need to pass
331 * along everything the receiving side will need to
332 * interpret this block, including the byteswap, salt,
333 * IV, and MAC.
334 */
335 if (BP_SHOULD_BYTESWAP(bp))
336 drrw->drr_flags |= DRR_RAW_BYTESWAP;
337 zio_crypt_decode_params_bp(bp, drrw->drr_salt,
338 drrw->drr_iv);
339 zio_crypt_decode_mac_bp(bp, drrw->drr_mac);
340 } else {
341 /* this is a compressed block */
342 ASSERT(dsp->dsa_featureflags &
343 DMU_BACKUP_FEATURE_COMPRESSED);
344 ASSERT(!BP_SHOULD_BYTESWAP(bp));
345 ASSERT(!DMU_OT_IS_METADATA(BP_GET_TYPE(bp)));
346 ASSERT3U(BP_GET_COMPRESS(bp), !=, ZIO_COMPRESS_OFF);
347 ASSERT3S(lsize, >=, psize);
348 }
349
350 /* set fields common to compressed and raw sends */
351 drrw->drr_compressiontype = BP_GET_COMPRESS(bp);
352 drrw->drr_compressed_size = psize;
353 payload_size = drrw->drr_compressed_size;
354 } else {
355 payload_size = drrw->drr_logical_size;
356 }
357
358 if (bp == NULL || BP_IS_EMBEDDED(bp) || (BP_IS_PROTECTED(bp) && !raw)) {
359 /*
360 * There's no pre-computed checksum for partial-block writes,
361 * embedded BP's, or encrypted BP's that are being sent as
362 * plaintext, so (like fletcher4-checksummed blocks) userland
363 * will have to compute a dedup-capable checksum itself.
364 */
365 drrw->drr_checksumtype = ZIO_CHECKSUM_OFF;
366 } else {
367 drrw->drr_checksumtype = BP_GET_CHECKSUM(bp);
368 if (zio_checksum_table[drrw->drr_checksumtype].ci_flags &
369 ZCHECKSUM_FLAG_DEDUP)
370 drrw->drr_flags |= DRR_CHECKSUM_DEDUP;
371 DDK_SET_LSIZE(&drrw->drr_key, BP_GET_LSIZE(bp));
372 DDK_SET_PSIZE(&drrw->drr_key, BP_GET_PSIZE(bp));
373 DDK_SET_COMPRESS(&drrw->drr_key, BP_GET_COMPRESS(bp));
374 DDK_SET_CRYPT(&drrw->drr_key, BP_IS_PROTECTED(bp));
375 drrw->drr_key.ddk_cksum = bp->blk_cksum;
376 }
377
378 if (dump_record(dsp, data, payload_size) != 0)
379 return (SET_ERROR(EINTR));
380 return (0);
381 }
382
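/*
 * Sizing example (illustrative only): a 128K block that LZ4-compressed
 * to 40K on disk, sent with stream compression enabled
 * (DMU_BACKUP_FEATURE_COMPRESSED), produces
 *
 *	drr_logical_size    = 131072	(lsize)
 *	drr_compressed_size = 40960	(psize)
 *	drr_compressiontype = ZIO_COMPRESS_LZ4
 *	payload             = 40960 bytes of still-compressed data
 *
 * Without stream compression the ARC hands back decompressed data,
 * lsize == psize, the compression fields stay zero, and all 131072
 * bytes go on the wire.
 */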
383 static int
384 dump_write_embedded(dmu_sendarg_t *dsp, uint64_t object, uint64_t offset,
385 int blksz, const blkptr_t *bp)
386 {
387 char buf[BPE_PAYLOAD_SIZE];
388 struct drr_write_embedded *drrw =
389 &(dsp->dsa_drr->drr_u.drr_write_embedded);
390
391 if (dsp->dsa_pending_op != PENDING_NONE) {
392 if (dump_record(dsp, NULL, 0) != 0)
393 return (SET_ERROR(EINTR));
394 dsp->dsa_pending_op = PENDING_NONE;
395 }
396
397 ASSERT(BP_IS_EMBEDDED(bp));
398
399 bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
400 dsp->dsa_drr->drr_type = DRR_WRITE_EMBEDDED;
401 drrw->drr_object = object;
402 drrw->drr_offset = offset;
403 drrw->drr_length = blksz;
404 drrw->drr_toguid = dsp->dsa_toguid;
405 drrw->drr_compression = BP_GET_COMPRESS(bp);
406 drrw->drr_etype = BPE_GET_ETYPE(bp);
407 drrw->drr_lsize = BPE_GET_LSIZE(bp);
408 drrw->drr_psize = BPE_GET_PSIZE(bp);
409
410 decode_embedded_bp_compressed(bp, buf);
411
412 if (dump_record(dsp, buf, P2ROUNDUP(drrw->drr_psize, 8)) != 0)
413 return (SET_ERROR(EINTR));
414 return (0);
415 }
416
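/*
 * Example (illustrative only): an embedded block pointer carries at
 * most BPE_PAYLOAD_SIZE (112) bytes of data inside the bp itself, so a
 * 512-byte logical block that compressed to 100 bytes is sent as a
 * DRR_WRITE_EMBEDDED with drr_lsize = 512, drr_psize = 100, and a
 * P2ROUNDUP(100, 8) = 104-byte payload; no separate data block ever
 * appears in the stream.
 */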
417 static int
418 dump_spill(dmu_sendarg_t *dsp, const blkptr_t *bp, uint64_t object, void *data)
419 {
420 struct drr_spill *drrs = &(dsp->dsa_drr->drr_u.drr_spill);
421 uint64_t blksz = BP_GET_LSIZE(bp);
422
423 if (dsp->dsa_pending_op != PENDING_NONE) {
424 if (dump_record(dsp, NULL, 0) != 0)
425 return (SET_ERROR(EINTR));
426 dsp->dsa_pending_op = PENDING_NONE;
427 }
428
429 /* write a SPILL record */
430 bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
431 dsp->dsa_drr->drr_type = DRR_SPILL;
432 drrs->drr_object = object;
433 drrs->drr_length = blksz;
434 drrs->drr_toguid = dsp->dsa_toguid;
435
436 /* handle raw send fields */
437 if (dsp->dsa_featureflags & DMU_BACKUP_FEATURE_RAW) {
438 ASSERT(BP_IS_PROTECTED(bp));
439
440 if (BP_SHOULD_BYTESWAP(bp))
441 drrs->drr_flags |= DRR_RAW_BYTESWAP;
442 drrs->drr_compressiontype = BP_GET_COMPRESS(bp);
443 drrs->drr_compressed_size = BP_GET_PSIZE(bp);
444 zio_crypt_decode_params_bp(bp, drrs->drr_salt, drrs->drr_iv);
445 zio_crypt_decode_mac_bp(bp, drrs->drr_mac);
446 }
447
448 if (dump_record(dsp, data, blksz) != 0)
449 return (SET_ERROR(EINTR));
450 return (0);
451 }
452
453 static int
454 dump_freeobjects(dmu_sendarg_t *dsp, uint64_t firstobj, uint64_t numobjs)
455 {
456 struct drr_freeobjects *drrfo = &(dsp->dsa_drr->drr_u.drr_freeobjects);
457 uint64_t maxobj = DNODES_PER_BLOCK *
458 (DMU_META_DNODE(dsp->dsa_os)->dn_maxblkid + 1);
459
460 /*
461 * ZoL < 0.7 does not handle large FREEOBJECTS records correctly,
462 * leading to zfs recv never completing. To avoid this issue, don't
463 * send FREEOBJECTS records for object IDs which cannot exist on the
464 * receiving side.
465 */
466 if (maxobj > 0) {
467 if (maxobj < firstobj)
468 return (0);
469
470 if (maxobj < firstobj + numobjs)
471 numobjs = maxobj - firstobj;
472 }
473
474 /*
475 * If there is a pending op, but it's not PENDING_FREEOBJECTS,
476 * push it out, since free block aggregation can only be done for
477 * blocks of the same type (i.e., DRR_FREE records can only be
478 * aggregated with other DRR_FREE records; DRR_FREEOBJECTS records
479 * can only be aggregated with other DRR_FREEOBJECTS records).
480 */
481 if (dsp->dsa_pending_op != PENDING_NONE &&
482 dsp->dsa_pending_op != PENDING_FREEOBJECTS) {
483 if (dump_record(dsp, NULL, 0) != 0)
484 return (SET_ERROR(EINTR));
485 dsp->dsa_pending_op = PENDING_NONE;
486 }
487 if (dsp->dsa_pending_op == PENDING_FREEOBJECTS) {
488 /*
489 * See whether this free object array can be aggregated
490 * with the pending one.
491 */
492 if (drrfo->drr_firstobj + drrfo->drr_numobjs == firstobj) {
493 drrfo->drr_numobjs += numobjs;
494 return (0);
495 } else {
496 /* can't be aggregated. Push out pending record */
497 if (dump_record(dsp, NULL, 0) != 0)
498 return (SET_ERROR(EINTR));
499 dsp->dsa_pending_op = PENDING_NONE;
500 }
501 }
502
503 /* write a FREEOBJECTS record */
504 bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
505 dsp->dsa_drr->drr_type = DRR_FREEOBJECTS;
506 drrfo->drr_firstobj = firstobj;
507 drrfo->drr_numobjs = numobjs;
508 drrfo->drr_toguid = dsp->dsa_toguid;
509
510 dsp->dsa_pending_op = PENDING_FREEOBJECTS;
511
512 return (0);
513 }
514
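/*
 * Clamping example (illustrative only): with 16K dnode blocks and
 * 512-byte dnodes, DNODES_PER_BLOCK is 32. If the meta dnode of the
 * objset being sent only reaches dn_maxblkid = 1, then maxobj =
 * 32 * 2 = 64, and a request to free objects 50-149 (firstobj = 50,
 * numobjs = 100) is trimmed to numobjs = 14, covering objects 50-63;
 * a request starting at firstobj >= 64 is dropped entirely.
 */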
515 static int
516 dump_dnode(dmu_sendarg_t *dsp, const blkptr_t *bp, uint64_t object,
517 dnode_phys_t *dnp)
518 {
519 struct drr_object *drro = &(dsp->dsa_drr->drr_u.drr_object);
520 int bonuslen;
521
522 if (object < dsp->dsa_resume_object) {
523 /*
524 * Note: when resuming, we will visit all the dnodes in
525 * the block of dnodes that we are resuming from. In
526 * this case it's unnecessary to send the dnodes prior to
527 * the one we are resuming from. We should be at most one
528 * block's worth of dnodes behind the resume point.
529 */
530 ASSERT3U(dsp->dsa_resume_object - object, <,
531 1 << (DNODE_BLOCK_SHIFT - DNODE_SHIFT));
532 return (0);
533 }
534
535 if (dnp == NULL || dnp->dn_type == DMU_OT_NONE)
536 return (dump_freeobjects(dsp, object, 1));
537
538 if (dsp->dsa_pending_op != PENDING_NONE) {
539 if (dump_record(dsp, NULL, 0) != 0)
540 return (SET_ERROR(EINTR));
541 dsp->dsa_pending_op = PENDING_NONE;
542 }
543
544 /* write an OBJECT record */
545 bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
546 dsp->dsa_drr->drr_type = DRR_OBJECT;
547 drro->drr_object = object;
548 drro->drr_type = dnp->dn_type;
549 drro->drr_bonustype = dnp->dn_bonustype;
550 drro->drr_blksz = dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT;
551 drro->drr_bonuslen = dnp->dn_bonuslen;
552 drro->drr_dn_slots = dnp->dn_extra_slots + 1;
553 drro->drr_checksumtype = dnp->dn_checksum;
554 drro->drr_compress = dnp->dn_compress;
555 drro->drr_toguid = dsp->dsa_toguid;
556
557 if (!(dsp->dsa_featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) &&
558 drro->drr_blksz > SPA_OLD_MAXBLOCKSIZE)
559 drro->drr_blksz = SPA_OLD_MAXBLOCKSIZE;
560
561 bonuslen = P2ROUNDUP(dnp->dn_bonuslen, 8);
562
563 if ((dsp->dsa_featureflags & DMU_BACKUP_FEATURE_RAW)) {
564 ASSERT(BP_IS_ENCRYPTED(bp));
565
566 if (BP_SHOULD_BYTESWAP(bp))
567 drro->drr_flags |= DRR_RAW_BYTESWAP;
568
569 /* needed for reconstructing dnp on recv side */
570 drro->drr_indblkshift = dnp->dn_indblkshift;
571 drro->drr_nlevels = dnp->dn_nlevels;
572 drro->drr_nblkptr = dnp->dn_nblkptr;
573
574 /*
575 * Since we encrypt the entire bonus area, the (raw) part
576 * beyond the bonuslen is actually nonzero, so we need
577 * to send it.
578 */
579 if (bonuslen != 0) {
580 drro->drr_raw_bonuslen = DN_MAX_BONUS_LEN(dnp);
581 bonuslen = drro->drr_raw_bonuslen;
582 }
583 }
584
585 if (dump_record(dsp, DN_BONUS(dnp), bonuslen) != 0)
586 return (SET_ERROR(EINTR));
587
588 /* Free anything past the end of the file. */
589 if (dump_free(dsp, object, (dnp->dn_maxblkid + 1) *
590 (dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT), -1ULL) != 0)
591 return (SET_ERROR(EINTR));
592 if (dsp->dsa_err != 0)
593 return (SET_ERROR(EINTR));
594 return (0);
595 }
596
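/*
 * Raw-send bonus example (illustrative only): a 512-byte dnode with
 * dn_bonuslen = 64 normally sends P2ROUNDUP(64, 8) = 64 bonus bytes.
 * Under DMU_BACKUP_FEATURE_RAW the whole bonus area is one opaque
 * ciphertext, so drr_raw_bonuslen is raised to DN_MAX_BONUS_LEN(dnp)
 * (320 bytes for a standard one-slot dnode with no spill pointer) and
 * that entire region becomes the record payload.
 */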
597 static int
598 dump_object_range(dmu_sendarg_t *dsp, const blkptr_t *bp, uint64_t firstobj,
599 uint64_t numslots)
600 {
601 struct drr_object_range *drror =
602 &(dsp->dsa_drr->drr_u.drr_object_range);
603
604 /* we only use this record type for raw sends */
605 ASSERT(BP_IS_PROTECTED(bp));
606 ASSERT(dsp->dsa_featureflags & DMU_BACKUP_FEATURE_RAW);
607 ASSERT3U(BP_GET_COMPRESS(bp), ==, ZIO_COMPRESS_OFF);
608 ASSERT3U(BP_GET_TYPE(bp), ==, DMU_OT_DNODE);
609 ASSERT0(BP_GET_LEVEL(bp));
610
611 if (dsp->dsa_pending_op != PENDING_NONE) {
612 if (dump_record(dsp, NULL, 0) != 0)
613 return (SET_ERROR(EINTR));
614 dsp->dsa_pending_op = PENDING_NONE;
615 }
616
617 bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
618 dsp->dsa_drr->drr_type = DRR_OBJECT_RANGE;
619 drror->drr_firstobj = firstobj;
620 drror->drr_numslots = numslots;
621 drror->drr_toguid = dsp->dsa_toguid;
622 if (BP_SHOULD_BYTESWAP(bp))
623 drror->drr_flags |= DRR_RAW_BYTESWAP;
624 zio_crypt_decode_params_bp(bp, drror->drr_salt, drror->drr_iv);
625 zio_crypt_decode_mac_bp(bp, drror->drr_mac);
626
627 if (dump_record(dsp, NULL, 0) != 0)
628 return (SET_ERROR(EINTR));
629 return (0);
630 }
631
632 static boolean_t
633 backup_do_embed(dmu_sendarg_t *dsp, const blkptr_t *bp)
634 {
635 if (!BP_IS_EMBEDDED(bp))
636 return (B_FALSE);
637
638 /*
639 * Compression function must be legacy, or explicitly enabled.
640 */
641 if ((BP_GET_COMPRESS(bp) >= ZIO_COMPRESS_LEGACY_FUNCTIONS &&
642 !(dsp->dsa_featureflags & DMU_BACKUP_FEATURE_LZ4)))
643 return (B_FALSE);
644
645 /*
646 * Embed type must be explicitly enabled.
647 */
648 switch (BPE_GET_ETYPE(bp)) {
649 case BP_EMBEDDED_TYPE_DATA:
650 if (dsp->dsa_featureflags & DMU_BACKUP_FEATURE_EMBED_DATA)
651 return (B_TRUE);
652 break;
653 default:
654 return (B_FALSE);
655 }
656 return (B_FALSE);
657 }
658
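/*
 * Gating example (illustrative only): an embedded BP_EMBEDDED_TYPE_DATA
 * block compressed with LZ4 is sent as DRR_WRITE_EMBEDDED only when the
 * stream carries both DMU_BACKUP_FEATURE_EMBED_DATA and
 * DMU_BACKUP_FEATURE_LZ4 (LZ4 is not a legacy compression function);
 * otherwise do_dump() falls through to a regular DRR_WRITE of the
 * decompressed data.
 */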
659 /*
660 * This is the callback function to traverse_dataset that acts as the worker
661 * thread for dmu_send_impl.
662 */
663 /*ARGSUSED*/
664 static int
665 send_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
666 const zbookmark_phys_t *zb, const struct dnode_phys *dnp, void *arg)
667 {
668 struct send_thread_arg *sta = arg;
669 struct send_block_record *record;
670 uint64_t record_size;
671 int err = 0;
672
673 ASSERT(zb->zb_object == DMU_META_DNODE_OBJECT ||
674 zb->zb_object >= sta->resume.zb_object);
675 ASSERT3P(sta->ds, !=, NULL);
676
677 if (sta->cancel)
678 return (SET_ERROR(EINTR));
679
680 if (bp == NULL) {
681 ASSERT3U(zb->zb_level, ==, ZB_DNODE_LEVEL);
682 return (0);
683 } else if (zb->zb_level < 0) {
684 return (0);
685 }
686
687 record = kmem_zalloc(sizeof (struct send_block_record), KM_SLEEP);
688 record->eos_marker = B_FALSE;
689 record->bp = *bp;
690 record->zb = *zb;
691 record->indblkshift = dnp->dn_indblkshift;
692 record->datablkszsec = dnp->dn_datablkszsec;
693 record_size = dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT;
694 bqueue_enqueue(&sta->q, record, record_size);
695
696 return (err);
697 }
698
699 /*
700 * This function kicks off the traverse_dataset. It also handles setting the
701 * error code of the thread in case something goes wrong, and pushes the End of
702 * Stream record when the traverse_dataset call has finished. If there is no
703 * dataset to traverse, the thread immediately pushes the End of Stream marker.
704 */
705 static void
706 send_traverse_thread(void *arg)
707 {
708 struct send_thread_arg *st_arg = arg;
709 int err;
710 struct send_block_record *data;
711 fstrans_cookie_t cookie = spl_fstrans_mark();
712
713 if (st_arg->ds != NULL) {
714 err = traverse_dataset_resume(st_arg->ds,
715 st_arg->fromtxg, &st_arg->resume,
716 st_arg->flags, send_cb, st_arg);
717
718 if (err != EINTR)
719 st_arg->error_code = err;
720 }
721 data = kmem_zalloc(sizeof (*data), KM_SLEEP);
722 data->eos_marker = B_TRUE;
723 bqueue_enqueue(&st_arg->q, data, 1);
724 spl_fstrans_unmark(cookie);
725 thread_exit();
726 }
727
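/*
 * The traverse thread above is the producer half of a bounded
 * producer/consumer pair. A minimal sketch of the consumer pattern
 * (dmu_send_impl() below does exactly this, plus error handling):
 *
 *	struct send_block_record *rec = bqueue_dequeue(&to_arg.q);
 *	while (!rec->eos_marker && err == 0) {
 *		err = do_dump(dsp, rec);
 *		rec = get_next_record(&to_arg.q, rec); // dequeue + free
 *	}
 *
 * The queue is bounded at zfs_send_queue_length (16M by default), so
 * prefetched traversal data can never run unboundedly far ahead of a
 * slow output vnode.
 */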
728 /*
729 * This function actually handles figuring out what kind of record needs to be
730 * dumped, reading the data (which has hopefully been prefetched), and calling
731 * the appropriate helper function.
732 */
733 static int
734 do_dump(dmu_sendarg_t *dsa, struct send_block_record *data)
735 {
736 dsl_dataset_t *ds = dmu_objset_ds(dsa->dsa_os);
737 const blkptr_t *bp = &data->bp;
738 const zbookmark_phys_t *zb = &data->zb;
739 uint8_t indblkshift = data->indblkshift;
740 uint16_t dblkszsec = data->datablkszsec;
741 spa_t *spa = ds->ds_dir->dd_pool->dp_spa;
742 dmu_object_type_t type = bp ? BP_GET_TYPE(bp) : DMU_OT_NONE;
743 int err = 0;
744 uint64_t dnobj;
745
746 ASSERT3U(zb->zb_level, >=, 0);
747
748 ASSERT(zb->zb_object == DMU_META_DNODE_OBJECT ||
749 zb->zb_object >= dsa->dsa_resume_object);
750
751 /*
752 * All bps of an encrypted os should have the encryption bit set.
753 * If this is not true, it indicates tampering and we report an error.
754 */
755 if (dsa->dsa_os->os_encrypted &&
756 !BP_IS_HOLE(bp) && !BP_USES_CRYPT(bp)) {
757 spa_log_error(spa, zb);
758 zfs_panic_recover("unencrypted block in encrypted "
759 "object set %llu", ds->ds_object);
760 return (SET_ERROR(EIO));
761 }
762
763 if (zb->zb_object != DMU_META_DNODE_OBJECT &&
764 DMU_OBJECT_IS_SPECIAL(zb->zb_object)) {
765 return (0);
766 } else if (BP_IS_HOLE(bp) &&
767 zb->zb_object == DMU_META_DNODE_OBJECT) {
768 uint64_t span = BP_SPAN(dblkszsec, indblkshift, zb->zb_level);
769 uint64_t dnobj = (zb->zb_blkid * span) >> DNODE_SHIFT;
770 err = dump_freeobjects(dsa, dnobj, span >> DNODE_SHIFT);
771 } else if (BP_IS_HOLE(bp)) {
772 uint64_t span = BP_SPAN(dblkszsec, indblkshift, zb->zb_level);
773 uint64_t offset = zb->zb_blkid * span;
774 err = dump_free(dsa, zb->zb_object, offset, span);
775 } else if (zb->zb_level > 0 || type == DMU_OT_OBJSET) {
776 return (0);
777 } else if (type == DMU_OT_DNODE) {
778 dnode_phys_t *blk;
779 int epb = BP_GET_LSIZE(bp) >> DNODE_SHIFT;
780 arc_flags_t aflags = ARC_FLAG_WAIT;
781 arc_buf_t *abuf;
782 enum zio_flag zioflags = ZIO_FLAG_CANFAIL;
783 int i;
784
785 if (dsa->dsa_featureflags & DMU_BACKUP_FEATURE_RAW) {
786 ASSERT(BP_IS_ENCRYPTED(bp));
787 ASSERT3U(BP_GET_COMPRESS(bp), ==, ZIO_COMPRESS_OFF);
788 zioflags |= ZIO_FLAG_RAW;
789 }
790
791 ASSERT0(zb->zb_level);
792
793 if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
794 ZIO_PRIORITY_ASYNC_READ, zioflags, &aflags, zb) != 0)
795 return (SET_ERROR(EIO));
796
797 blk = abuf->b_data;
798 dnobj = zb->zb_blkid * epb;
799
800 /*
801 * Raw sends require sending encryption parameters for the
802 * block of dnodes. Regular sends do not need to send this
803 * info.
804 */
805 if (dsa->dsa_featureflags & DMU_BACKUP_FEATURE_RAW) {
806 ASSERT(arc_is_encrypted(abuf));
807 err = dump_object_range(dsa, bp, dnobj, epb);
808 }
809
810 if (err == 0) {
811 for (i = 0; i < epb; i += blk[i].dn_extra_slots + 1) {
812 err = dump_dnode(dsa, bp, dnobj + i, blk + i);
813 if (err != 0)
814 break;
815 }
816 }
817 arc_buf_destroy(abuf, &abuf);
818 } else if (type == DMU_OT_SA) {
819 arc_flags_t aflags = ARC_FLAG_WAIT;
820 arc_buf_t *abuf;
821 enum zio_flag zioflags = ZIO_FLAG_CANFAIL;
822
823 if (dsa->dsa_featureflags & DMU_BACKUP_FEATURE_RAW) {
824 ASSERT(BP_IS_PROTECTED(bp));
825 zioflags |= ZIO_FLAG_RAW;
826 }
827
828 if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
829 ZIO_PRIORITY_ASYNC_READ, zioflags, &aflags, zb) != 0)
830 return (SET_ERROR(EIO));
831
832 err = dump_spill(dsa, bp, zb->zb_object, abuf->b_data);
833 arc_buf_destroy(abuf, &abuf);
834 } else if (backup_do_embed(dsa, bp)) {
835 /* it's an embedded level-0 block of a regular object */
836 int blksz = dblkszsec << SPA_MINBLOCKSHIFT;
837 ASSERT0(zb->zb_level);
838 err = dump_write_embedded(dsa, zb->zb_object,
839 zb->zb_blkid * blksz, blksz, bp);
840 } else {
841 /* it's a level-0 block of a regular object */
842 arc_flags_t aflags = ARC_FLAG_WAIT;
843 arc_buf_t *abuf;
844 int blksz = dblkszsec << SPA_MINBLOCKSHIFT;
845 uint64_t offset;
846
847 /*
848 * If we have large blocks stored on disk but the send flags
849 * don't allow us to send large blocks, we split the data from
850 * the arc buf into chunks.
851 */
852 boolean_t split_large_blocks = blksz > SPA_OLD_MAXBLOCKSIZE &&
853 !(dsa->dsa_featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS);
854
855 /*
856 * Raw sends require that we always get raw data as it exists
857 * on disk, so we assert that we are not splitting blocks here.
858 */
859 boolean_t request_raw =
860 (dsa->dsa_featureflags & DMU_BACKUP_FEATURE_RAW) != 0;
861
862 /*
863 * We should only request compressed data from the ARC if all
864 * the following are true:
865 * - stream compression was requested
866 * - we aren't splitting large blocks into smaller chunks
867 * - the data won't need to be byteswapped before sending
868 * - this isn't an embedded block
869 * - this isn't metadata (if receiving on a different endian
870 * system it can be byteswapped more easily)
871 */
872 boolean_t request_compressed =
873 (dsa->dsa_featureflags & DMU_BACKUP_FEATURE_COMPRESSED) &&
874 !split_large_blocks && !BP_SHOULD_BYTESWAP(bp) &&
875 !BP_IS_EMBEDDED(bp) && !DMU_OT_IS_METADATA(BP_GET_TYPE(bp));
876
877 IMPLY(request_raw, !split_large_blocks);
878 IMPLY(request_raw, BP_IS_PROTECTED(bp));
879 ASSERT0(zb->zb_level);
880 ASSERT(zb->zb_object > dsa->dsa_resume_object ||
881 (zb->zb_object == dsa->dsa_resume_object &&
882 zb->zb_blkid * blksz >= dsa->dsa_resume_offset));
883
884 ASSERT3U(blksz, ==, BP_GET_LSIZE(bp));
885
886 enum zio_flag zioflags = ZIO_FLAG_CANFAIL;
887 if (request_raw)
888 zioflags |= ZIO_FLAG_RAW;
889 else if (request_compressed)
890 zioflags |= ZIO_FLAG_RAW_COMPRESS;
891
892 if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
893 ZIO_PRIORITY_ASYNC_READ, zioflags, &aflags, zb) != 0) {
894 if (zfs_send_corrupt_data) {
895 /* Send a block filled with 0x"zfs badd bloc" */
896 abuf = arc_alloc_buf(spa, &abuf, ARC_BUFC_DATA,
897 blksz);
898 uint64_t *ptr;
899 for (ptr = abuf->b_data;
900 (char *)ptr < (char *)abuf->b_data + blksz;
901 ptr++)
902 *ptr = 0x2f5baddb10cULL;
903 } else {
904 return (SET_ERROR(EIO));
905 }
906 }
907
908 offset = zb->zb_blkid * blksz;
909
910 if (split_large_blocks) {
911 ASSERT0(arc_is_encrypted(abuf));
912 ASSERT3U(arc_get_compression(abuf), ==,
913 ZIO_COMPRESS_OFF);
914 char *buf = abuf->b_data;
915 while (blksz > 0 && err == 0) {
916 int n = MIN(blksz, SPA_OLD_MAXBLOCKSIZE);
917 err = dump_write(dsa, type, zb->zb_object,
918 offset, n, n, NULL, buf);
919 offset += n;
920 buf += n;
921 blksz -= n;
922 }
923 } else {
924 err = dump_write(dsa, type, zb->zb_object, offset,
925 blksz, arc_buf_size(abuf), bp, abuf->b_data);
926 }
927 arc_buf_destroy(abuf, &abuf);
928 }
929
930 ASSERT(err == 0 || err == EINTR);
931 return (err);
932 }
933
934 /*
935 * Pop the new data off the queue, and free the old data.
936 */
937 static struct send_block_record *
938 get_next_record(bqueue_t *bq, struct send_block_record *data)
939 {
940 struct send_block_record *tmp = bqueue_dequeue(bq);
941 kmem_free(data, sizeof (*data));
942 return (tmp);
943 }
944
945 /*
946 * Actually do the bulk of the work in a zfs send.
947 *
948 * Note: Releases dp using the specified tag.
949 */
950 static int
951 dmu_send_impl(void *tag, dsl_pool_t *dp, dsl_dataset_t *to_ds,
952 zfs_bookmark_phys_t *ancestor_zb, boolean_t is_clone,
953 boolean_t embedok, boolean_t large_block_ok, boolean_t compressok,
954 boolean_t rawok, int outfd, uint64_t resumeobj, uint64_t resumeoff,
955 vnode_t *vp, offset_t *off)
956 {
957 objset_t *os;
958 dmu_replay_record_t *drr;
959 dmu_sendarg_t *dsp;
960 int err;
961 uint64_t fromtxg = 0;
962 uint64_t featureflags = 0;
963 struct send_thread_arg to_arg;
964 void *payload = NULL;
965 size_t payload_len = 0;
966 struct send_block_record *to_data;
967
968 err = dmu_objset_from_ds(to_ds, &os);
969 if (err != 0) {
970 dsl_pool_rele(dp, tag);
971 return (err);
972 }
973
974 /*
975 * If this is a non-raw send of an encrypted ds, we can ensure that
976 * the objset_phys_t is authenticated. This is safe because this is
977 * either a snapshot or we have owned the dataset, ensuring that
978 * it can't be modified.
979 */
980 if (!rawok && os->os_encrypted &&
981 arc_is_unauthenticated(os->os_phys_buf)) {
982 err = arc_untransform(os->os_phys_buf, os->os_spa,
983 to_ds->ds_object, B_FALSE);
984 if (err != 0) {
985 dsl_pool_rele(dp, tag);
986 return (err);
987 }
988
989 ASSERT0(arc_is_unauthenticated(os->os_phys_buf));
990 }
991
992 drr = kmem_zalloc(sizeof (dmu_replay_record_t), KM_SLEEP);
993 drr->drr_type = DRR_BEGIN;
994 drr->drr_u.drr_begin.drr_magic = DMU_BACKUP_MAGIC;
995 DMU_SET_STREAM_HDRTYPE(drr->drr_u.drr_begin.drr_versioninfo,
996 DMU_SUBSTREAM);
997
998 bzero(&to_arg, sizeof (to_arg));
999
1000 #ifdef _KERNEL
1001 if (dmu_objset_type(os) == DMU_OST_ZFS) {
1002 uint64_t version;
1003 if (zfs_get_zplprop(os, ZFS_PROP_VERSION, &version) != 0) {
1004 kmem_free(drr, sizeof (dmu_replay_record_t));
1005 dsl_pool_rele(dp, tag);
1006 return (SET_ERROR(EINVAL));
1007 }
1008 if (version >= ZPL_VERSION_SA) {
1009 featureflags |= DMU_BACKUP_FEATURE_SA_SPILL;
1010 }
1011 }
1012 #endif
1013
1014 /* raw sends imply large_block_ok */
1015 if ((large_block_ok || rawok) &&
1016 to_ds->ds_feature_inuse[SPA_FEATURE_LARGE_BLOCKS])
1017 featureflags |= DMU_BACKUP_FEATURE_LARGE_BLOCKS;
1018 if (to_ds->ds_feature_inuse[SPA_FEATURE_LARGE_DNODE])
1019 featureflags |= DMU_BACKUP_FEATURE_LARGE_DNODE;
1020
1021 /* encrypted datasets will not have embedded blocks */
1022 if ((embedok || rawok) && !os->os_encrypted &&
1023 spa_feature_is_active(dp->dp_spa, SPA_FEATURE_EMBEDDED_DATA)) {
1024 featureflags |= DMU_BACKUP_FEATURE_EMBED_DATA;
1025 }
1026
1027 /* raw send implies compressok */
1028 if (compressok || rawok)
1029 featureflags |= DMU_BACKUP_FEATURE_COMPRESSED;
1030 if (rawok && os->os_encrypted)
1031 featureflags |= DMU_BACKUP_FEATURE_RAW;
1032
1033 if ((featureflags &
1034 (DMU_BACKUP_FEATURE_EMBED_DATA | DMU_BACKUP_FEATURE_COMPRESSED |
1035 DMU_BACKUP_FEATURE_RAW)) != 0 &&
1036 spa_feature_is_active(dp->dp_spa, SPA_FEATURE_LZ4_COMPRESS)) {
1037 featureflags |= DMU_BACKUP_FEATURE_LZ4;
1038 }
1039
1040 if (resumeobj != 0 || resumeoff != 0) {
1041 featureflags |= DMU_BACKUP_FEATURE_RESUMING;
1042 }
1043
1044 DMU_SET_FEATUREFLAGS(drr->drr_u.drr_begin.drr_versioninfo,
1045 featureflags);
1046
1047 drr->drr_u.drr_begin.drr_creation_time =
1048 dsl_dataset_phys(to_ds)->ds_creation_time;
1049 drr->drr_u.drr_begin.drr_type = dmu_objset_type(os);
1050 if (is_clone)
1051 drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_CLONE;
1052 drr->drr_u.drr_begin.drr_toguid = dsl_dataset_phys(to_ds)->ds_guid;
1053 if (dsl_dataset_phys(to_ds)->ds_flags & DS_FLAG_CI_DATASET)
1054 drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_CI_DATA;
1055 if (zfs_send_set_freerecords_bit)
1056 drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_FREERECORDS;
1057
1058 if (ancestor_zb != NULL) {
1059 drr->drr_u.drr_begin.drr_fromguid =
1060 ancestor_zb->zbm_guid;
1061 fromtxg = ancestor_zb->zbm_creation_txg;
1062 }
1063 dsl_dataset_name(to_ds, drr->drr_u.drr_begin.drr_toname);
1064 if (!to_ds->ds_is_snapshot) {
1065 (void) strlcat(drr->drr_u.drr_begin.drr_toname, "@--head--",
1066 sizeof (drr->drr_u.drr_begin.drr_toname));
1067 }
1068
1069 dsp = kmem_zalloc(sizeof (dmu_sendarg_t), KM_SLEEP);
1070
1071 dsp->dsa_drr = drr;
1072 dsp->dsa_vp = vp;
1073 dsp->dsa_outfd = outfd;
1074 dsp->dsa_proc = curproc;
1075 dsp->dsa_os = os;
1076 dsp->dsa_off = off;
1077 dsp->dsa_toguid = dsl_dataset_phys(to_ds)->ds_guid;
1078 dsp->dsa_pending_op = PENDING_NONE;
1079 dsp->dsa_featureflags = featureflags;
1080 dsp->dsa_resume_object = resumeobj;
1081 dsp->dsa_resume_offset = resumeoff;
1082
1083 mutex_enter(&to_ds->ds_sendstream_lock);
1084 list_insert_head(&to_ds->ds_sendstreams, dsp);
1085 mutex_exit(&to_ds->ds_sendstream_lock);
1086
1087 dsl_dataset_long_hold(to_ds, FTAG);
1088 dsl_pool_rele(dp, tag);
1089
1090 /* handle features that require a DRR_BEGIN payload */
1091 if (featureflags &
1092 (DMU_BACKUP_FEATURE_RESUMING | DMU_BACKUP_FEATURE_RAW)) {
1093 nvlist_t *keynvl = NULL;
1094 nvlist_t *nvl = fnvlist_alloc();
1095
1096 if (featureflags & DMU_BACKUP_FEATURE_RESUMING) {
1097 dmu_object_info_t to_doi;
1098 err = dmu_object_info(os, resumeobj, &to_doi);
1099 if (err != 0) {
1100 fnvlist_free(nvl);
1101 goto out;
1102 }
1103
1104 SET_BOOKMARK(&to_arg.resume, to_ds->ds_object,
1105 resumeobj, 0,
1106 resumeoff / to_doi.doi_data_block_size);
1107
1108 fnvlist_add_uint64(nvl, "resume_object", resumeobj);
1109 fnvlist_add_uint64(nvl, "resume_offset", resumeoff);
1110 }
1111
1112 if (featureflags & DMU_BACKUP_FEATURE_RAW) {
1113 ASSERT(os->os_encrypted);
1114
1115 err = dsl_crypto_populate_key_nvlist(to_ds, &keynvl);
1116 if (err != 0) {
1117 fnvlist_free(nvl);
1118 goto out;
1119 }
1120
1121 fnvlist_add_nvlist(nvl, "crypt_keydata", keynvl);
1122 }
1123
1124 payload = fnvlist_pack(nvl, &payload_len);
1125 drr->drr_payloadlen = payload_len;
1126 fnvlist_free(keynvl);
1127 fnvlist_free(nvl);
1128 }
1129
1130 err = dump_record(dsp, payload, payload_len);
1131 fnvlist_pack_free(payload, payload_len);
1132 if (err != 0) {
1133 err = dsp->dsa_err;
1134 goto out;
1135 }
1136
1137 err = bqueue_init(&to_arg.q, zfs_send_queue_length,
1138 offsetof(struct send_block_record, ln));
1139 to_arg.error_code = 0;
1140 to_arg.cancel = B_FALSE;
1141 to_arg.ds = to_ds;
1142 to_arg.fromtxg = fromtxg;
1143 to_arg.flags = TRAVERSE_PRE | TRAVERSE_PREFETCH;
1144 if (rawok)
1145 to_arg.flags |= TRAVERSE_NO_DECRYPT;
1146 (void) thread_create(NULL, 0, send_traverse_thread, &to_arg, 0, curproc,
1147 TS_RUN, minclsyspri);
1148
1149 to_data = bqueue_dequeue(&to_arg.q);
1150
1151 while (!to_data->eos_marker && err == 0) {
1152 err = do_dump(dsp, to_data);
1153 to_data = get_next_record(&to_arg.q, to_data);
1154 if (issig(JUSTLOOKING) && issig(FORREAL))
1155 err = EINTR;
1156 }
1157
1158 if (err != 0) {
1159 to_arg.cancel = B_TRUE;
1160 while (!to_data->eos_marker) {
1161 to_data = get_next_record(&to_arg.q, to_data);
1162 }
1163 }
1164 kmem_free(to_data, sizeof (*to_data));
1165
1166 bqueue_destroy(&to_arg.q);
1167
1168 if (err == 0 && to_arg.error_code != 0)
1169 err = to_arg.error_code;
1170
1171 if (err != 0)
1172 goto out;
1173
1174 if (dsp->dsa_pending_op != PENDING_NONE)
1175 if (dump_record(dsp, NULL, 0) != 0)
1176 err = SET_ERROR(EINTR);
1177
1178 if (err != 0) {
1179 if (err == EINTR && dsp->dsa_err != 0)
1180 err = dsp->dsa_err;
1181 goto out;
1182 }
1183
1184 bzero(drr, sizeof (dmu_replay_record_t));
1185 drr->drr_type = DRR_END;
1186 drr->drr_u.drr_end.drr_checksum = dsp->dsa_zc;
1187 drr->drr_u.drr_end.drr_toguid = dsp->dsa_toguid;
1188
1189 if (dump_record(dsp, NULL, 0) != 0)
1190 err = dsp->dsa_err;
1191 out:
1192 mutex_enter(&to_ds->ds_sendstream_lock);
1193 list_remove(&to_ds->ds_sendstreams, dsp);
1194 mutex_exit(&to_ds->ds_sendstream_lock);
1195
1196 VERIFY(err != 0 || (dsp->dsa_sent_begin && dsp->dsa_sent_end));
1197
1198 kmem_free(drr, sizeof (dmu_replay_record_t));
1199 kmem_free(dsp, sizeof (dmu_sendarg_t));
1200
1201 dsl_dataset_long_rele(to_ds, FTAG);
1202
1203 return (err);
1204 }
1205
1206 int
1207 dmu_send_obj(const char *pool, uint64_t tosnap, uint64_t fromsnap,
1208 boolean_t embedok, boolean_t large_block_ok, boolean_t compressok,
1209 boolean_t rawok, int outfd, vnode_t *vp, offset_t *off)
1210 {
1211 dsl_pool_t *dp;
1212 dsl_dataset_t *ds;
1213 dsl_dataset_t *fromds = NULL;
1214 ds_hold_flags_t dsflags = (rawok) ? 0 : DS_HOLD_FLAG_DECRYPT;
1215 int err;
1216
1217 err = dsl_pool_hold(pool, FTAG, &dp);
1218 if (err != 0)
1219 return (err);
1220
1221 err = dsl_dataset_hold_obj_flags(dp, tosnap, dsflags, FTAG, &ds);
1222 if (err != 0) {
1223 dsl_pool_rele(dp, FTAG);
1224 return (err);
1225 }
1226
1227 if (fromsnap != 0) {
1228 zfs_bookmark_phys_t zb;
1229 boolean_t is_clone;
1230
1231 err = dsl_dataset_hold_obj(dp, fromsnap, FTAG, &fromds);
1232 if (err != 0) {
1233 dsl_dataset_rele_flags(ds, dsflags, FTAG);
1234 dsl_pool_rele(dp, FTAG);
1235 return (err);
1236 }
1237 if (!dsl_dataset_is_before(ds, fromds, 0))
1238 err = SET_ERROR(EXDEV);
1239 zb.zbm_creation_time =
1240 dsl_dataset_phys(fromds)->ds_creation_time;
1241 zb.zbm_creation_txg = dsl_dataset_phys(fromds)->ds_creation_txg;
1242 zb.zbm_guid = dsl_dataset_phys(fromds)->ds_guid;
1243 is_clone = (fromds->ds_dir != ds->ds_dir);
1244 dsl_dataset_rele(fromds, FTAG);
1245 err = dmu_send_impl(FTAG, dp, ds, &zb, is_clone,
1246 embedok, large_block_ok, compressok, rawok, outfd,
1247 0, 0, vp, off);
1248 } else {
1249 err = dmu_send_impl(FTAG, dp, ds, NULL, B_FALSE,
1250 embedok, large_block_ok, compressok, rawok, outfd,
1251 0, 0, vp, off);
1252 }
1253 dsl_dataset_rele_flags(ds, dsflags, FTAG);
1254 return (err);
1255 }
1256
1257 int
1258 dmu_send(const char *tosnap, const char *fromsnap, boolean_t embedok,
1259 boolean_t large_block_ok, boolean_t compressok, boolean_t rawok,
1260 int outfd, uint64_t resumeobj, uint64_t resumeoff, vnode_t *vp,
1261 offset_t *off)
1262 {
1263 dsl_pool_t *dp;
1264 dsl_dataset_t *ds;
1265 int err;
1266 ds_hold_flags_t dsflags = (rawok) ? 0 : DS_HOLD_FLAG_DECRYPT;
1267 boolean_t owned = B_FALSE;
1268
1269 if (fromsnap != NULL && strpbrk(fromsnap, "@#") == NULL)
1270 return (SET_ERROR(EINVAL));
1271
1272 err = dsl_pool_hold(tosnap, FTAG, &dp);
1273 if (err != 0)
1274 return (err);
1275
1276 if (strchr(tosnap, '@') == NULL && spa_writeable(dp->dp_spa)) {
1277 /*
1278 * We are sending a filesystem or volume. Ensure
1279 * that it doesn't change by owning the dataset.
1280 */
1281 err = dsl_dataset_own(dp, tosnap, dsflags, FTAG, &ds);
1282 owned = B_TRUE;
1283 } else {
1284 err = dsl_dataset_hold_flags(dp, tosnap, dsflags, FTAG, &ds);
1285 }
1286 if (err != 0) {
1287 dsl_pool_rele(dp, FTAG);
1288 return (err);
1289 }
1290
1291 if (fromsnap != NULL) {
1292 zfs_bookmark_phys_t zb;
1293 boolean_t is_clone = B_FALSE;
1294 int fsnamelen = strchr(tosnap, '@') - tosnap;
1295
1296 /*
1297 * If the fromsnap is in a different filesystem, then
1298 * mark the send stream as a clone.
1299 */
1300 if (strncmp(tosnap, fromsnap, fsnamelen) != 0 ||
1301 (fromsnap[fsnamelen] != '@' &&
1302 fromsnap[fsnamelen] != '#')) {
1303 is_clone = B_TRUE;
1304 }
1305
1306 if (strchr(fromsnap, '@')) {
1307 dsl_dataset_t *fromds;
1308 err = dsl_dataset_hold(dp, fromsnap, FTAG, &fromds);
1309 if (err == 0) {
1310 if (!dsl_dataset_is_before(ds, fromds, 0))
1311 err = SET_ERROR(EXDEV);
1312 zb.zbm_creation_time =
1313 dsl_dataset_phys(fromds)->ds_creation_time;
1314 zb.zbm_creation_txg =
1315 dsl_dataset_phys(fromds)->ds_creation_txg;
1316 zb.zbm_guid = dsl_dataset_phys(fromds)->ds_guid;
1317 is_clone = (ds->ds_dir != fromds->ds_dir);
1318 dsl_dataset_rele(fromds, FTAG);
1319 }
1320 } else {
1321 err = dsl_bookmark_lookup(dp, fromsnap, ds, &zb);
1322 }
1323 if (err != 0) {
1324 if (owned)
1325 dsl_dataset_disown(ds, dsflags, FTAG);
1326 else
1327 dsl_dataset_rele_flags(ds, dsflags, FTAG);
1328
1329 dsl_pool_rele(dp, FTAG);
1330 return (err);
1331 }
1332 err = dmu_send_impl(FTAG, dp, ds, &zb, is_clone,
1333 embedok, large_block_ok, compressok, rawok,
1334 outfd, resumeobj, resumeoff, vp, off);
1335 } else {
1336 err = dmu_send_impl(FTAG, dp, ds, NULL, B_FALSE,
1337 embedok, large_block_ok, compressok, rawok,
1338 outfd, resumeobj, resumeoff, vp, off);
1339 }
1340 if (owned)
1341 dsl_dataset_disown(ds, dsflags, FTAG);
1342 else
1343 dsl_dataset_rele_flags(ds, dsflags, FTAG);
1344
1345 return (err);
1346 }
1347
1348 static int
1349 dmu_adjust_send_estimate_for_indirects(dsl_dataset_t *ds, uint64_t uncompressed,
1350 uint64_t compressed, boolean_t stream_compressed, uint64_t *sizep)
1351 {
1352 int err;
1353 uint64_t size;
1354 /*
1355 * Assume that space (both on-disk and in-stream) is dominated by
1356 * data. We will adjust for indirect blocks and the copies property,
1357 * but ignore per-object space used (e.g., dnodes and DRR_OBJECT records).
1358 */
1359
1360 uint64_t recordsize;
1361 uint64_t record_count;
1362 objset_t *os;
1363 VERIFY0(dmu_objset_from_ds(ds, &os));
1364
1365 /* Assume all (uncompressed) blocks are recordsize. */
1366 if (os->os_phys->os_type == DMU_OST_ZVOL) {
1367 err = dsl_prop_get_int_ds(ds,
1368 zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &recordsize);
1369 } else {
1370 err = dsl_prop_get_int_ds(ds,
1371 zfs_prop_to_name(ZFS_PROP_RECORDSIZE), &recordsize);
1372 }
1373 if (err != 0)
1374 return (err);
1375 record_count = uncompressed / recordsize;
1376
1377 /*
1378 * If we're estimating a send size for a compressed stream, use the
1379 * compressed data size to estimate the stream size. Otherwise, use the
1380 * uncompressed data size.
1381 */
1382 size = stream_compressed ? compressed : uncompressed;
1383
1384 /*
1385 * Subtract out approximate space used by indirect blocks.
1386 * Assume most space is used by data blocks (non-indirect, non-dnode).
1387 * Assume no ditto blocks or internal fragmentation.
1388 *
1389 * Therefore, space used by indirect blocks is sizeof(blkptr_t) per
1390 * block.
1391 */
1392 size -= record_count * sizeof (blkptr_t);
1393
1394 /* Add in the space for the record associated with each block. */
1395 size += record_count * sizeof (dmu_replay_record_t);
1396
1397 *sizep = size;
1398
1399 return (0);
1400 }
1401
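/*
 * Worked example (illustrative only, assuming a 312-byte
 * dmu_replay_record_t): 1G of uncompressed data at recordsize=128K
 * gives record_count = 8192, so an uncompressed stream is estimated at
 *
 *	1G - 8192 * sizeof (blkptr_t)             (about 1M of indirects)
 *	   + 8192 * sizeof (dmu_replay_record_t)  (about 2.4M of headers)
 *
 * or roughly 1025.4M, before dmu_send_estimate() adds the BEGIN and
 * END records.
 */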
1402 int
1403 dmu_send_estimate(dsl_dataset_t *ds, dsl_dataset_t *fromds,
1404 boolean_t stream_compressed, uint64_t *sizep)
1405 {
1406 int err;
1407 uint64_t uncomp, comp;
1408
1409 ASSERT(dsl_pool_config_held(ds->ds_dir->dd_pool));
1410
1411 /* tosnap must be a snapshot */
1412 if (!ds->ds_is_snapshot)
1413 return (SET_ERROR(EINVAL));
1414
1415 /* fromsnap, if provided, must be a snapshot */
1416 if (fromds != NULL && !fromds->ds_is_snapshot)
1417 return (SET_ERROR(EINVAL));
1418
1419 /*
1420 * fromsnap must be an earlier snapshot from the same fs as tosnap,
1421 * or the origin's fs.
1422 */
1423 if (fromds != NULL && !dsl_dataset_is_before(ds, fromds, 0))
1424 return (SET_ERROR(EXDEV));
1425
1426 /* Get compressed and uncompressed size estimates of changed data. */
1427 if (fromds == NULL) {
1428 uncomp = dsl_dataset_phys(ds)->ds_uncompressed_bytes;
1429 comp = dsl_dataset_phys(ds)->ds_compressed_bytes;
1430 } else {
1431 uint64_t used;
1432 err = dsl_dataset_space_written(fromds, ds,
1433 &used, &comp, &uncomp);
1434 if (err != 0)
1435 return (err);
1436 }
1437
1438 err = dmu_adjust_send_estimate_for_indirects(ds, uncomp, comp,
1439 stream_compressed, sizep);
1440 /*
1441 * Add the size of the BEGIN and END records to the estimate.
1442 */
1443 *sizep += 2 * sizeof (dmu_replay_record_t);
1444 return (err);
1445 }
1446
1447 struct calculate_send_arg {
1448 uint64_t uncompressed;
1449 uint64_t compressed;
1450 };
1451
1452 /*
1453 * Simple callback used to traverse the blocks of a snapshot and sum their
1454 * uncompressed and compressed sizes.
1455 */
1456 /* ARGSUSED */
1457 static int
1458 dmu_calculate_send_traversal(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
1459 const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
1460 {
1461 struct calculate_send_arg *space = arg;
1462 if (bp != NULL && !BP_IS_HOLE(bp)) {
1463 space->uncompressed += BP_GET_UCSIZE(bp);
1464 space->compressed += BP_GET_PSIZE(bp);
1465 }
1466 return (0);
1467 }
1468
1469 /*
1470 * Given a destination snapshot and a TXG, calculate the approximate size of a
1471 * send stream sent from that TXG. from_txg may be zero, indicating that the
1472 * whole snapshot will be sent.
1473 */
1474 int
1475 dmu_send_estimate_from_txg(dsl_dataset_t *ds, uint64_t from_txg,
1476 boolean_t stream_compressed, uint64_t *sizep)
1477 {
1478 int err;
1479 struct calculate_send_arg size = { 0 };
1480
1481 ASSERT(dsl_pool_config_held(ds->ds_dir->dd_pool));
1482
1483 /* tosnap must be a snapshot */
1484 if (!dsl_dataset_is_snapshot(ds))
1485 return (SET_ERROR(EINVAL));
1486
1487 /* verify that from_txg is before the provided snapshot was taken */
1488 if (from_txg >= dsl_dataset_phys(ds)->ds_creation_txg) {
1489 return (SET_ERROR(EXDEV));
1490 }
1491 /*
1492 * Traverse the blocks of the snapshot with birth times after
1493 * from_txg, summing their uncompressed and compressed sizes.
1494 */
1495 err = traverse_dataset(ds, from_txg,
1496 TRAVERSE_POST | TRAVERSE_NO_DECRYPT,
1497 dmu_calculate_send_traversal, &size);
1498
1499 if (err)
1500 return (err);
1501
1502 err = dmu_adjust_send_estimate_for_indirects(ds, size.uncompressed,
1503 size.compressed, stream_compressed, sizep);
1504 return (err);
1505 }
1506
1507 typedef struct dmu_recv_begin_arg {
1508 const char *drba_origin;
1509 dmu_recv_cookie_t *drba_cookie;
1510 cred_t *drba_cred;
1511 uint64_t drba_snapobj;
1512 } dmu_recv_begin_arg_t;
1513
1514 static int
1515 recv_begin_check_existing_impl(dmu_recv_begin_arg_t *drba, dsl_dataset_t *ds,
1516 uint64_t fromguid)
1517 {
1518 uint64_t val;
1519 int error;
1520 dsl_pool_t *dp = ds->ds_dir->dd_pool;
1521
1522 /* temporary clone name must not exist */
1523 error = zap_lookup(dp->dp_meta_objset,
1524 dsl_dir_phys(ds->ds_dir)->dd_child_dir_zapobj, recv_clone_name,
1525 8, 1, &val);
1526 if (error != ENOENT)
1527 return (error == 0 ? EBUSY : error);
1528
1529 /* new snapshot name must not exist */
1530 error = zap_lookup(dp->dp_meta_objset,
1531 dsl_dataset_phys(ds)->ds_snapnames_zapobj,
1532 drba->drba_cookie->drc_tosnap, 8, 1, &val);
1533 if (error != ENOENT)
1534 return (error == 0 ? EEXIST : error);
1535
1536 /*
1537 * Check snapshot limit before receiving. We'll recheck again at the
1538 * end, but might as well abort before receiving if we're already over
1539 * the limit.
1540 *
1541 * Note that we do not check the file system limit with
1542 * dsl_dir_fscount_check because the temporary %clones don't count
1543 * against that limit.
1544 */
1545 error = dsl_fs_ss_limit_check(ds->ds_dir, 1, ZFS_PROP_SNAPSHOT_LIMIT,
1546 NULL, drba->drba_cred);
1547 if (error != 0)
1548 return (error);
1549
1550 if (fromguid != 0) {
1551 dsl_dataset_t *snap;
1552 uint64_t obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
1553
1554 /* Find snapshot in this dir that matches fromguid. */
1555 while (obj != 0) {
1556 error = dsl_dataset_hold_obj(dp, obj, FTAG,
1557 &snap);
1558 if (error != 0)
1559 return (SET_ERROR(ENODEV));
1560 if (snap->ds_dir != ds->ds_dir) {
1561 dsl_dataset_rele(snap, FTAG);
1562 return (SET_ERROR(ENODEV));
1563 }
1564 if (dsl_dataset_phys(snap)->ds_guid == fromguid)
1565 break;
1566 obj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
1567 dsl_dataset_rele(snap, FTAG);
1568 }
1569 if (obj == 0)
1570 return (SET_ERROR(ENODEV));
1571
1572 if (drba->drba_cookie->drc_force) {
1573 drba->drba_snapobj = obj;
1574 } else {
1575 /*
1576 * If we are not forcing, there must be no
1577 * changes since fromsnap.
1578 */
1579 if (dsl_dataset_modified_since_snap(ds, snap)) {
1580 dsl_dataset_rele(snap, FTAG);
1581 return (SET_ERROR(ETXTBSY));
1582 }
1583 drba->drba_snapobj = ds->ds_prev->ds_object;
1584 }
1585
1586 dsl_dataset_rele(snap, FTAG);
1587 } else {
1588 /* if full, then must be forced */
1589 if (!drba->drba_cookie->drc_force)
1590 return (SET_ERROR(EEXIST));
1591
1592 /*
1593 * We don't support using zfs recv -F to blow away
1594 * encrypted filesystems. This would require the
1595 * dsl dir to point to the old encryption key and
1596 * the new one at the same time during the receive.
1597 */
1598 if (ds->ds_dir->dd_crypto_obj != 0)
1599 return (SET_ERROR(EINVAL));
1600
1601 drba->drba_snapobj = 0;
1602 }
1603
1604 return (0);
1605
1606 }
1607
1608 static int
1609 dmu_recv_begin_check(void *arg, dmu_tx_t *tx)
1610 {
1611 dmu_recv_begin_arg_t *drba = arg;
1612 dsl_pool_t *dp = dmu_tx_pool(tx);
1613 struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
1614 uint64_t fromguid = drrb->drr_fromguid;
1615 int flags = drrb->drr_flags;
1616 ds_hold_flags_t dsflags = 0;
1617 int error;
1618 uint64_t featureflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo);
1619 dsl_dataset_t *ds;
1620 const char *tofs = drba->drba_cookie->drc_tofs;
1621
1622 /* already checked */
1623 ASSERT3U(drrb->drr_magic, ==, DMU_BACKUP_MAGIC);
1624 ASSERT(!(featureflags & DMU_BACKUP_FEATURE_RESUMING));
1625
1626 if (DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) ==
1627 DMU_COMPOUNDSTREAM ||
1628 drrb->drr_type >= DMU_OST_NUMTYPES ||
1629 ((flags & DRR_FLAG_CLONE) && drba->drba_origin == NULL))
1630 return (SET_ERROR(EINVAL));
1631
1632 /* Verify pool version supports SA if SA_SPILL feature set */
1633 if ((featureflags & DMU_BACKUP_FEATURE_SA_SPILL) &&
1634 spa_version(dp->dp_spa) < SPA_VERSION_SA)
1635 return (SET_ERROR(ENOTSUP));
1636
1637 if (drba->drba_cookie->drc_resumable &&
1638 !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_EXTENSIBLE_DATASET))
1639 return (SET_ERROR(ENOTSUP));
1640
1641 /*
1642 * The receiving code doesn't know how to translate a WRITE_EMBEDDED
1643 * record to a plain WRITE record, so the pool must have the
1644 * EMBEDDED_DATA feature enabled if the stream has WRITE_EMBEDDED
1645 * records. Same with WRITE_EMBEDDED records that use LZ4 compression.
1646 */
1647 if ((featureflags & DMU_BACKUP_FEATURE_EMBED_DATA) &&
1648 !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_EMBEDDED_DATA))
1649 return (SET_ERROR(ENOTSUP));
1650 if ((featureflags & DMU_BACKUP_FEATURE_LZ4) &&
1651 !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LZ4_COMPRESS))
1652 return (SET_ERROR(ENOTSUP));
1653
1654 /*
1655 * The receiving code doesn't know how to translate large blocks
1656 * to smaller ones, so the pool must have the LARGE_BLOCKS
1657 * feature enabled if the stream has LARGE_BLOCKS. Same with
1658 * large dnodes.
1659 */
1660 if ((featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) &&
1661 !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LARGE_BLOCKS))
1662 return (SET_ERROR(ENOTSUP));
1663 if ((featureflags & DMU_BACKUP_FEATURE_LARGE_DNODE) &&
1664 !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LARGE_DNODE))
1665 return (SET_ERROR(ENOTSUP));
1666
1667 if ((featureflags & DMU_BACKUP_FEATURE_RAW)) {
1668 /* raw receives require the encryption feature */
1669 if (!spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_ENCRYPTION))
1670 return (SET_ERROR(ENOTSUP));
1671 } else {
1672 dsflags |= DS_HOLD_FLAG_DECRYPT;
1673 }
1674
1675 error = dsl_dataset_hold_flags(dp, tofs, dsflags, FTAG, &ds);
1676 if (error == 0) {
1677 /* target fs already exists; recv into temp clone */
1678
1679 /* Can't recv a clone into an existing fs */
1680 if (flags & DRR_FLAG_CLONE || drba->drba_origin) {
1681 dsl_dataset_rele_flags(ds, dsflags, FTAG);
1682 return (SET_ERROR(EINVAL));
1683 }
1684
1685 error = recv_begin_check_existing_impl(drba, ds, fromguid);
1686 dsl_dataset_rele_flags(ds, dsflags, FTAG);
1687 } else if (error == ENOENT) {
1688 /* target fs does not exist; must be a full backup or clone */
1689 char buf[ZFS_MAX_DATASET_NAME_LEN];
1690
1691 /*
1692 * If it's a non-clone incremental, we are missing the
1693 * target fs, so fail the recv.
1694 */
1695 if (fromguid != 0 && !(flags & DRR_FLAG_CLONE ||
1696 drba->drba_origin))
1697 return (SET_ERROR(ENOENT));
1698
1699 /*
1700 * If we're receiving a full send as a clone, and it doesn't
1701 * contain all the necessary free records and freeobject
1702 * records, reject it.
1703 */
1704 if (fromguid == 0 && drba->drba_origin &&
1705 !(flags & DRR_FLAG_FREERECORDS))
1706 return (SET_ERROR(EINVAL));
1707
1708 /* Open the parent of tofs */
1709 ASSERT3U(strlen(tofs), <, sizeof (buf));
1710 (void) strlcpy(buf, tofs, strrchr(tofs, '/') - tofs + 1);
1711 error = dsl_dataset_hold_flags(dp, buf, dsflags, FTAG, &ds);
1712 if (error != 0)
1713 return (error);
1714
1715 /*
1716 * Check filesystem and snapshot limits before receiving. We'll
1717 * recheck snapshot limits again at the end (we create the
1718 * filesystems and increment those counts during begin_sync).
1719 */
1720 error = dsl_fs_ss_limit_check(ds->ds_dir, 1,
1721 ZFS_PROP_FILESYSTEM_LIMIT, NULL, drba->drba_cred);
1722 if (error != 0) {
1723 dsl_dataset_rele_flags(ds, dsflags, FTAG);
1724 return (error);
1725 }
1726
1727 error = dsl_fs_ss_limit_check(ds->ds_dir, 1,
1728 ZFS_PROP_SNAPSHOT_LIMIT, NULL, drba->drba_cred);
1729 if (error != 0) {
1730 dsl_dataset_rele_flags(ds, dsflags, FTAG);
1731 return (error);
1732 }
1733
1734 if (drba->drba_origin != NULL) {
1735 dsl_dataset_t *origin;
1736
1737 error = dsl_dataset_hold_flags(dp, drba->drba_origin,
1738 dsflags, FTAG, &origin);
1739 if (error != 0) {
1740 dsl_dataset_rele_flags(ds, dsflags, FTAG);
1741 return (error);
1742 }
1743 if (!origin->ds_is_snapshot) {
1744 dsl_dataset_rele_flags(origin, dsflags, FTAG);
1745 dsl_dataset_rele_flags(ds, dsflags, FTAG);
1746 return (SET_ERROR(EINVAL));
1747 }
1748 if (dsl_dataset_phys(origin)->ds_guid != fromguid &&
1749 fromguid != 0) {
1750 dsl_dataset_rele_flags(origin, dsflags, FTAG);
1751 dsl_dataset_rele_flags(ds, dsflags, FTAG);
1752 return (SET_ERROR(ENODEV));
1753 }
1754 dsl_dataset_rele_flags(origin,
1755 dsflags, FTAG);
1756 }
1757 dsl_dataset_rele_flags(ds, dsflags, FTAG);
1758 error = 0;
1759 }
1760 return (error);
1761 }
1762
1763 static void
1764 dmu_recv_begin_sync(void *arg, dmu_tx_t *tx)
1765 {
1766 dmu_recv_begin_arg_t *drba = arg;
1767 dsl_pool_t *dp = dmu_tx_pool(tx);
1768 objset_t *mos = dp->dp_meta_objset;
1769 struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
1770 const char *tofs = drba->drba_cookie->drc_tofs;
1771 uint64_t featureflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo);
1772 dsl_dataset_t *ds, *newds;
1773 objset_t *os;
1774 uint64_t dsobj;
1775 ds_hold_flags_t dsflags = 0;
1776 int error;
1777 uint64_t crflags = 0;
1778 dsl_crypto_params_t *dcpp = NULL;
1779 dsl_crypto_params_t dcp = { 0 };
1780
1781 if (drrb->drr_flags & DRR_FLAG_CI_DATA)
1782 crflags |= DS_FLAG_CI_DATASET;
1783 if ((featureflags & DMU_BACKUP_FEATURE_RAW) == 0) {
1784 dsflags |= DS_HOLD_FLAG_DECRYPT;
1785 } else {
1786 dcp.cp_cmd = DCP_CMD_RAW_RECV;
1787 }
1788
1789 error = dsl_dataset_hold_flags(dp, tofs, dsflags, FTAG, &ds);
1790 if (error == 0) {
1791 /* create temporary clone */
1792 dsl_dataset_t *snap = NULL;
1793
1794 if (drba->drba_snapobj != 0) {
1795 VERIFY0(dsl_dataset_hold_obj(dp,
1796 drba->drba_snapobj, FTAG, &snap));
1797 } else {
1798 /* we use the dcp whenever we are not making a clone */
1799 dcpp = &dcp;
1800 }
1801
1802 dsobj = dsl_dataset_create_sync(ds->ds_dir, recv_clone_name,
1803 snap, crflags, drba->drba_cred, dcpp, tx);
1804 if (drba->drba_snapobj != 0)
1805 dsl_dataset_rele(snap, FTAG);
1806 dsl_dataset_rele_flags(ds, dsflags, FTAG);
1807 } else {
1808 dsl_dir_t *dd;
1809 const char *tail;
1810 dsl_dataset_t *origin = NULL;
1811
1812 VERIFY0(dsl_dir_hold(dp, tofs, FTAG, &dd, &tail));
1813
1814 if (drba->drba_origin != NULL) {
1815 VERIFY0(dsl_dataset_hold(dp, drba->drba_origin,
1816 FTAG, &origin));
1817 } else {
1818 /* we use the dcp whenever we are not making a clone */
1819 dcpp = &dcp;
1820 }
1821
1822 /* Create new dataset. */
1823 dsobj = dsl_dataset_create_sync(dd, strrchr(tofs, '/') + 1,
1824 origin, crflags, drba->drba_cred, dcpp, tx);
1825 if (origin != NULL)
1826 dsl_dataset_rele(origin, FTAG);
1827 dsl_dir_rele(dd, FTAG);
1828 drba->drba_cookie->drc_newfs = B_TRUE;
1829 }
1830 VERIFY0(dsl_dataset_own_obj(dp, dsobj, dsflags, dmu_recv_tag, &newds));
1831 VERIFY0(dmu_objset_from_ds(newds, &os));
1832
1833 if (drba->drba_cookie->drc_resumable) {
1834 uint64_t one = 1;
1835 uint64_t zero = 0;
1836
1837 dsl_dataset_zapify(newds, tx);
1838 if (drrb->drr_fromguid != 0) {
1839 VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_FROMGUID,
1840 8, 1, &drrb->drr_fromguid, tx));
1841 }
1842 VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_TOGUID,
1843 8, 1, &drrb->drr_toguid, tx));
1844 VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_TONAME,
1845 1, strlen(drrb->drr_toname) + 1, drrb->drr_toname, tx));
1846 VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_OBJECT,
1847 8, 1, &one, tx));
1848 VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_OFFSET,
1849 8, 1, &zero, tx));
1850 VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_BYTES,
1851 8, 1, &zero, tx));
1852 if (featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) {
1853 VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_LARGEBLOCK,
1854 8, 1, &one, tx));
1855 }
1856 if (featureflags & DMU_BACKUP_FEATURE_EMBED_DATA) {
1857 VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_EMBEDOK,
1858 8, 1, &one, tx));
1859 }
1860 if (featureflags & DMU_BACKUP_FEATURE_COMPRESSED) {
1861 VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_COMPRESSOK,
1862 8, 1, &one, tx));
1863 }
1864 if (featureflags & DMU_BACKUP_FEATURE_RAW) {
1865 VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_RAWOK,
1866 8, 1, &one, tx));
1867 }
1868 }
1869
1870 /*
1871 * Usually the os->os_encrypted value is tied to the presence of a
1872 * DSL Crypto Key object in the dd. However, that will not be received
1873 * until dmu_recv_stream(), so we set the value manually for now.
1874 */
1875 if (featureflags & DMU_BACKUP_FEATURE_RAW) {
1876 os->os_encrypted = B_TRUE;
1877 drba->drba_cookie->drc_raw = B_TRUE;
1878 }
1879
1880 dmu_buf_will_dirty(newds->ds_dbuf, tx);
1881 dsl_dataset_phys(newds)->ds_flags |= DS_FLAG_INCONSISTENT;
1882
1883 /*
1884 * If we actually created a non-clone, we need to create the objset
1885 * in our new dataset. If this is a raw send we postpone this until
1886 * dmu_recv_stream() so that we can allocate the metadnode with the
1887 * properties from the DRR_BEGIN payload.
1888 */
1889 rrw_enter(&newds->ds_bp_rwlock, RW_READER, FTAG);
1890 if (BP_IS_HOLE(dsl_dataset_get_blkptr(newds)) &&
1891 (featureflags & DMU_BACKUP_FEATURE_RAW) == 0) {
1892 (void) dmu_objset_create_impl(dp->dp_spa,
1893 newds, dsl_dataset_get_blkptr(newds), drrb->drr_type, tx);
1894 }
1895 rrw_exit(&newds->ds_bp_rwlock, FTAG);
1896
1897 drba->drba_cookie->drc_ds = newds;
1898
1899 spa_history_log_internal_ds(newds, "receive", tx, "");
1900 }
1901
1902 static int
1903 dmu_recv_resume_begin_check(void *arg, dmu_tx_t *tx)
1904 {
1905 dmu_recv_begin_arg_t *drba = arg;
1906 dsl_pool_t *dp = dmu_tx_pool(tx);
1907 struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
1908 int error;
1909 ds_hold_flags_t dsflags = 0;
1910 uint64_t featureflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo);
1911 dsl_dataset_t *ds;
1912 const char *tofs = drba->drba_cookie->drc_tofs;
1913 uint64_t val;
1914
1915 /* 6 extra bytes for /%recv */
1916 char recvname[ZFS_MAX_DATASET_NAME_LEN + 6];
1917
1918 /* already checked */
1919 ASSERT3U(drrb->drr_magic, ==, DMU_BACKUP_MAGIC);
1920 ASSERT(featureflags & DMU_BACKUP_FEATURE_RESUMING);
1921
1922 if (DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) ==
1923 DMU_COMPOUNDSTREAM ||
1924 drrb->drr_type >= DMU_OST_NUMTYPES)
1925 return (SET_ERROR(EINVAL));
1926
1927 /* Verify pool version supports SA if SA_SPILL feature set */
1928 if ((featureflags & DMU_BACKUP_FEATURE_SA_SPILL) &&
1929 spa_version(dp->dp_spa) < SPA_VERSION_SA)
1930 return (SET_ERROR(ENOTSUP));
1931
1932 /*
1933 * The receiving code doesn't know how to translate a WRITE_EMBEDDED
1934 * record to a plain WRITE record, so the pool must have the
1935 * EMBEDDED_DATA feature enabled if the stream has WRITE_EMBEDDED
1936 * records. Same with WRITE_EMBEDDED records that use LZ4 compression.
1937 */
1938 if ((featureflags & DMU_BACKUP_FEATURE_EMBED_DATA) &&
1939 !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_EMBEDDED_DATA))
1940 return (SET_ERROR(ENOTSUP));
1941 if ((featureflags & DMU_BACKUP_FEATURE_LZ4) &&
1942 !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LZ4_COMPRESS))
1943 return (SET_ERROR(ENOTSUP));
1944
1945 /*
1946 * The receiving code doesn't know how to translate large blocks
1947 * to smaller ones, so the pool must have the LARGE_BLOCKS
1948 * feature enabled if the stream has LARGE_BLOCKS. Same with
1949 * large dnodes.
1950 */
1951 if ((featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) &&
1952 !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LARGE_BLOCKS))
1953 return (SET_ERROR(ENOTSUP));
1954 if ((featureflags & DMU_BACKUP_FEATURE_LARGE_DNODE) &&
1955 !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LARGE_DNODE))
1956 return (SET_ERROR(ENOTSUP));
1957
1958 (void) snprintf(recvname, sizeof (recvname), "%s/%s",
1959 tofs, recv_clone_name);
1960
1961 if ((featureflags & DMU_BACKUP_FEATURE_RAW) == 0)
1962 dsflags |= DS_HOLD_FLAG_DECRYPT;
1963
1964 if (dsl_dataset_hold_flags(dp, recvname, dsflags, FTAG, &ds) != 0) {
1965 /* %recv does not exist; continue in tofs */
1966 error = dsl_dataset_hold_flags(dp, tofs, dsflags, FTAG, &ds);
1967 if (error != 0)
1968 return (error);
1969 }
1970
1971 /* check that ds is marked inconsistent */
1972 if (!DS_IS_INCONSISTENT(ds)) {
1973 dsl_dataset_rele_flags(ds, dsflags, FTAG);
1974 return (SET_ERROR(EINVAL));
1975 }
1976
1977 /* check that there is resuming data, and that the toguid matches */
1978 if (!dsl_dataset_is_zapified(ds)) {
1979 dsl_dataset_rele_flags(ds, dsflags, FTAG);
1980 return (SET_ERROR(EINVAL));
1981 }
1982 error = zap_lookup(dp->dp_meta_objset, ds->ds_object,
1983 DS_FIELD_RESUME_TOGUID, sizeof (val), 1, &val);
1984 if (error != 0 || drrb->drr_toguid != val) {
1985 dsl_dataset_rele_flags(ds, dsflags, FTAG);
1986 return (SET_ERROR(EINVAL));
1987 }
1988
1989 /*
1990 * Check if the receive is still running. If so, it will be owned.
1991 * Note that nothing else can own the dataset (e.g. after the receive
1992 * fails) because it will be marked inconsistent.
1993 */
1994 if (dsl_dataset_has_owner(ds)) {
1995 dsl_dataset_rele_flags(ds, dsflags, FTAG);
1996 return (SET_ERROR(EBUSY));
1997 }
1998
1999 /* There should not be any snapshots of this fs yet. */
2000 if (ds->ds_prev != NULL && ds->ds_prev->ds_dir == ds->ds_dir) {
2001 dsl_dataset_rele_flags(ds, dsflags, FTAG);
2002 return (SET_ERROR(EINVAL));
2003 }
2004
2005 /*
2006 * Note: resume point will be checked when we process the first WRITE
2007 * record.
2008 */
2009
2010 /* check that the origin matches */
2011 val = 0;
2012 (void) zap_lookup(dp->dp_meta_objset, ds->ds_object,
2013 DS_FIELD_RESUME_FROMGUID, sizeof (val), 1, &val);
2014 if (drrb->drr_fromguid != val) {
2015 dsl_dataset_rele_flags(ds, dsflags, FTAG);
2016 return (SET_ERROR(EINVAL));
2017 }
2018
2019 dsl_dataset_rele_flags(ds, dsflags, FTAG);
2020 return (0);
2021 }
2022
2023 static void
2024 dmu_recv_resume_begin_sync(void *arg, dmu_tx_t *tx)
2025 {
2026 dmu_recv_begin_arg_t *drba = arg;
2027 dsl_pool_t *dp = dmu_tx_pool(tx);
2028 const char *tofs = drba->drba_cookie->drc_tofs;
2029 struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
2030 uint64_t featureflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo);
2031 dsl_dataset_t *ds;
2032 objset_t *os;
2033 ds_hold_flags_t dsflags = 0;
2034 uint64_t dsobj;
2035 /* 6 extra bytes for /%recv */
2036 char recvname[ZFS_MAX_DATASET_NAME_LEN + 6];
2037
2038 (void) snprintf(recvname, sizeof (recvname), "%s/%s",
2039 tofs, recv_clone_name);
2040
2041 if (featureflags & DMU_BACKUP_FEATURE_RAW) {
2042 drba->drba_cookie->drc_raw = B_TRUE;
2043 } else {
2044 dsflags |= DS_HOLD_FLAG_DECRYPT;
2045 }
2046
2047 if (dsl_dataset_hold_flags(dp, recvname, dsflags, FTAG, &ds) != 0) {
2048 /* %recv does not exist; continue in tofs */
2049 VERIFY0(dsl_dataset_hold_flags(dp, tofs, dsflags, FTAG, &ds));
2050 drba->drba_cookie->drc_newfs = B_TRUE;
2051 }
2052
2053 /* clear the inconsistent flag so that we can own it */
2054 ASSERT(DS_IS_INCONSISTENT(ds));
2055 dmu_buf_will_dirty(ds->ds_dbuf, tx);
2056 dsl_dataset_phys(ds)->ds_flags &= ~DS_FLAG_INCONSISTENT;
2057 dsobj = ds->ds_object;
2058 dsl_dataset_rele_flags(ds, dsflags, FTAG);
2059
2060 VERIFY0(dsl_dataset_own_obj(dp, dsobj, dsflags, dmu_recv_tag, &ds));
2061 VERIFY0(dmu_objset_from_ds(ds, &os));
2062
2063 dmu_buf_will_dirty(ds->ds_dbuf, tx);
2064 dsl_dataset_phys(ds)->ds_flags |= DS_FLAG_INCONSISTENT;
2065
2066 rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
2067 ASSERT(!BP_IS_HOLE(dsl_dataset_get_blkptr(ds)));
2068 rrw_exit(&ds->ds_bp_rwlock, FTAG);
2069
2070 drba->drba_cookie->drc_ds = ds;
2071
2072 spa_history_log_internal_ds(ds, "resume receive", tx, "");
2073 }
2074
2075 /*
2076 * NB: callers *MUST* call dmu_recv_stream() if dmu_recv_begin()
2077 * succeeds; otherwise we will leak the holds on the datasets.
2078 */
2079 int
2080 dmu_recv_begin(char *tofs, char *tosnap, dmu_replay_record_t *drr_begin,
2081 boolean_t force, boolean_t resumable, char *origin, dmu_recv_cookie_t *drc)
2082 {
2083 dmu_recv_begin_arg_t drba = { 0 };
2084
2085 bzero(drc, sizeof (dmu_recv_cookie_t));
2086 drc->drc_drr_begin = drr_begin;
2087 drc->drc_drrb = &drr_begin->drr_u.drr_begin;
2088 drc->drc_tosnap = tosnap;
2089 drc->drc_tofs = tofs;
2090 drc->drc_force = force;
2091 drc->drc_resumable = resumable;
2092 drc->drc_cred = CRED();
2093 drc->drc_clone = (origin != NULL);
2094
2095 if (drc->drc_drrb->drr_magic == BSWAP_64(DMU_BACKUP_MAGIC)) {
2096 drc->drc_byteswap = B_TRUE;
2097 (void) fletcher_4_incremental_byteswap(drr_begin,
2098 sizeof (dmu_replay_record_t), &drc->drc_cksum);
2099 byteswap_record(drr_begin);
2100 } else if (drc->drc_drrb->drr_magic == DMU_BACKUP_MAGIC) {
2101 (void) fletcher_4_incremental_native(drr_begin,
2102 sizeof (dmu_replay_record_t), &drc->drc_cksum);
2103 } else {
2104 return (SET_ERROR(EINVAL));
2105 }
2106
2107 drba.drba_origin = origin;
2108 drba.drba_cookie = drc;
2109 drba.drba_cred = CRED();
2110
2111 if (DMU_GET_FEATUREFLAGS(drc->drc_drrb->drr_versioninfo) &
2112 DMU_BACKUP_FEATURE_RESUMING) {
2113 return (dsl_sync_task(tofs,
2114 dmu_recv_resume_begin_check, dmu_recv_resume_begin_sync,
2115 &drba, 5, ZFS_SPACE_CHECK_NORMAL));
2116 } else {
2117 return (dsl_sync_task(tofs,
2118 dmu_recv_begin_check, dmu_recv_begin_sync,
2119 &drba, 5, ZFS_SPACE_CHECK_NORMAL));
2120 }
2121 }
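
/*
 * A minimal sketch of the intended calling sequence, assuming a
 * hypothetical in-kernel consumer that already has a vnode for the
 * stream (in practice this path is driven by zfs_ioc_recv()):
 *
 *	dmu_recv_cookie_t drc;
 *	int err = dmu_recv_begin(tofs, tosnap, drr_begin, force,
 *	    resumable, origin, &drc);
 *	if (err == 0) {
 *		err = dmu_recv_stream(&drc, vp, &voff, cleanup_fd,
 *		    &action_handle);
 *		if (err == 0)
 *			err = dmu_recv_end(&drc, owner);
 *	}
 *
 * As noted above, dmu_recv_stream() must always follow a successful
 * dmu_recv_begin(), even if the caller intends to abort, so that the
 * dataset holds taken here are released.
 */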
2122
2123 struct receive_record_arg {
2124 dmu_replay_record_t header;
2125 void *payload; /* Pointer to a buffer containing the payload */
2126 /*
2127 * If the record is a write, pointer to the arc_buf_t containing the
2128 * payload.
2129 */
2130 arc_buf_t *arc_buf;
2131 int payload_size;
2132 uint64_t bytes_read; /* bytes read from stream when record created */
2133 boolean_t eos_marker; /* Marks the end of the stream */
2134 bqueue_node_t node;
2135 };
2136
2137 struct receive_writer_arg {
2138 objset_t *os;
2139 boolean_t byteswap;
2140 bqueue_t q;
2141
2142 /*
2143 * These three args are used to signal to the main thread that we're
2144 * done.
2145 */
2146 kmutex_t mutex;
2147 kcondvar_t cv;
2148 boolean_t done;
2149
2150 int err;
2151 /* A map from guid to dataset to help handle dedup'd streams. */
2152 avl_tree_t *guid_to_ds_map;
2153 boolean_t resumable;
2154 boolean_t raw;
2155 uint64_t last_object;
2156 uint64_t last_offset;
2157 uint64_t max_object; /* highest object ID referenced in stream */
2158 uint64_t bytes_read; /* bytes read when current record created */
2159 };
2160
2161 struct objlist {
2162 list_t list; /* List of struct receive_objnode. */
2163 /*
2164 * Last object looked up. Used to assert that objects are being looked
2165 * up in ascending order.
2166 */
2167 uint64_t last_lookup;
2168 };
2169
2170 struct receive_objnode {
2171 list_node_t node;
2172 uint64_t object;
2173 };
2174
2175 struct receive_arg {
2176 objset_t *os;
2177 vnode_t *vp; /* The vnode to read the stream from */
2178 uint64_t voff; /* The current offset in the stream */
2179 uint64_t bytes_read;
2180 /*
2181 * A record that has had its payload read in, but hasn't yet been handed
2182 * off to the worker thread.
2183 */
2184 struct receive_record_arg *rrd;
2185 /* A record that has had its header read in, but not its payload. */
2186 struct receive_record_arg *next_rrd;
2187 zio_cksum_t cksum;
2188 zio_cksum_t prev_cksum;
2189 int err;
2190 boolean_t byteswap;
2191 boolean_t raw;
2192 uint64_t featureflags;
2193 /* Sorted list of objects not to issue prefetches for. */
2194 struct objlist ignore_objlist;
2195 };
2196
2197 typedef struct guid_map_entry {
2198 uint64_t guid;
2199 boolean_t raw;
2200 dsl_dataset_t *gme_ds;
2201 avl_node_t avlnode;
2202 } guid_map_entry_t;
2203
2204 static int
2205 guid_compare(const void *arg1, const void *arg2)
2206 {
2207 const guid_map_entry_t *gmep1 = (const guid_map_entry_t *)arg1;
2208 const guid_map_entry_t *gmep2 = (const guid_map_entry_t *)arg2;
2209
2210 return (AVL_CMP(gmep1->guid, gmep2->guid));
2211 }
2212
2213 static void
2214 free_guid_map_onexit(void *arg)
2215 {
2216 avl_tree_t *ca = arg;
2217 void *cookie = NULL;
2218 guid_map_entry_t *gmep;
2219
2220 while ((gmep = avl_destroy_nodes(ca, &cookie)) != NULL) {
2221 dsl_dataset_long_rele(gmep->gme_ds, gmep);
2222 dsl_dataset_rele_flags(gmep->gme_ds,
2223 (gmep->raw) ? 0 : DS_HOLD_FLAG_DECRYPT, gmep);
2224 kmem_free(gmep, sizeof (guid_map_entry_t));
2225 }
2226 avl_destroy(ca);
2227 kmem_free(ca, sizeof (avl_tree_t));
2228 }
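
/*
 * Sketch of how this guid map is expected to be created, mirroring the
 * DMU_BACKUP_FEATURE_DEDUP path in dmu_recv_stream() (variable names
 * here are illustrative only):
 *
 *	avl_tree_t *map = kmem_alloc(sizeof (avl_tree_t), KM_SLEEP);
 *	avl_create(map, guid_compare, sizeof (guid_map_entry_t),
 *	    offsetof(guid_map_entry_t, avlnode));
 *
 * Each snapshot created by the receive adds one guid_map_entry_t, so
 * that later DRR_WRITE_BYREF records can map drr_refguid back to a
 * dataset; free_guid_map_onexit() tears the tree down when the cleanup
 * file descriptor is closed.
 */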
2229
2230 static int
2231 receive_read(struct receive_arg *ra, int len, void *buf)
2232 {
2233 int done = 0;
2234
2235 /*
2236 * The code doesn't rely on this (lengths being multiples of 8). See
2237 * comment in dump_bytes.
2238 */
2239 ASSERT(len % 8 == 0 ||
2240 (ra->featureflags & DMU_BACKUP_FEATURE_RAW) != 0);
2241
2242 while (done < len) {
2243 ssize_t resid;
2244
2245 ra->err = vn_rdwr(UIO_READ, ra->vp,
2246 (char *)buf + done, len - done,
2247 ra->voff, UIO_SYSSPACE, FAPPEND,
2248 RLIM64_INFINITY, CRED(), &resid);
2249
2250 if (resid == len - done) {
2251 /*
2252 * Note: ECKSUM indicates that the receive
2253 * was interrupted and can potentially be resumed.
2254 */
2255 ra->err = SET_ERROR(ECKSUM);
2256 }
2257 ra->voff += len - done - resid;
2258 done = len - resid;
2259 if (ra->err != 0)
2260 return (ra->err);
2261 }
2262
2263 ra->bytes_read += len;
2264
2265 ASSERT3U(done, ==, len);
2266 return (0);
2267 }
2268
2269 noinline static void
2270 byteswap_record(dmu_replay_record_t *drr)
2271 {
2272 #define DO64(X) (drr->drr_u.X = BSWAP_64(drr->drr_u.X))
2273 #define DO32(X) (drr->drr_u.X = BSWAP_32(drr->drr_u.X))
2274 drr->drr_type = BSWAP_32(drr->drr_type);
2275 drr->drr_payloadlen = BSWAP_32(drr->drr_payloadlen);
2276
2277 switch (drr->drr_type) {
2278 case DRR_BEGIN:
2279 DO64(drr_begin.drr_magic);
2280 DO64(drr_begin.drr_versioninfo);
2281 DO64(drr_begin.drr_creation_time);
2282 DO32(drr_begin.drr_type);
2283 DO32(drr_begin.drr_flags);
2284 DO64(drr_begin.drr_toguid);
2285 DO64(drr_begin.drr_fromguid);
2286 break;
2287 case DRR_OBJECT:
2288 DO64(drr_object.drr_object);
2289 DO32(drr_object.drr_type);
2290 DO32(drr_object.drr_bonustype);
2291 DO32(drr_object.drr_blksz);
2292 DO32(drr_object.drr_bonuslen);
2293 DO32(drr_object.drr_raw_bonuslen);
2294 DO64(drr_object.drr_toguid);
2295 break;
2296 case DRR_FREEOBJECTS:
2297 DO64(drr_freeobjects.drr_firstobj);
2298 DO64(drr_freeobjects.drr_numobjs);
2299 DO64(drr_freeobjects.drr_toguid);
2300 break;
2301 case DRR_WRITE:
2302 DO64(drr_write.drr_object);
2303 DO32(drr_write.drr_type);
2304 DO64(drr_write.drr_offset);
2305 DO64(drr_write.drr_logical_size);
2306 DO64(drr_write.drr_toguid);
2307 ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_write.drr_key.ddk_cksum);
2308 DO64(drr_write.drr_key.ddk_prop);
2309 DO64(drr_write.drr_compressed_size);
2310 break;
2311 case DRR_WRITE_BYREF:
2312 DO64(drr_write_byref.drr_object);
2313 DO64(drr_write_byref.drr_offset);
2314 DO64(drr_write_byref.drr_length);
2315 DO64(drr_write_byref.drr_toguid);
2316 DO64(drr_write_byref.drr_refguid);
2317 DO64(drr_write_byref.drr_refobject);
2318 DO64(drr_write_byref.drr_refoffset);
2319 ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_write_byref.
2320 drr_key.ddk_cksum);
2321 DO64(drr_write_byref.drr_key.ddk_prop);
2322 break;
2323 case DRR_WRITE_EMBEDDED:
2324 DO64(drr_write_embedded.drr_object);
2325 DO64(drr_write_embedded.drr_offset);
2326 DO64(drr_write_embedded.drr_length);
2327 DO64(drr_write_embedded.drr_toguid);
2328 DO32(drr_write_embedded.drr_lsize);
2329 DO32(drr_write_embedded.drr_psize);
2330 break;
2331 case DRR_FREE:
2332 DO64(drr_free.drr_object);
2333 DO64(drr_free.drr_offset);
2334 DO64(drr_free.drr_length);
2335 DO64(drr_free.drr_toguid);
2336 break;
2337 case DRR_SPILL:
2338 DO64(drr_spill.drr_object);
2339 DO64(drr_spill.drr_length);
2340 DO64(drr_spill.drr_toguid);
2341 DO64(drr_spill.drr_compressed_size);
2342 DO32(drr_spill.drr_type);
2343 break;
2344 case DRR_OBJECT_RANGE:
2345 DO64(drr_object_range.drr_firstobj);
2346 DO64(drr_object_range.drr_numslots);
2347 DO64(drr_object_range.drr_toguid);
2348 break;
2349 case DRR_END:
2350 DO64(drr_end.drr_toguid);
2351 ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_end.drr_checksum);
2352 break;
2353 default:
2354 break;
2355 }
2356
2357 if (drr->drr_type != DRR_BEGIN) {
2358 ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_checksum.drr_checksum);
2359 }
2360
2361 #undef DO64
2362 #undef DO32
2363 }
2364
2365 static inline uint8_t
2366 deduce_nblkptr(dmu_object_type_t bonus_type, uint64_t bonus_size)
2367 {
2368 if (bonus_type == DMU_OT_SA) {
2369 return (1);
2370 } else {
2371 return (1 +
2372 ((DN_OLD_MAX_BONUSLEN -
2373 MIN(DN_OLD_MAX_BONUSLEN, bonus_size)) >> SPA_BLKPTRSHIFT));
2374 }
2375 }
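
/*
 * Worked example (assuming the legacy 512-byte dnode, for which
 * DN_OLD_MAX_BONUSLEN is 320 and SPA_BLKPTRSHIFT is 7, i.e. 128-byte
 * block pointers): a bonus_size of 320 leaves room for only one blkptr,
 * so deduce_nblkptr() returns 1 + (0 >> 7) = 1; a bonus_size of 64
 * returns 1 + (256 >> 7) = 3, the classic three-blkptr dnode.
 */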
2376
2377 static void
2378 save_resume_state(struct receive_writer_arg *rwa,
2379 uint64_t object, uint64_t offset, dmu_tx_t *tx)
2380 {
2381 int txgoff = dmu_tx_get_txg(tx) & TXG_MASK;
2382
2383 if (!rwa->resumable)
2384 return;
2385
2386 /*
2387 * We use ds_resume_bytes[] != 0 to indicate that we need to
2388 * update this on disk, so it must not be 0.
2389 */
2390 ASSERT(rwa->bytes_read != 0);
2391
2392 /*
2393 * We only resume from write records, which have a valid
2394 * (non-meta-dnode) object number.
2395 */
2396 ASSERT(object != 0);
2397
2398 /*
2399 * For resuming to work correctly, we must receive records in order,
2400 * sorted by object,offset. This is checked by the callers, but
2401 * assert it here for good measure.
2402 */
2403 ASSERT3U(object, >=, rwa->os->os_dsl_dataset->ds_resume_object[txgoff]);
2404 ASSERT(object != rwa->os->os_dsl_dataset->ds_resume_object[txgoff] ||
2405 offset >= rwa->os->os_dsl_dataset->ds_resume_offset[txgoff]);
2406 ASSERT3U(rwa->bytes_read, >=,
2407 rwa->os->os_dsl_dataset->ds_resume_bytes[txgoff]);
2408
2409 rwa->os->os_dsl_dataset->ds_resume_object[txgoff] = object;
2410 rwa->os->os_dsl_dataset->ds_resume_offset[txgoff] = offset;
2411 rwa->os->os_dsl_dataset->ds_resume_bytes[txgoff] = rwa->bytes_read;
2412 }
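
/*
 * The in-core ds_resume_{object,offset,bytes} slots set above are
 * written out to the dataset's ZAP attributes (DS_FIELD_RESUME_OBJECT,
 * DS_FIELD_RESUME_OFFSET, DS_FIELD_RESUME_BYTES) when the txg syncs;
 * resume_check() below compares the resume point of a restarted stream
 * against those attributes.
 */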
2413
2414 noinline static int
2415 receive_object(struct receive_writer_arg *rwa, struct drr_object *drro,
2416 void *data)
2417 {
2418 dmu_object_info_t doi;
2419 dmu_tx_t *tx;
2420 uint64_t object;
2421 int err;
2422
2423 if (drro->drr_type == DMU_OT_NONE ||
2424 !DMU_OT_IS_VALID(drro->drr_type) ||
2425 !DMU_OT_IS_VALID(drro->drr_bonustype) ||
2426 drro->drr_checksumtype >= ZIO_CHECKSUM_FUNCTIONS ||
2427 drro->drr_compress >= ZIO_COMPRESS_FUNCTIONS ||
2428 P2PHASE(drro->drr_blksz, SPA_MINBLOCKSIZE) ||
2429 drro->drr_blksz < SPA_MINBLOCKSIZE ||
2430 drro->drr_blksz > spa_maxblocksize(dmu_objset_spa(rwa->os)) ||
2431 drro->drr_bonuslen >
2432 DN_BONUS_SIZE(spa_maxdnodesize(dmu_objset_spa(rwa->os))) ||
2433 drro->drr_dn_slots >
2434 (spa_maxdnodesize(dmu_objset_spa(rwa->os)) >> DNODE_SHIFT)) {
2435 return (SET_ERROR(EINVAL));
2436 }
2437
2438 if (rwa->raw) {
2439 if (drro->drr_raw_bonuslen < drro->drr_bonuslen ||
2440 drro->drr_indblkshift > SPA_MAXBLOCKSHIFT ||
2441 drro->drr_nlevels > DN_MAX_LEVELS ||
2442 drro->drr_nblkptr > DN_MAX_NBLKPTR ||
2443 DN_SLOTS_TO_BONUSLEN(drro->drr_dn_slots) <
2444 drro->drr_raw_bonuslen)
2445 return (SET_ERROR(EINVAL));
2446 } else {
2447 if (drro->drr_flags != 0 || drro->drr_raw_bonuslen != 0 ||
2448 drro->drr_indblkshift != 0 || drro->drr_nlevels != 0 ||
2449 drro->drr_nblkptr != 0)
2450 return (SET_ERROR(EINVAL));
2451 }
2452
2453 err = dmu_object_info(rwa->os, drro->drr_object, &doi);
2454
2455 if (err != 0 && err != ENOENT)
2456 return (SET_ERROR(EINVAL));
2457 object = err == 0 ? drro->drr_object : DMU_NEW_OBJECT;
2458
2459 if (drro->drr_object > rwa->max_object)
2460 rwa->max_object = drro->drr_object;
2461
2462 /*
2463 * If we are losing blkptrs or changing the block size, this must
2464 * be a new file instance. We must clear out the previous file
2465 * contents before we can change this type of metadata in the dnode.
2466 * Raw receives will also check that the indirect structure of the
2467 * dnode hasn't changed.
2468 */
2469 if (err == 0) {
2470 uint32_t indblksz = drro->drr_indblkshift ?
2471 1ULL << drro->drr_indblkshift : 0;
2472 int nblkptr = deduce_nblkptr(drro->drr_bonustype,
2473 drro->drr_bonuslen);
2474
2475 /* nblkptr will be bounded by the bonus size and type */
2476 if (rwa->raw && nblkptr != drro->drr_nblkptr)
2477 return (SET_ERROR(EINVAL));
2478
2479 if (drro->drr_blksz != doi.doi_data_block_size ||
2480 nblkptr < doi.doi_nblkptr ||
2481 (rwa->raw &&
2482 (indblksz != doi.doi_metadata_block_size ||
2483 drro->drr_nlevels < doi.doi_indirection))) {
2484 err = dmu_free_long_range(rwa->os, drro->drr_object,
2485 0, DMU_OBJECT_END);
2486 if (err != 0)
2487 return (SET_ERROR(EINVAL));
2488 }
2489 }
2490
2491 tx = dmu_tx_create(rwa->os);
2492 dmu_tx_hold_bonus(tx, object);
2493 dmu_tx_hold_write(tx, object, 0, 0);
2494 err = dmu_tx_assign(tx, TXG_WAIT);
2495 if (err != 0) {
2496 dmu_tx_abort(tx);
2497 return (err);
2498 }
2499
2500 if (object == DMU_NEW_OBJECT) {
2501 /* currently free, want to be allocated */
2502 err = dmu_object_claim_dnsize(rwa->os, drro->drr_object,
2503 drro->drr_type, drro->drr_blksz,
2504 drro->drr_bonustype, drro->drr_bonuslen,
2505 drro->drr_dn_slots << DNODE_SHIFT, tx);
2506 } else if (drro->drr_type != doi.doi_type ||
2507 drro->drr_blksz != doi.doi_data_block_size ||
2508 drro->drr_bonustype != doi.doi_bonus_type ||
2509 drro->drr_bonuslen != doi.doi_bonus_size) {
2510 /* currently allocated, but with different properties */
2511 err = dmu_object_reclaim(rwa->os, drro->drr_object,
2512 drro->drr_type, drro->drr_blksz,
2513 drro->drr_bonustype, drro->drr_bonuslen, tx);
2514 }
2515 if (err != 0) {
2516 dmu_tx_commit(tx);
2517 return (SET_ERROR(EINVAL));
2518 }
2519
2520 if (rwa->raw)
2521 VERIFY0(dmu_object_dirty_raw(rwa->os, drro->drr_object, tx));
2522
2523 dmu_object_set_checksum(rwa->os, drro->drr_object,
2524 drro->drr_checksumtype, tx);
2525 dmu_object_set_compress(rwa->os, drro->drr_object,
2526 drro->drr_compress, tx);
2527
2528 /* handle more restrictive dnode structuring for raw recvs */
2529 if (rwa->raw) {
2530 /*
2531 * Set the indirect block shift and nlevels. This will not fail
2532 * because we ensured all of the blocks were free earlier if
2533 * this is a new object.
2534 */
2535 VERIFY0(dmu_object_set_blocksize(rwa->os, drro->drr_object,
2536 drro->drr_blksz, drro->drr_indblkshift, tx));
2537 VERIFY0(dmu_object_set_nlevels(rwa->os, drro->drr_object,
2538 drro->drr_nlevels, tx));
2539 }
2540
2541 if (data != NULL) {
2542 dmu_buf_t *db;
2543 uint32_t flags = DMU_READ_NO_PREFETCH;
2544
2545 if (rwa->raw)
2546 flags |= DMU_READ_NO_DECRYPT;
2547
2548 VERIFY0(dmu_bonus_hold_impl(rwa->os, drro->drr_object,
2549 FTAG, flags, &db));
2550 dmu_buf_will_dirty(db, tx);
2551
2552 ASSERT3U(db->db_size, >=, drro->drr_bonuslen);
2553 bcopy(data, db->db_data, DRR_OBJECT_PAYLOAD_SIZE(drro));
2554
2555 /*
2556 * Raw bonus buffers have their byteorder determined by the
2557 * DRR_OBJECT_RANGE record.
2558 */
2559 if (rwa->byteswap && !rwa->raw) {
2560 dmu_object_byteswap_t byteswap =
2561 DMU_OT_BYTESWAP(drro->drr_bonustype);
2562 dmu_ot_byteswap[byteswap].ob_func(db->db_data,
2563 DRR_OBJECT_PAYLOAD_SIZE(drro));
2564 }
2565 dmu_buf_rele(db, FTAG);
2566 }
2567 dmu_tx_commit(tx);
2568
2569 return (0);
2570 }
2571
2572 /* ARGSUSED */
2573 noinline static int
2574 receive_freeobjects(struct receive_writer_arg *rwa,
2575 struct drr_freeobjects *drrfo)
2576 {
2577 uint64_t obj;
2578 int next_err = 0;
2579
2580 if (drrfo->drr_firstobj + drrfo->drr_numobjs < drrfo->drr_firstobj)
2581 return (SET_ERROR(EINVAL));
2582
2583 for (obj = drrfo->drr_firstobj == 0 ? 1 : drrfo->drr_firstobj;
2584 obj < drrfo->drr_firstobj + drrfo->drr_numobjs && next_err == 0;
2585 next_err = dmu_object_next(rwa->os, &obj, FALSE, 0)) {
2586 dmu_object_info_t doi;
2587 int err;
2588
2589 err = dmu_object_info(rwa->os, obj, &doi);
2590 if (err == ENOENT)
2591 continue;
2592 else if (err != 0)
2593 return (err);
2594
2595 err = dmu_free_long_object(rwa->os, obj);
2596 if (err != 0)
2597 return (err);
2598
2599 if (obj > rwa->max_object)
2600 rwa->max_object = obj;
2601 }
2602 if (next_err != ESRCH)
2603 return (next_err);
2604 return (0);
2605 }
2606
2607 noinline static int
2608 receive_write(struct receive_writer_arg *rwa, struct drr_write *drrw,
2609 arc_buf_t *abuf)
2610 {
2611 dmu_tx_t *tx;
2612 dmu_buf_t *bonus;
2613 int err;
2614
2615 if (drrw->drr_offset + drrw->drr_logical_size < drrw->drr_offset ||
2616 !DMU_OT_IS_VALID(drrw->drr_type))
2617 return (SET_ERROR(EINVAL));
2618
2619 /*
2620 * For resuming to work, records must be in increasing order
2621 * by (object, offset).
2622 */
2623 if (drrw->drr_object < rwa->last_object ||
2624 (drrw->drr_object == rwa->last_object &&
2625 drrw->drr_offset < rwa->last_offset)) {
2626 return (SET_ERROR(EINVAL));
2627 }
2628 rwa->last_object = drrw->drr_object;
2629 rwa->last_offset = drrw->drr_offset;
2630
2631 if (rwa->last_object > rwa->max_object)
2632 rwa->max_object = rwa->last_object;
2633
2634 if (dmu_object_info(rwa->os, drrw->drr_object, NULL) != 0)
2635 return (SET_ERROR(EINVAL));
2636
2637 tx = dmu_tx_create(rwa->os);
2638
2639 dmu_tx_hold_write(tx, drrw->drr_object,
2640 drrw->drr_offset, drrw->drr_logical_size);
2641 err = dmu_tx_assign(tx, TXG_WAIT);
2642 if (err != 0) {
2643 dmu_tx_abort(tx);
2644 return (err);
2645 }
2646
2647 if (rwa->raw)
2648 VERIFY0(dmu_object_dirty_raw(rwa->os, drrw->drr_object, tx));
2649
2650 if (rwa->byteswap && !arc_is_encrypted(abuf) &&
2651 arc_get_compression(abuf) == ZIO_COMPRESS_OFF) {
2652 dmu_object_byteswap_t byteswap =
2653 DMU_OT_BYTESWAP(drrw->drr_type);
2654 dmu_ot_byteswap[byteswap].ob_func(abuf->b_data,
2655 DRR_WRITE_PAYLOAD_SIZE(drrw));
2656 }
2657
2658 /* use the bonus buf to look up the dnode in dmu_assign_arcbuf */
2659 if (dmu_bonus_hold(rwa->os, drrw->drr_object, FTAG, &bonus) != 0) {
2660 /* the tx was already assigned, so it must be committed */
2660 dmu_tx_commit(tx);
2660 return (SET_ERROR(EINVAL));
2660 }
2661 dmu_assign_arcbuf(bonus, drrw->drr_offset, abuf, tx);
2662
2663 /*
2664 * Note: If the receive fails, we want the resume stream to start
2665 * with the same record that we last successfully received (as opposed
2666 * to the next record), so that we can verify that we are
2667 * resuming from the correct location.
2668 */
2669 save_resume_state(rwa, drrw->drr_object, drrw->drr_offset, tx);
2670 dmu_tx_commit(tx);
2671 dmu_buf_rele(bonus, FTAG);
2672
2673 return (0);
2674 }
2675
2676 /*
2677 * Handle a DRR_WRITE_BYREF record. This record is used in dedup'ed
2678 * streams to refer to a copy of the data that is already on the
2679 * system because it came in earlier in the stream. This function
2680 * finds the earlier copy of the data, and uses that copy instead of
2681 * data from the stream to fulfill this write.
2682 */
2683 static int
2684 receive_write_byref(struct receive_writer_arg *rwa,
2685 struct drr_write_byref *drrwbr)
2686 {
2687 dmu_tx_t *tx;
2688 int err;
2689 guid_map_entry_t gmesrch;
2690 guid_map_entry_t *gmep;
2691 avl_index_t where;
2692 objset_t *ref_os = NULL;
2693 int flags = DMU_READ_PREFETCH;
2694 dmu_buf_t *dbp;
2695
2696 if (drrwbr->drr_offset + drrwbr->drr_length < drrwbr->drr_offset)
2697 return (SET_ERROR(EINVAL));
2698
2699 /*
2700 * If the GUID of the referenced dataset is different from the
2701 * GUID of the target dataset, find the referenced dataset.
2702 */
2703 if (drrwbr->drr_toguid != drrwbr->drr_refguid) {
2704 gmesrch.guid = drrwbr->drr_refguid;
2705 if ((gmep = avl_find(rwa->guid_to_ds_map, &gmesrch,
2706 &where)) == NULL) {
2707 return (SET_ERROR(EINVAL));
2708 }
2709 if (dmu_objset_from_ds(gmep->gme_ds, &ref_os))
2710 return (SET_ERROR(EINVAL));
2711 } else {
2712 ref_os = rwa->os;
2713 }
2714
2715 if (drrwbr->drr_object > rwa->max_object)
2716 rwa->max_object = drrwbr->drr_object;
2717
2718 if (rwa->raw)
2719 flags |= DMU_READ_NO_DECRYPT;
2720
2721 /* may return either a regular db or an encrypted one */
2722 err = dmu_buf_hold(ref_os, drrwbr->drr_refobject,
2723 drrwbr->drr_refoffset, FTAG, &dbp, flags);
2724 if (err != 0)
2725 return (err);
2726
2727 tx = dmu_tx_create(rwa->os);
2728
2729 dmu_tx_hold_write(tx, drrwbr->drr_object,
2730 drrwbr->drr_offset, drrwbr->drr_length);
2731 err = dmu_tx_assign(tx, TXG_WAIT);
2732 if (err != 0) {
2733 dmu_tx_abort(tx);
2734 return (err);
2735 }
2736
2737 if (rwa->raw) {
2738 VERIFY0(dmu_object_dirty_raw(rwa->os, drrwbr->drr_object, tx));
2739 dmu_copy_from_buf(rwa->os, drrwbr->drr_object,
2740 drrwbr->drr_offset, dbp, tx);
2741 } else {
2742 dmu_write(rwa->os, drrwbr->drr_object,
2743 drrwbr->drr_offset, drrwbr->drr_length, dbp->db_data, tx);
2744 }
2745 dmu_buf_rele(dbp, FTAG);
2746
2747 /* See comment in receive_write. */
2748 save_resume_state(rwa, drrwbr->drr_object, drrwbr->drr_offset, tx);
2749 dmu_tx_commit(tx);
2750 return (0);
2751 }
2752
2753 static int
2754 receive_write_embedded(struct receive_writer_arg *rwa,
2755 struct drr_write_embedded *drrwe, void *data)
2756 {
2757 dmu_tx_t *tx;
2758 int err;
2759
2760 if (drrwe->drr_offset + drrwe->drr_length < drrwe->drr_offset)
2761 return (SET_ERROR(EINVAL));
2762
2763 if (drrwe->drr_psize > BPE_PAYLOAD_SIZE)
2764 return (SET_ERROR(EINVAL));
2765
2766 if (drrwe->drr_etype >= NUM_BP_EMBEDDED_TYPES)
2767 return (SET_ERROR(EINVAL));
2768 if (drrwe->drr_compression >= ZIO_COMPRESS_FUNCTIONS)
2769 return (SET_ERROR(EINVAL));
2770
2771 if (drrwe->drr_object > rwa->max_object)
2772 rwa->max_object = drrwe->drr_object;
2773
2774 tx = dmu_tx_create(rwa->os);
2775
2776 dmu_tx_hold_write(tx, drrwe->drr_object,
2777 drrwe->drr_offset, drrwe->drr_length);
2778 err = dmu_tx_assign(tx, TXG_WAIT);
2779 if (err != 0) {
2780 dmu_tx_abort(tx);
2781 return (err);
2782 }
2783
2784 dmu_write_embedded(rwa->os, drrwe->drr_object,
2785 drrwe->drr_offset, data, drrwe->drr_etype,
2786 drrwe->drr_compression, drrwe->drr_lsize, drrwe->drr_psize,
2787 rwa->byteswap ^ ZFS_HOST_BYTEORDER, tx);
2788
2789 /* See comment in receive_write. */
2790 save_resume_state(rwa, drrwe->drr_object, drrwe->drr_offset, tx);
2791 dmu_tx_commit(tx);
2792 return (0);
2793 }
2794
2795 static int
2796 receive_spill(struct receive_writer_arg *rwa, struct drr_spill *drrs,
2797 arc_buf_t *abuf)
2798 {
2799 dmu_tx_t *tx;
2800 dmu_buf_t *db, *db_spill;
2801 int err;
2802
2803 if (drrs->drr_length < SPA_MINBLOCKSIZE ||
2804 drrs->drr_length > spa_maxblocksize(dmu_objset_spa(rwa->os)))
2805 return (SET_ERROR(EINVAL));
2806
2807 if (rwa->raw) {
2808 if (!DMU_OT_IS_VALID(drrs->drr_type) ||
2809 drrs->drr_compressiontype >= ZIO_COMPRESS_FUNCTIONS ||
2810 drrs->drr_compressed_size == 0)
2811 return (SET_ERROR(EINVAL));
2812 }
2813
2814 if (dmu_object_info(rwa->os, drrs->drr_object, NULL) != 0)
2815 return (SET_ERROR(EINVAL));
2816
2817 if (drrs->drr_object > rwa->max_object)
2818 rwa->max_object = drrs->drr_object;
2819
2820 VERIFY0(dmu_bonus_hold(rwa->os, drrs->drr_object, FTAG, &db));
2821 if ((err = dmu_spill_hold_by_bonus(db, FTAG, &db_spill)) != 0) {
2822 dmu_buf_rele(db, FTAG);
2823 return (err);
2824 }
2825
2826 tx = dmu_tx_create(rwa->os);
2827
2828 dmu_tx_hold_spill(tx, db->db_object);
2829
2830 err = dmu_tx_assign(tx, TXG_WAIT);
2831 if (err != 0) {
2832 dmu_buf_rele(db, FTAG);
2833 dmu_buf_rele(db_spill, FTAG);
2834 dmu_tx_abort(tx);
2835 return (err);
2836 }
2837 dmu_buf_will_dirty(db_spill, tx);
2838 if (rwa->raw)
2839 VERIFY0(dmu_object_dirty_raw(rwa->os, drrs->drr_object, tx));
2840
2841 if (db_spill->db_size < drrs->drr_length)
2842 VERIFY0(dbuf_spill_set_blksz(db_spill,
2843 drrs->drr_length, tx));
2844 dmu_assign_arcbuf_impl(db_spill, abuf, tx);
2845
2846 dmu_buf_rele(db, FTAG);
2847 dmu_buf_rele(db_spill, FTAG);
2848
2849 dmu_tx_commit(tx);
2850 return (0);
2851 }
2852
2853 /* ARGSUSED */
2854 noinline static int
2855 receive_free(struct receive_writer_arg *rwa, struct drr_free *drrf)
2856 {
2857 int err;
2858
2859 if (drrf->drr_length != -1ULL &&
2860 drrf->drr_offset + drrf->drr_length < drrf->drr_offset)
2861 return (SET_ERROR(EINVAL));
2862
2863 if (dmu_object_info(rwa->os, drrf->drr_object, NULL) != 0)
2864 return (SET_ERROR(EINVAL));
2865
2866 if (drrf->drr_object > rwa->max_object)
2867 rwa->max_object = drrf->drr_object;
2868
2869 err = dmu_free_long_range(rwa->os, drrf->drr_object,
2870 drrf->drr_offset, drrf->drr_length);
2871
2872 return (err);
2873 }
2874
2875 static int
2876 receive_object_range(struct receive_writer_arg *rwa,
2877 struct drr_object_range *drror)
2878 {
2879 int ret;
2880 dmu_tx_t *tx;
2881 dnode_t *mdn = NULL;
2882 dmu_buf_t *db = NULL;
2883 uint64_t offset;
2884
2885 /*
2886 * By default, we assume this block is in our native format
2887 * (ZFS_HOST_BYTEORDER). We then take into account whether
2888 * the send stream is byteswapped (rwa->byteswap). Finally,
2889 * we need to byteswap again if this particular block was
2890 * in non-native format on the send side.
2891 */
2892 boolean_t byteorder = ZFS_HOST_BYTEORDER ^ rwa->byteswap ^
2893 !!DRR_IS_RAW_BYTESWAPPED(drror->drr_flags);
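	/*
	 * Note that with this XOR, two simultaneous swaps cancel: e.g. if
	 * the stream is byteswapped relative to this host and the block
	 * was also written in the sender's non-native order, the block's
	 * data ends up flagged as being in this host's native byteorder.
	 */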
2894
2895 /*
2896 * Since dnode block sizes are constant, we should not need to worry
2897 * about making sure that the dnode block size is the same on the
2898 * sending and receiving sides for the time being. For non-raw sends,
2899 * this does not matter (and in fact we do not send a DRR_OBJECT_RANGE
2900 * record at all). Raw sends require this record type because the
2901 * encryption parameters are used to protect an entire block of bonus
2902 * buffers. If the size of dnode blocks ever becomes variable,
2903 * handling will need to be added to ensure that dnode block sizes
2904 * match on the sending and receiving side.
2905 */
2906 if (drror->drr_numslots != DNODES_PER_BLOCK ||
2907 P2PHASE(drror->drr_firstobj, DNODES_PER_BLOCK) != 0 ||
2908 !rwa->raw)
2909 return (SET_ERROR(EINVAL));
2910
2911 if (drror->drr_firstobj > rwa->max_object)
2912 rwa->max_object = drror->drr_firstobj;
2913
2914 offset = drror->drr_firstobj * sizeof (dnode_phys_t);
2915 mdn = DMU_META_DNODE(rwa->os);
2916
2917 tx = dmu_tx_create(rwa->os);
2918 ret = dmu_tx_assign(tx, TXG_WAIT);
2919 if (ret != 0) {
2920 dmu_tx_abort(tx);
2921 return (ret);
2922 }
2923
2924 ret = dmu_buf_hold_by_dnode(mdn, offset, FTAG, &db,
2925 DMU_READ_PREFETCH | DMU_READ_NO_DECRYPT);
2926 if (ret != 0) {
2927 dmu_tx_commit(tx);
2928 return (ret);
2929 }
2930
2931 /*
2932 * Convert the buffer associated with this range of dnodes to a
2933 * raw buffer. This ensures that it will be written out as a raw
2934 * buffer when we fill in the dnode objects in future records.
2935 * Since we are committing this tx now, it is technically possible
2936 * for the dnode block to end up on-disk with the incorrect MAC.
2937 * Despite this, the dataset is marked as inconsistent so no other
2938 * code paths (apart from scrubs) will attempt to read this data.
2939 * Scrubs will not be affected by this either since scrubs only
2940 * read raw data and do not attempt to check the MAC.
2941 */
2942 dmu_convert_to_raw(db, byteorder, drror->drr_salt, drror->drr_iv,
2943 drror->drr_mac, tx);
2944 dmu_buf_rele(db, FTAG);
2945 dmu_tx_commit(tx);
2946 return (0);
2947 }
2948
2949 /* used to destroy the drc_ds on error */
2950 static void
2951 dmu_recv_cleanup_ds(dmu_recv_cookie_t *drc)
2952 {
2953 ds_hold_flags_t dsflags = (drc->drc_raw) ? 0 : DS_HOLD_FLAG_DECRYPT;
2954
2955 /*
2956 * Wait for the txg sync before cleaning up the receive. For
2957 * resumable receives, this ensures that our resume state has
2958 * been written out to disk. For raw receives, this ensures
2959 * that the user accounting code will not attempt to do anything
2960 * after we stopped receiving the dataset.
2961 */
2962 txg_wait_synced(drc->drc_ds->ds_dir->dd_pool, 0);
2963
2964 if (drc->drc_resumable) {
2965 dsl_dataset_disown(drc->drc_ds, dsflags, dmu_recv_tag);
2966 } else {
2967 char name[ZFS_MAX_DATASET_NAME_LEN];
2968 dsl_dataset_name(drc->drc_ds, name);
2969 dsl_dataset_disown(drc->drc_ds, dsflags, dmu_recv_tag);
2970 (void) dsl_destroy_head(name);
2971 }
2972 }
2973
2974 static void
2975 receive_cksum(struct receive_arg *ra, int len, void *buf)
2976 {
2977 if (ra->byteswap) {
2978 (void) fletcher_4_incremental_byteswap(buf, len, &ra->cksum);
2979 } else {
2980 (void) fletcher_4_incremental_native(buf, len, &ra->cksum);
2981 }
2982 }
2983
2984 /*
2985 * Read the payload into a buffer of size len, and update the current record's
2986 * payload field.
2987 * Allocate ra->next_rrd and read the next record's header into
2988 * ra->next_rrd->header.
2989 * Verify checksum of payload and next record.
2990 */
2991 static int
2992 receive_read_payload_and_next_header(struct receive_arg *ra, int len, void *buf)
2993 {
2994 int err;
2995 zio_cksum_t cksum_orig;
2996 zio_cksum_t *cksump;
2997
2998 if (len != 0) {
2999 ASSERT3U(len, <=, SPA_MAXBLOCKSIZE);
3000 err = receive_read(ra, len, buf);
3001 if (err != 0)
3002 return (err);
3003 receive_cksum(ra, len, buf);
3004
3005 /* note: rrd is NULL when reading the begin record's payload */
3006 if (ra->rrd != NULL) {
3007 ra->rrd->payload = buf;
3008 ra->rrd->payload_size = len;
3009 ra->rrd->bytes_read = ra->bytes_read;
3010 }
3011 }
3012
3013 ra->prev_cksum = ra->cksum;
3014
3015 ra->next_rrd = kmem_zalloc(sizeof (*ra->next_rrd), KM_SLEEP);
3016 err = receive_read(ra, sizeof (ra->next_rrd->header),
3017 &ra->next_rrd->header);
3018 ra->next_rrd->bytes_read = ra->bytes_read;
3019
3020 if (err != 0) {
3021 kmem_free(ra->next_rrd, sizeof (*ra->next_rrd));
3022 ra->next_rrd = NULL;
3023 return (err);
3024 }
3025 if (ra->next_rrd->header.drr_type == DRR_BEGIN) {
3026 kmem_free(ra->next_rrd, sizeof (*ra->next_rrd));
3027 ra->next_rrd = NULL;
3028 return (SET_ERROR(EINVAL));
3029 }
3030
3031 /*
3032 * Note: checksum is of everything up to but not including the
3033 * checksum itself.
3034 */
3035 ASSERT3U(offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
3036 ==, sizeof (dmu_replay_record_t) - sizeof (zio_cksum_t));
3037 receive_cksum(ra,
3038 offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
3039 &ra->next_rrd->header);
3040
3041 cksum_orig = ra->next_rrd->header.drr_u.drr_checksum.drr_checksum;
3042 cksump = &ra->next_rrd->header.drr_u.drr_checksum.drr_checksum;
3043
3044 if (ra->byteswap)
3045 byteswap_record(&ra->next_rrd->header);
3046
3047 if ((!ZIO_CHECKSUM_IS_ZERO(cksump)) &&
3048 !ZIO_CHECKSUM_EQUAL(ra->cksum, *cksump)) {
3049 kmem_free(ra->next_rrd, sizeof (*ra->next_rrd));
3050 ra->next_rrd = NULL;
3051 return (SET_ERROR(ECKSUM));
3052 }
3053
3054 receive_cksum(ra, sizeof (cksum_orig), &cksum_orig);
3055
3056 return (0);
3057 }
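
/*
 * Note on the checksum discipline implemented above: the fletcher-4
 * checksum embedded in each record header is cumulative, covering the
 * entire stream from the BEGIN record up to (but not including) that
 * checksum field itself. After verification, the checksum words are
 * folded back into the running sum, so subsequent records cover them
 * as well.
 */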
3058
3059 static void
3060 objlist_create(struct objlist *list)
3061 {
3062 list_create(&list->list, sizeof (struct receive_objnode),
3063 offsetof(struct receive_objnode, node));
3064 list->last_lookup = 0;
3065 }
3066
3067 static void
3068 objlist_destroy(struct objlist *list)
3069 {
3070 struct receive_objnode *n;
3071
3072 for (n = list_remove_head(&list->list);
3073 n != NULL; n = list_remove_head(&list->list)) {
3074 kmem_free(n, sizeof (*n));
3075 }
3076 list_destroy(&list->list);
3077 }
3078
3079 /*
3080 * This function looks through the objlist to see if the specified object number
3081 * is contained in the objlist. In the process, it will remove all object
3082 * numbers in the list that are smaller than the specified object number. Thus,
3083 * any lookup of an object number smaller than a previously looked up object
3084 * number will always return false; therefore, all lookups should be done in
3085 * ascending order.
3086 */
3087 static boolean_t
3088 objlist_exists(struct objlist *list, uint64_t object)
3089 {
3090 struct receive_objnode *node = list_head(&list->list);
3091 ASSERT3U(object, >=, list->last_lookup);
3092 list->last_lookup = object;
3093 while (node != NULL && node->object < object) {
3094 VERIFY3P(node, ==, list_remove_head(&list->list));
3095 kmem_free(node, sizeof (*node));
3096 node = list_head(&list->list);
3097 }
3098 return (node != NULL && node->object == object);
3099 }
3100
3101 /*
3102 * The objlist is a list of object numbers stored in ascending order. However,
3103 * inserting a new object number does not seek out its sorted position;
3104 * the number is simply appended to the tail of the list. Thus, callers
3105 * must take care to only insert new object numbers in ascending
3106 * order.
3107 */
3108 static void
3109 objlist_insert(struct objlist *list, uint64_t object)
3110 {
3111 struct receive_objnode *node = kmem_zalloc(sizeof (*node), KM_SLEEP);
3112 node->object = object;
3113 #ifdef ZFS_DEBUG
3114 {
3115 struct receive_objnode *last_object = list_tail(&list->list);
3116 uint64_t last_objnum = (last_object != NULL ? last_object->object : 0);
3117 ASSERT3U(node->object, >, last_objnum);
3118 }
3119 #endif
3120 list_insert_tail(&list->list, node);
3121 }
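
/*
 * A short usage sketch (object numbers are hypothetical). Because both
 * insertion and lookup must proceed in ascending order, a caller does:
 *
 *	struct objlist ol;
 *	objlist_create(&ol);
 *	objlist_insert(&ol, 5);
 *	objlist_insert(&ol, 93);
 *	(void) objlist_exists(&ol, 5);		--> B_TRUE
 *	(void) objlist_exists(&ol, 60);		--> B_FALSE, trims 5
 *	(void) objlist_exists(&ol, 93);		--> B_TRUE
 *	objlist_destroy(&ol);
 */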
3122
3123 /*
3124 * Issue the prefetch reads for any necessary indirect blocks.
3125 *
3126 * We use the object ignore list to tell us whether or not to issue prefetches
3127 * for a given object. We do this for both correctness (in case the blocksize
3128 * of an object has changed) and performance (if the object doesn't exist, don't
3129 * needlessly try to issue prefetches). We also trim the list as we go through
3130 * the stream to prevent it from growing to an unbounded size.
3131 *
3132 * The object numbers within will always be in sorted order, and any write
3133 * records we see will also be in sorted order, but they're not sorted with
3134 * respect to each other (i.e. we can get several object records before
3135 * receiving each object's write records). As a result, once we've reached a
3136 * given object number, we can safely remove any reference to lower object
3137 * numbers in the ignore list. In practice, we receive up to 32 object records
3138 * before receiving write records, so the list can have up to 32 nodes in it.
3139 */
3140 /* ARGSUSED */
3141 static void
3142 receive_read_prefetch(struct receive_arg *ra,
3143 uint64_t object, uint64_t offset, uint64_t length)
3144 {
3145 if (!objlist_exists(&ra->ignore_objlist, object)) {
3146 dmu_prefetch(ra->os, object, 1, offset, length,
3147 ZIO_PRIORITY_SYNC_READ);
3148 }
3149 }
3150
3151 /*
3152 * Read records off the stream, issuing any necessary prefetches.
3153 */
3154 static int
3155 receive_read_record(struct receive_arg *ra)
3156 {
3157 int err;
3158
3159 switch (ra->rrd->header.drr_type) {
3160 case DRR_OBJECT:
3161 {
3162 struct drr_object *drro = &ra->rrd->header.drr_u.drr_object;
3163 uint32_t size = DRR_OBJECT_PAYLOAD_SIZE(drro);
3164 void *buf = kmem_zalloc(size, KM_SLEEP);
3165 dmu_object_info_t doi;
3166
3167 err = receive_read_payload_and_next_header(ra, size, buf);
3168 if (err != 0) {
3169 kmem_free(buf, size);
3170 return (err);
3171 }
3172 err = dmu_object_info(ra->os, drro->drr_object, &doi);
3173 /*
3174 * See receive_read_prefetch for an explanation of why we're
3175 * storing this object in the ignore_objlist.
3176 */
3177 if (err == ENOENT ||
3178 (err == 0 && doi.doi_data_block_size != drro->drr_blksz)) {
3179 objlist_insert(&ra->ignore_objlist, drro->drr_object);
3180 err = 0;
3181 }
3182 return (err);
3183 }
3184 case DRR_FREEOBJECTS:
3185 {
3186 err = receive_read_payload_and_next_header(ra, 0, NULL);
3187 return (err);
3188 }
3189 case DRR_WRITE:
3190 {
3191 struct drr_write *drrw = &ra->rrd->header.drr_u.drr_write;
3192 arc_buf_t *abuf;
3193 boolean_t is_meta = DMU_OT_IS_METADATA(drrw->drr_type);
3194
3195 if (ra->raw) {
3196 boolean_t byteorder = ZFS_HOST_BYTEORDER ^
3197 !!DRR_IS_RAW_BYTESWAPPED(drrw->drr_flags) ^
3198 ra->byteswap;
3199
3200 abuf = arc_loan_raw_buf(dmu_objset_spa(ra->os),
3201 drrw->drr_object, byteorder, drrw->drr_salt,
3202 drrw->drr_iv, drrw->drr_mac, drrw->drr_type,
3203 drrw->drr_compressed_size, drrw->drr_logical_size,
3204 drrw->drr_compressiontype);
3205 } else if (DRR_WRITE_COMPRESSED(drrw)) {
3206 ASSERT3U(drrw->drr_compressed_size, >, 0);
3207 ASSERT3U(drrw->drr_logical_size, >=,
3208 drrw->drr_compressed_size);
3209 ASSERT(!is_meta);
3210 abuf = arc_loan_compressed_buf(
3211 dmu_objset_spa(ra->os),
3212 drrw->drr_compressed_size, drrw->drr_logical_size,
3213 drrw->drr_compressiontype);
3214 } else {
3215 abuf = arc_loan_buf(dmu_objset_spa(ra->os),
3216 is_meta, drrw->drr_logical_size);
3217 }
3218
3219 err = receive_read_payload_and_next_header(ra,
3220 DRR_WRITE_PAYLOAD_SIZE(drrw), abuf->b_data);
3221 if (err != 0) {
3222 dmu_return_arcbuf(abuf);
3223 return (err);
3224 }
3225 ra->rrd->arc_buf = abuf;
3226 receive_read_prefetch(ra, drrw->drr_object, drrw->drr_offset,
3227 drrw->drr_logical_size);
3228 return (err);
3229 }
3230 case DRR_WRITE_BYREF:
3231 {
3232 struct drr_write_byref *drrwb =
3233 &ra->rrd->header.drr_u.drr_write_byref;
3234 err = receive_read_payload_and_next_header(ra, 0, NULL);
3235 receive_read_prefetch(ra, drrwb->drr_object, drrwb->drr_offset,
3236 drrwb->drr_length);
3237 return (err);
3238 }
3239 case DRR_WRITE_EMBEDDED:
3240 {
3241 struct drr_write_embedded *drrwe =
3242 &ra->rrd->header.drr_u.drr_write_embedded;
3243 uint32_t size = P2ROUNDUP(drrwe->drr_psize, 8);
3244 void *buf = kmem_zalloc(size, KM_SLEEP);
3245
3246 err = receive_read_payload_and_next_header(ra, size, buf);
3247 if (err != 0) {
3248 kmem_free(buf, size);
3249 return (err);
3250 }
3251
3252 receive_read_prefetch(ra, drrwe->drr_object, drrwe->drr_offset,
3253 drrwe->drr_length);
3254 return (err);
3255 }
3256 case DRR_FREE:
3257 {
3258 /*
3259 * It might be beneficial to prefetch indirect blocks here, but
3260 * we don't really have the data to decide for sure.
3261 */
3262 err = receive_read_payload_and_next_header(ra, 0, NULL);
3263 return (err);
3264 }
3265 case DRR_END:
3266 {
3267 struct drr_end *drre = &ra->rrd->header.drr_u.drr_end;
3268 if (!ZIO_CHECKSUM_EQUAL(ra->prev_cksum, drre->drr_checksum))
3269 return (SET_ERROR(ECKSUM));
3270 return (0);
3271 }
3272 case DRR_SPILL:
3273 {
3274 struct drr_spill *drrs = &ra->rrd->header.drr_u.drr_spill;
3275 arc_buf_t *abuf;
3276 int len = DRR_SPILL_PAYLOAD_SIZE(drrs);
3277
3278 /* DRR_SPILL records are either raw or uncompressed */
3279 if (ra->raw) {
3280 boolean_t byteorder = ZFS_HOST_BYTEORDER ^
3281 !!DRR_IS_RAW_BYTESWAPPED(drrs->drr_flags) ^
3282 ra->byteswap;
3283
3284 abuf = arc_loan_raw_buf(dmu_objset_spa(ra->os),
3285 drrs->drr_object, byteorder, drrs->drr_salt,
3286 drrs->drr_iv, drrs->drr_mac, drrs->drr_type,
3287 drrs->drr_compressed_size, drrs->drr_length,
3288 drrs->drr_compressiontype);
3289 } else {
3290 abuf = arc_loan_buf(dmu_objset_spa(ra->os),
3291 DMU_OT_IS_METADATA(drrs->drr_type),
3292 drrs->drr_length);
3293 }
3294
3295 err = receive_read_payload_and_next_header(ra, len,
3296 abuf->b_data);
3297 if (err != 0) {
3298 dmu_return_arcbuf(abuf);
3299 return (err);
3300 }
3301 ra->rrd->arc_buf = abuf;
3302 return (err);
3303 }
3304 case DRR_OBJECT_RANGE:
3305 {
3306 err = receive_read_payload_and_next_header(ra, 0, NULL);
3307 return (err);
3308 }
3309 default:
3310 return (SET_ERROR(EINVAL));
3311 }
3312 }
3313
3314 static void
3315 dprintf_drr(struct receive_record_arg *rrd, int err)
3316 {
3317 switch (rrd->header.drr_type) {
3318 case DRR_OBJECT:
3319 {
3320 struct drr_object *drro = &rrd->header.drr_u.drr_object;
3321 dprintf("drr_type = OBJECT obj = %llu type = %u "
3322 "bonustype = %u blksz = %u bonuslen = %u cksumtype = %u "
3323 "compress = %u dn_slots = %u err = %d\n",
3324 drro->drr_object, drro->drr_type, drro->drr_bonustype,
3325 drro->drr_blksz, drro->drr_bonuslen,
3326 drro->drr_checksumtype, drro->drr_compress,
3327 drro->drr_dn_slots, err);
3328 break;
3329 }
3330 case DRR_FREEOBJECTS:
3331 {
3332 struct drr_freeobjects *drrfo =
3333 &rrd->header.drr_u.drr_freeobjects;
3334 dprintf("drr_type = FREEOBJECTS firstobj = %llu "
3335 "numobjs = %llu err = %d\n",
3336 drrfo->drr_firstobj, drrfo->drr_numobjs, err);
3337 break;
3338 }
3339 case DRR_WRITE:
3340 {
3341 struct drr_write *drrw = &rrd->header.drr_u.drr_write;
3342 dprintf("drr_type = WRITE obj = %llu type = %u offset = %llu "
3343 "lsize = %llu cksumtype = %u cksumflags = %u "
3344 "compress = %u psize = %llu err = %d\n",
3345 drrw->drr_object, drrw->drr_type, drrw->drr_offset,
3346 drrw->drr_logical_size, drrw->drr_checksumtype,
3347 drrw->drr_flags, drrw->drr_compressiontype,
3348 drrw->drr_compressed_size, err);
3349 break;
3350 }
3351 case DRR_WRITE_BYREF:
3352 {
3353 struct drr_write_byref *drrwbr =
3354 &rrd->header.drr_u.drr_write_byref;
3355 dprintf("drr_type = WRITE_BYREF obj = %llu offset = %llu "
3356 "length = %llu toguid = %llx refguid = %llx "
3357 "refobject = %llu refoffset = %llu cksumtype = %u "
3358 "cksumflags = %u err = %d\n",
3359 drrwbr->drr_object, drrwbr->drr_offset,
3360 drrwbr->drr_length, drrwbr->drr_toguid,
3361 drrwbr->drr_refguid, drrwbr->drr_refobject,
3362 drrwbr->drr_refoffset, drrwbr->drr_checksumtype,
3363 drrwbr->drr_flags, err);
3364 break;
3365 }
3366 case DRR_WRITE_EMBEDDED:
3367 {
3368 struct drr_write_embedded *drrwe =
3369 &rrd->header.drr_u.drr_write_embedded;
3370 dprintf("drr_type = WRITE_EMBEDDED obj = %llu offset = %llu "
3371 "length = %llu compress = %u etype = %u lsize = %u "
3372 "psize = %u err = %d\n",
3373 drrwe->drr_object, drrwe->drr_offset, drrwe->drr_length,
3374 drrwe->drr_compression, drrwe->drr_etype,
3375 drrwe->drr_lsize, drrwe->drr_psize, err);
3376 break;
3377 }
3378 case DRR_FREE:
3379 {
3380 struct drr_free *drrf = &rrd->header.drr_u.drr_free;
3381 dprintf("drr_type = FREE obj = %llu offset = %llu "
3382 "length = %lld err = %d\n",
3383 drrf->drr_object, drrf->drr_offset, drrf->drr_length,
3384 err);
3385 break;
3386 }
3387 case DRR_SPILL:
3388 {
3389 struct drr_spill *drrs = &rrd->header.drr_u.drr_spill;
3390 dprintf("drr_type = SPILL obj = %llu length = %llu "
3391 "err = %d\n", drrs->drr_object, drrs->drr_length, err);
3392 break;
3393 }
3394 default:
3395 return;
3396 }
3397 }
3398
3399 /*
3400 * Commit the records to the pool.
3401 */
3402 static int
3403 receive_process_record(struct receive_writer_arg *rwa,
3404 struct receive_record_arg *rrd)
3405 {
3406 int err;
3407
3408 /* Processing in order, therefore bytes_read should be increasing. */
3409 ASSERT3U(rrd->bytes_read, >=, rwa->bytes_read);
3410 rwa->bytes_read = rrd->bytes_read;
3411
3412 switch (rrd->header.drr_type) {
3413 case DRR_OBJECT:
3414 {
3415 struct drr_object *drro = &rrd->header.drr_u.drr_object;
3416 err = receive_object(rwa, drro, rrd->payload);
3417 kmem_free(rrd->payload, rrd->payload_size);
3418 rrd->payload = NULL;
3419 break;
3420 }
3421 case DRR_FREEOBJECTS:
3422 {
3423 struct drr_freeobjects *drrfo =
3424 &rrd->header.drr_u.drr_freeobjects;
3425 err = receive_freeobjects(rwa, drrfo);
3426 break;
3427 }
3428 case DRR_WRITE:
3429 {
3430 struct drr_write *drrw = &rrd->header.drr_u.drr_write;
3431 err = receive_write(rwa, drrw, rrd->arc_buf);
3432 /* if receive_write() is successful, it consumes the arc_buf */
3433 if (err != 0)
3434 dmu_return_arcbuf(rrd->arc_buf);
3435 rrd->arc_buf = NULL;
3436 rrd->payload = NULL;
3437 break;
3438 }
3439 case DRR_WRITE_BYREF:
3440 {
3441 struct drr_write_byref *drrwbr =
3442 &rrd->header.drr_u.drr_write_byref;
3443 err = receive_write_byref(rwa, drrwbr);
3444 break;
3445 }
3446 case DRR_WRITE_EMBEDDED:
3447 {
3448 struct drr_write_embedded *drrwe =
3449 &rrd->header.drr_u.drr_write_embedded;
3450 err = receive_write_embedded(rwa, drrwe, rrd->payload);
3451 kmem_free(rrd->payload, rrd->payload_size);
3452 rrd->payload = NULL;
3453 break;
3454 }
3455 case DRR_FREE:
3456 {
3457 struct drr_free *drrf = &rrd->header.drr_u.drr_free;
3458 err = receive_free(rwa, drrf);
3459 break;
3460 }
3461 case DRR_SPILL:
3462 {
3463 struct drr_spill *drrs = &rrd->header.drr_u.drr_spill;
3464 err = receive_spill(rwa, drrs, rrd->arc_buf);
3465 /* if receive_spill() is successful, it consumes the arc_buf */
3466 if (err != 0)
3467 dmu_return_arcbuf(rrd->arc_buf);
3468 rrd->arc_buf = NULL;
3469 rrd->payload = NULL;
3470 break;
3471 }
3472 case DRR_OBJECT_RANGE:
3473 {
3474 struct drr_object_range *drror =
3475 &rrd->header.drr_u.drr_object_range;
3476 return (receive_object_range(rwa, drror));
3477 }
3478 default:
3479 return (SET_ERROR(EINVAL));
3480 }
3481
3482 if (err != 0)
3483 dprintf_drr(rrd, err);
3484
3485 return (err);
3486 }
3487
3488 /*
3489 * dmu_recv_stream's worker thread; pull records off the queue, and then call
3490 * receive_process_record. When we're done, signal the main thread and exit.
3491 */
static void
receive_writer_thread(void *arg)
{
        struct receive_writer_arg *rwa = arg;
        struct receive_record_arg *rrd;
        fstrans_cookie_t cookie = spl_fstrans_mark();

        for (rrd = bqueue_dequeue(&rwa->q); !rrd->eos_marker;
            rrd = bqueue_dequeue(&rwa->q)) {
                /*
                 * If there's an error, the main thread will stop putting things
                 * on the queue, but we need to clear everything in it before we
                 * can exit.
                 */
                if (rwa->err == 0) {
                        rwa->err = receive_process_record(rwa, rrd);
                } else if (rrd->arc_buf != NULL) {
                        dmu_return_arcbuf(rrd->arc_buf);
                        rrd->arc_buf = NULL;
                        rrd->payload = NULL;
                } else if (rrd->payload != NULL) {
                        kmem_free(rrd->payload, rrd->payload_size);
                        rrd->payload = NULL;
                }
                kmem_free(rrd, sizeof (*rrd));
        }
        kmem_free(rrd, sizeof (*rrd));
        mutex_enter(&rwa->mutex);
        rwa->done = B_TRUE;
        cv_signal(&rwa->cv);
        mutex_exit(&rwa->mutex);
        spl_fstrans_unmark(cookie);
        thread_exit();
}
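
/*
 * The thread above is the consumer half of a bqueue producer/consumer pair.
 * A sketch of the pattern it relies on (rec_t and process_or_free are
 * hypothetical names, not part of this file): the producer enqueues records
 * weighted by their in-memory size, then a final record with eos_marker set;
 * the consumer dequeues until it sees that marker and frees everything, even
 * after an error, so no records leak:
 *
 *	bqueue_init(&q, max_bytes, offsetof(rec_t, node));
 *	...
 *	// producer: signal end of stream
 *	rec->eos_marker = B_TRUE;
 *	bqueue_enqueue(&q, rec, 1);
 *	...
 *	// consumer: drain unconditionally until the eos marker
 *	for (r = bqueue_dequeue(&q); !r->eos_marker; r = bqueue_dequeue(&q))
 *		process_or_free(r);
 *
 * Draining past an error matters: once rwa->err is set, the main thread may
 * still push a few records before it notices, and those must be freed here.
 */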

static int
resume_check(struct receive_arg *ra, nvlist_t *begin_nvl)
{
        uint64_t val;
        objset_t *mos = dmu_objset_pool(ra->os)->dp_meta_objset;
        uint64_t dsobj = dmu_objset_id(ra->os);
        uint64_t resume_obj, resume_off;

        if (nvlist_lookup_uint64(begin_nvl,
            "resume_object", &resume_obj) != 0 ||
            nvlist_lookup_uint64(begin_nvl,
            "resume_offset", &resume_off) != 0) {
                return (SET_ERROR(EINVAL));
        }
        VERIFY0(zap_lookup(mos, dsobj,
            DS_FIELD_RESUME_OBJECT, sizeof (val), 1, &val));
        if (resume_obj != val)
                return (SET_ERROR(EINVAL));
        VERIFY0(zap_lookup(mos, dsobj,
            DS_FIELD_RESUME_OFFSET, sizeof (val), 1, &val));
        if (resume_off != val)
                return (SET_ERROR(EINVAL));

        return (0);
}
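
/*
 * For context, a sketch of what resume_check() validates against (the names
 * follow the lookups above; the actual send-side code lives elsewhere): a
 * resuming sender embeds its resume cursor in the BEGIN record's nvlist
 * payload, and the receiver compares it with the cursor recorded in the
 * dataset's ZAP when the previous receive was interrupted:
 *
 *	nvlist_t *nvl = fnvlist_alloc();
 *	fnvlist_add_uint64(nvl, "resume_object", resumeobj);
 *	fnvlist_add_uint64(nvl, "resume_offset", resumeoff);
 *
 * If either value disagrees, the stream was generated from a different
 * resume point than the one on disk, and the receive is refused with EINVAL.
 */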

/*
 * Read in the stream's records, one by one, and apply them to the pool. There
 * are two threads involved; the thread that calls this function will spin up a
 * worker thread, read the records off the stream one by one, and issue
 * prefetches for any necessary indirect blocks. It will then push the records
 * onto an internal blocking queue. The worker thread will pull the records off
 * the queue, and actually write the data into the DMU. This way, the worker
 * thread doesn't have to wait for reads to complete, since everything it needs
 * (the indirect blocks) will be prefetched.
 *
 * NB: callers *must* call dmu_recv_end() if this succeeds.
 */
int
dmu_recv_stream(dmu_recv_cookie_t *drc, vnode_t *vp, offset_t *voffp,
    int cleanup_fd, uint64_t *action_handlep)
{
        int err = 0;
        struct receive_arg *ra;
        struct receive_writer_arg *rwa;
        int featureflags;
        uint32_t payloadlen;
        void *payload;
        nvlist_t *begin_nvl = NULL;

        ra = kmem_zalloc(sizeof (*ra), KM_SLEEP);
        rwa = kmem_zalloc(sizeof (*rwa), KM_SLEEP);

        ra->byteswap = drc->drc_byteswap;
        ra->raw = drc->drc_raw;
        ra->cksum = drc->drc_cksum;
        ra->vp = vp;
        ra->voff = *voffp;

        if (dsl_dataset_is_zapified(drc->drc_ds)) {
                (void) zap_lookup(drc->drc_ds->ds_dir->dd_pool->dp_meta_objset,
                    drc->drc_ds->ds_object, DS_FIELD_RESUME_BYTES,
                    sizeof (ra->bytes_read), 1, &ra->bytes_read);
        }

        objlist_create(&ra->ignore_objlist);

        /* these were verified in dmu_recv_begin */
        ASSERT3U(DMU_GET_STREAM_HDRTYPE(drc->drc_drrb->drr_versioninfo), ==,
            DMU_SUBSTREAM);
        ASSERT3U(drc->drc_drrb->drr_type, <, DMU_OST_NUMTYPES);

        /*
         * Open the objset we are modifying.
         */
        VERIFY0(dmu_objset_from_ds(drc->drc_ds, &ra->os));

        ASSERT(dsl_dataset_phys(drc->drc_ds)->ds_flags & DS_FLAG_INCONSISTENT);

        featureflags = DMU_GET_FEATUREFLAGS(drc->drc_drrb->drr_versioninfo);
        ra->featureflags = featureflags;

        /* embedded data is incompatible with encrypted datasets */
        if (ra->os->os_encrypted &&
            (featureflags & DMU_BACKUP_FEATURE_EMBED_DATA)) {
                err = SET_ERROR(EINVAL);
                goto out;
        }

        /* if this stream is dedup'ed, set up the avl tree for guid mapping */
        if (featureflags & DMU_BACKUP_FEATURE_DEDUP) {
                minor_t minor;

                if (cleanup_fd == -1) {
                        err = SET_ERROR(EBADF);
                        goto out;
                }
                err = zfs_onexit_fd_hold(cleanup_fd, &minor);
                if (err != 0) {
                        cleanup_fd = -1;
                        goto out;
                }

                if (*action_handlep == 0) {
                        rwa->guid_to_ds_map =
                            kmem_alloc(sizeof (avl_tree_t), KM_SLEEP);
                        avl_create(rwa->guid_to_ds_map, guid_compare,
                            sizeof (guid_map_entry_t),
                            offsetof(guid_map_entry_t, avlnode));
                        err = zfs_onexit_add_cb(minor,
                            free_guid_map_onexit, rwa->guid_to_ds_map,
                            action_handlep);
                        if (err != 0)
                                goto out;
                } else {
                        err = zfs_onexit_cb_data(minor, *action_handlep,
                            (void **)&rwa->guid_to_ds_map);
                        if (err != 0)
                                goto out;
                }

                drc->drc_guid_to_ds_map = rwa->guid_to_ds_map;
        }

        payloadlen = drc->drc_drr_begin->drr_payloadlen;
        payload = NULL;
        if (payloadlen != 0)
                payload = kmem_alloc(payloadlen, KM_SLEEP);

        err = receive_read_payload_and_next_header(ra, payloadlen, payload);
        if (err != 0) {
                if (payloadlen != 0)
                        kmem_free(payload, payloadlen);
                goto out;
        }
        if (payloadlen != 0) {
                err = nvlist_unpack(payload, payloadlen, &begin_nvl, KM_SLEEP);
                kmem_free(payload, payloadlen);
                if (err != 0)
                        goto out;
        }

        /* handle DSL encryption key payload */
        if (featureflags & DMU_BACKUP_FEATURE_RAW) {
                nvlist_t *keynvl = NULL;

                ASSERT(ra->os->os_encrypted);
                ASSERT(drc->drc_raw);

                err = nvlist_lookup_nvlist(begin_nvl, "crypt_keydata", &keynvl);
                if (err != 0)
                        goto out;

                err = dsl_crypto_recv_key(spa_name(ra->os->os_spa),
                    drc->drc_ds->ds_object, drc->drc_drrb->drr_type,
                    keynvl);
                if (err != 0)
                        goto out;
        }

        if (featureflags & DMU_BACKUP_FEATURE_RESUMING) {
                err = resume_check(ra, begin_nvl);
                if (err != 0)
                        goto out;
        }

        (void) bqueue_init(&rwa->q, zfs_recv_queue_length,
            offsetof(struct receive_record_arg, node));
        cv_init(&rwa->cv, NULL, CV_DEFAULT, NULL);
        mutex_init(&rwa->mutex, NULL, MUTEX_DEFAULT, NULL);
        rwa->os = ra->os;
        rwa->byteswap = drc->drc_byteswap;
        rwa->resumable = drc->drc_resumable;
        rwa->raw = drc->drc_raw;

        (void) thread_create(NULL, 0, receive_writer_thread, rwa, 0, curproc,
            TS_RUN, minclsyspri);
        /*
         * We're reading rwa->err without locks, which is safe since we are the
         * only reader, and the worker thread is the only writer. It's ok if we
         * miss a write for an iteration or two of the loop, since the writer
         * thread will keep freeing records we send it until we send it an eos
         * marker.
         *
         * We can leave this loop in 3 ways: First, if rwa->err is
         * non-zero. In that case, the writer thread will free the rrd we just
         * pushed. Second, if we're interrupted; in that case, either it's the
         * first loop and ra->rrd was never allocated, or it's later and ra->rrd
         * has been handed off to the writer thread who will free it. Finally,
         * if receive_read_record fails or we're at the end of the stream, then
         * we free ra->rrd and exit.
         */
        while (rwa->err == 0) {
                if (issig(JUSTLOOKING) && issig(FORREAL)) {
                        err = SET_ERROR(EINTR);
                        break;
                }

                ASSERT3P(ra->rrd, ==, NULL);
                ra->rrd = ra->next_rrd;
                ra->next_rrd = NULL;
                /* Allocates and loads header into ra->next_rrd */
                err = receive_read_record(ra);

                if (ra->rrd->header.drr_type == DRR_END || err != 0) {
                        kmem_free(ra->rrd, sizeof (*ra->rrd));
                        ra->rrd = NULL;
                        break;
                }

                bqueue_enqueue(&rwa->q, ra->rrd,
                    sizeof (struct receive_record_arg) + ra->rrd->payload_size);
                ra->rrd = NULL;
        }
        if (ra->next_rrd == NULL)
                ra->next_rrd = kmem_zalloc(sizeof (*ra->next_rrd), KM_SLEEP);
        ra->next_rrd->eos_marker = B_TRUE;
        bqueue_enqueue(&rwa->q, ra->next_rrd, 1);

        mutex_enter(&rwa->mutex);
        while (!rwa->done) {
                cv_wait(&rwa->cv, &rwa->mutex);
        }
        mutex_exit(&rwa->mutex);

        /*
         * If we are receiving a full stream as a clone, all object IDs which
         * are greater than the maximum ID referenced in the stream are
         * by definition unused and must be freed.
         */
        if (drc->drc_clone && drc->drc_drrb->drr_fromguid == 0) {
                uint64_t obj = rwa->max_object + 1;
                int free_err = 0;
                int next_err = 0;

                while (next_err == 0) {
                        free_err = dmu_free_long_object(rwa->os, obj);
                        if (free_err != 0 && free_err != ENOENT)
                                break;

                        next_err = dmu_object_next(rwa->os, &obj, FALSE, 0);
                }

                if (err == 0) {
                        if (free_err != 0 && free_err != ENOENT)
                                err = free_err;
                        else if (next_err != ESRCH)
                                err = next_err;
                }
        }

        cv_destroy(&rwa->cv);
        mutex_destroy(&rwa->mutex);
        bqueue_destroy(&rwa->q);
        if (err == 0)
                err = rwa->err;

out:
        nvlist_free(begin_nvl);
        if ((featureflags & DMU_BACKUP_FEATURE_DEDUP) && (cleanup_fd != -1))
                zfs_onexit_fd_rele(cleanup_fd);

        if (err != 0) {
                /*
                 * Clean up references. If receive is not resumable,
                 * destroy what we created, so we don't leave it in
                 * the inconsistent state.
                 */
                dmu_recv_cleanup_ds(drc);
        }

        *voffp = ra->voff;
        objlist_destroy(&ra->ignore_objlist);
        kmem_free(ra, sizeof (*ra));
        kmem_free(rwa, sizeof (*rwa));
        return (err);
}
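
/*
 * A minimal sketch of the caller contract (hypothetical error handling,
 * assuming a drc already set up by dmu_recv_begin()): dmu_recv_stream() only
 * writes the data; the dataset stays flagged DS_FLAG_INCONSISTENT until
 * dmu_recv_end() commits it, which is why the comment above insists that a
 * successful stream *must* be followed by dmu_recv_end():
 *
 *	err = dmu_recv_stream(&drc, vp, &voff, cleanup_fd, &action_handle);
 *	if (err == 0)
 *		err = dmu_recv_end(&drc, owner);
 *	// on stream failure, dmu_recv_stream() has already cleaned up the ds
 *
 * The in-kernel caller, the zfs receive ioctl path, follows this shape.
 */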

static int
dmu_recv_end_check(void *arg, dmu_tx_t *tx)
{
        dmu_recv_cookie_t *drc = arg;
        dsl_pool_t *dp = dmu_tx_pool(tx);
        int error;

        ASSERT3P(drc->drc_ds->ds_owner, ==, dmu_recv_tag);

        if (!drc->drc_newfs) {
                dsl_dataset_t *origin_head;

                error = dsl_dataset_hold(dp, drc->drc_tofs, FTAG, &origin_head);
                if (error != 0)
                        return (error);
                if (drc->drc_force) {
                        /*
                         * We will destroy any snapshots in tofs (i.e. before
                         * origin_head) that are after the origin (which is
                         * the snap before drc_ds, because drc_ds can not
                         * have any snaps of its own).
                         */
                        uint64_t obj;

                        obj = dsl_dataset_phys(origin_head)->ds_prev_snap_obj;
                        while (obj !=
                            dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj) {
                                dsl_dataset_t *snap;
                                error = dsl_dataset_hold_obj(dp, obj, FTAG,
                                    &snap);
                                if (error != 0)
                                        break;
                                if (snap->ds_dir != origin_head->ds_dir)
                                        error = SET_ERROR(EINVAL);
                                if (error == 0) {
                                        error = dsl_destroy_snapshot_check_impl(
                                            snap, B_FALSE);
                                }
                                obj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
                                dsl_dataset_rele(snap, FTAG);
                                if (error != 0)
                                        break;
                        }
                        if (error != 0) {
                                dsl_dataset_rele(origin_head, FTAG);
                                return (error);
                        }
                }
                error = dsl_dataset_clone_swap_check_impl(drc->drc_ds,
                    origin_head, drc->drc_force, drc->drc_owner, tx);
                if (error != 0) {
                        dsl_dataset_rele(origin_head, FTAG);
                        return (error);
                }
                error = dsl_dataset_snapshot_check_impl(origin_head,
                    drc->drc_tosnap, tx, B_TRUE, 1, drc->drc_cred);
                dsl_dataset_rele(origin_head, FTAG);
                if (error != 0)
                        return (error);

                error = dsl_destroy_head_check_impl(drc->drc_ds, 1);
        } else {
                error = dsl_dataset_snapshot_check_impl(drc->drc_ds,
                    drc->drc_tosnap, tx, B_TRUE, 1, drc->drc_cred);
        }
        return (error);
}

static void
dmu_recv_end_sync(void *arg, dmu_tx_t *tx)
{
        dmu_recv_cookie_t *drc = arg;
        dsl_pool_t *dp = dmu_tx_pool(tx);
        boolean_t encrypted = drc->drc_ds->ds_dir->dd_crypto_obj != 0;

        spa_history_log_internal_ds(drc->drc_ds, "finish receiving",
            tx, "snap=%s", drc->drc_tosnap);

        if (!drc->drc_newfs) {
                dsl_dataset_t *origin_head;

                VERIFY0(dsl_dataset_hold(dp, drc->drc_tofs, FTAG,
                    &origin_head));

                if (drc->drc_force) {
                        /*
                         * Destroy any snapshots of drc_tofs (origin_head)
                         * after the origin (the snap before drc_ds).
                         */
                        uint64_t obj;

                        obj = dsl_dataset_phys(origin_head)->ds_prev_snap_obj;
                        while (obj !=
                            dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj) {
                                dsl_dataset_t *snap;
                                VERIFY0(dsl_dataset_hold_obj(dp, obj, FTAG,
                                    &snap));
                                ASSERT3P(snap->ds_dir, ==, origin_head->ds_dir);
                                obj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
                                dsl_destroy_snapshot_sync_impl(snap,
                                    B_FALSE, tx);
                                dsl_dataset_rele(snap, FTAG);
                        }
                }
                VERIFY3P(drc->drc_ds->ds_prev, ==,
                    origin_head->ds_prev);

                dsl_dataset_clone_swap_sync_impl(drc->drc_ds,
                    origin_head, tx);
                dsl_dataset_snapshot_sync_impl(origin_head,
                    drc->drc_tosnap, tx);

                /* set snapshot's creation time and guid */
                dmu_buf_will_dirty(origin_head->ds_prev->ds_dbuf, tx);
                dsl_dataset_phys(origin_head->ds_prev)->ds_creation_time =
                    drc->drc_drrb->drr_creation_time;
                dsl_dataset_phys(origin_head->ds_prev)->ds_guid =
                    drc->drc_drrb->drr_toguid;
                dsl_dataset_phys(origin_head->ds_prev)->ds_flags &=
                    ~DS_FLAG_INCONSISTENT;

                dmu_buf_will_dirty(origin_head->ds_dbuf, tx);
                dsl_dataset_phys(origin_head)->ds_flags &=
                    ~DS_FLAG_INCONSISTENT;

                drc->drc_newsnapobj =
                    dsl_dataset_phys(origin_head)->ds_prev_snap_obj;

                dsl_dataset_rele(origin_head, FTAG);
                dsl_destroy_head_sync_impl(drc->drc_ds, tx);

                if (drc->drc_owner != NULL)
                        VERIFY3P(origin_head->ds_owner, ==, drc->drc_owner);
        } else {
                dsl_dataset_t *ds = drc->drc_ds;

                dsl_dataset_snapshot_sync_impl(ds, drc->drc_tosnap, tx);

                /* set snapshot's creation time and guid */
                dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
                dsl_dataset_phys(ds->ds_prev)->ds_creation_time =
                    drc->drc_drrb->drr_creation_time;
                dsl_dataset_phys(ds->ds_prev)->ds_guid =
                    drc->drc_drrb->drr_toguid;
                dsl_dataset_phys(ds->ds_prev)->ds_flags &=
                    ~DS_FLAG_INCONSISTENT;

                dmu_buf_will_dirty(ds->ds_dbuf, tx);
                dsl_dataset_phys(ds)->ds_flags &= ~DS_FLAG_INCONSISTENT;
                if (dsl_dataset_has_resume_receive_state(ds)) {
                        (void) zap_remove(dp->dp_meta_objset, ds->ds_object,
                            DS_FIELD_RESUME_FROMGUID, tx);
                        (void) zap_remove(dp->dp_meta_objset, ds->ds_object,
                            DS_FIELD_RESUME_OBJECT, tx);
                        (void) zap_remove(dp->dp_meta_objset, ds->ds_object,
                            DS_FIELD_RESUME_OFFSET, tx);
                        (void) zap_remove(dp->dp_meta_objset, ds->ds_object,
                            DS_FIELD_RESUME_BYTES, tx);
                        (void) zap_remove(dp->dp_meta_objset, ds->ds_object,
                            DS_FIELD_RESUME_TOGUID, tx);
                        (void) zap_remove(dp->dp_meta_objset, ds->ds_object,
                            DS_FIELD_RESUME_TONAME, tx);
                }
                drc->drc_newsnapobj =
                    dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj;
        }
        zvol_create_minors(dp->dp_spa, drc->drc_tofs, B_TRUE);

        /*
         * Release the hold from dmu_recv_begin. This must be done before
         * we return to open context, so that when we free the dataset's dnode
         * we can evict its bonus buffer. Since the dataset may be destroyed
         * at this point (and therefore won't have a valid pointer to the spa)
         * we release the key mapping manually here while we do have a valid
         * pointer, if it exists.
         */
        if (!drc->drc_raw && encrypted) {
                (void) spa_keystore_remove_mapping(dmu_tx_pool(tx)->dp_spa,
                    drc->drc_ds->ds_object, drc->drc_ds);
        }
        dsl_dataset_disown(drc->drc_ds, 0, dmu_recv_tag);
        drc->drc_ds = NULL;
}

static int
add_ds_to_guidmap(const char *name, avl_tree_t *guid_map, uint64_t snapobj,
    boolean_t raw)
{
        dsl_pool_t *dp;
        dsl_dataset_t *snapds;
        guid_map_entry_t *gmep;
        ds_hold_flags_t dsflags = (raw) ? 0 : DS_HOLD_FLAG_DECRYPT;
        int err;

        ASSERT(guid_map != NULL);

        err = dsl_pool_hold(name, FTAG, &dp);
        if (err != 0)
                return (err);
        gmep = kmem_alloc(sizeof (*gmep), KM_SLEEP);
        err = dsl_dataset_hold_obj_flags(dp, snapobj, dsflags, gmep, &snapds);
        if (err == 0) {
                gmep->guid = dsl_dataset_phys(snapds)->ds_guid;
                gmep->raw = raw;
                gmep->gme_ds = snapds;
                avl_add(guid_map, gmep);
                dsl_dataset_long_hold(snapds, gmep);
        } else {
                kmem_free(gmep, sizeof (*gmep));
        }

        dsl_pool_rele(dp, FTAG);
        return (err);
}
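
/*
 * The guid map populated here is the AVL tree created in dmu_recv_stream()
 * with guid_compare(), keyed on gmep->guid. For reference, an AVL comparator
 * for such a map has the usual three-way shape; this is a sketch, not the
 * actual definition used by this file:
 *
 *	static int
 *	guid_compare_sketch(const void *a, const void *b)
 *	{
 *		const guid_map_entry_t *ga = a;
 *		const guid_map_entry_t *gb = b;
 *		if (ga->guid < gb->guid)
 *			return (-1);
 *		if (ga->guid > gb->guid)
 *			return (1);
 *		return (0);
 *	}
 *
 * DRR_WRITE_BYREF records consult this map by source-snapshot guid to find
 * the local dataset that already holds the referenced block.
 */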

static int dmu_recv_end_modified_blocks = 3;

static int
dmu_recv_existing_end(dmu_recv_cookie_t *drc)
{
#ifdef _KERNEL
        /*
         * We will be destroying the ds; make sure its origin is unmounted if
         * necessary.
         */
        char name[ZFS_MAX_DATASET_NAME_LEN];
        dsl_dataset_name(drc->drc_ds, name);
        zfs_destroy_unmount_origin(name);
#endif

        return (dsl_sync_task(drc->drc_tofs,
            dmu_recv_end_check, dmu_recv_end_sync, drc,
            dmu_recv_end_modified_blocks, ZFS_SPACE_CHECK_NORMAL));
}

static int
dmu_recv_new_end(dmu_recv_cookie_t *drc)
{
        return (dsl_sync_task(drc->drc_tofs,
            dmu_recv_end_check, dmu_recv_end_sync, drc,
            dmu_recv_end_modified_blocks, ZFS_SPACE_CHECK_NORMAL));
}
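
/*
 * Both wrappers use the standard two-phase dsl_sync_task() idiom: the check
 * function runs first and may fail without side effects; only if it succeeds
 * does the sync function run, in syncing context, where it must not fail.
 * A generic sketch of the pattern (my_check, my_sync, can_apply, and apply
 * are placeholders, not functions in this file):
 *
 *	static int
 *	my_check(void *arg, dmu_tx_t *tx)
 *	{
 *		return (can_apply(arg, tx) ? 0 : SET_ERROR(EINVAL));
 *	}
 *
 *	static void
 *	my_sync(void *arg, dmu_tx_t *tx)
 *	{
 *		apply(arg, tx);	// must succeed; use VERIFY, not error returns
 *	}
 *
 *	err = dsl_sync_task(pool_name, my_check, my_sync, my_arg,
 *	    blocks_modified, ZFS_SPACE_CHECK_NORMAL);
 *
 * dmu_recv_end_modified_blocks above is the blocks_modified estimate used
 * for the space check.
 */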

int
dmu_recv_end(dmu_recv_cookie_t *drc, void *owner)
{
        int error;

        drc->drc_owner = owner;

        if (drc->drc_newfs)
                error = dmu_recv_new_end(drc);
        else
                error = dmu_recv_existing_end(drc);

        if (error != 0) {
                dmu_recv_cleanup_ds(drc);
        } else if (drc->drc_guid_to_ds_map != NULL) {
                (void) add_ds_to_guidmap(drc->drc_tofs, drc->drc_guid_to_ds_map,
                    drc->drc_newsnapobj, drc->drc_raw);
        }
        return (error);
}

/*
 * Return TRUE if this objset is currently being received into.
 */
boolean_t
dmu_objset_is_receiving(objset_t *os)
{
        return (os->os_dsl_dataset != NULL &&
            os->os_dsl_dataset->ds_owner == dmu_recv_tag);
}
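
/*
 * A hypothetical caller sketch (not from this file): code that must not
 * disturb an in-flight receive can gate itself on this predicate, e.g.:
 *
 *	if (dmu_objset_is_receiving(os))
 *		return (SET_ERROR(EBUSY));
 *
 * The check works because dmu_recv_begin() owns the dataset with
 * dmu_recv_tag for the duration of the receive, as asserted in
 * dmu_recv_end_check() above.
 */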

#if defined(_KERNEL)
module_param(zfs_send_corrupt_data, int, 0644);
MODULE_PARM_DESC(zfs_send_corrupt_data, "Allow sending corrupt data");
#endif
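
/*
 * On Linux, module_param() typically exposes this tunable at runtime under
 * /sys/module/zfs/parameters, so (assuming root and a loaded zfs module):
 *
 *	echo 1 > /sys/module/zfs/parameters/zfs_send_corrupt_data
 *
 * has the same effect as initializing zfs_send_corrupt_data to B_TRUE at
 * the top of this file.
 */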