1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or https://opensource.org/licenses/CDDL-1.0.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
24 * Copyright (c) 2011, 2020 by Delphix. All rights reserved.
25 * Copyright (c) 2014, Joyent, Inc. All rights reserved.
26 * Copyright 2014 HybridCluster. All rights reserved.
27 * Copyright (c) 2018, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
28 * Copyright (c) 2019, Klara Inc.
29 * Copyright (c) 2019, Allan Jude
30 * Copyright (c) 2019 Datto Inc.
31 * Copyright (c) 2022 Axcient.
32 */
33
34 #include <sys/spa_impl.h>
35 #include <sys/dmu.h>
36 #include <sys/dmu_impl.h>
37 #include <sys/dmu_send.h>
38 #include <sys/dmu_recv.h>
39 #include <sys/dmu_tx.h>
40 #include <sys/dbuf.h>
41 #include <sys/dnode.h>
42 #include <sys/zfs_context.h>
43 #include <sys/dmu_objset.h>
44 #include <sys/dmu_traverse.h>
45 #include <sys/dsl_dataset.h>
46 #include <sys/dsl_dir.h>
47 #include <sys/dsl_prop.h>
48 #include <sys/dsl_pool.h>
49 #include <sys/dsl_synctask.h>
50 #include <sys/zfs_ioctl.h>
51 #include <sys/zap.h>
52 #include <sys/zvol.h>
53 #include <sys/zio_checksum.h>
54 #include <sys/zfs_znode.h>
55 #include <zfs_fletcher.h>
56 #include <sys/avl.h>
57 #include <sys/ddt.h>
58 #include <sys/zfs_onexit.h>
59 #include <sys/dsl_destroy.h>
60 #include <sys/blkptr.h>
61 #include <sys/dsl_bookmark.h>
62 #include <sys/zfeature.h>
63 #include <sys/bqueue.h>
64 #include <sys/objlist.h>
65 #ifdef _KERNEL
66 #include <sys/zfs_vfsops.h>
67 #endif
68 #include <sys/zfs_file.h>
69
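/*
 * Receive-path tunables, exposed as zfs module parameters (on Linux they
 * appear under /sys/module/zfs/parameters/).  Roughly: zfs_recv_queue_length
 * bounds how many bytes of records the reading thread may queue ahead of
 * the writer thread, zfs_recv_queue_ff is the queue's fill fraction (see
 * bqueue_init()), zfs_recv_write_batch_size bounds how many bytes of WRITE
 * records are applied in a single transaction, and
 * zfs_recv_best_effort_corrective, when nonzero, makes a corrective
 * ("healing") receive skip blocks it cannot heal instead of failing.
 */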
70 static int zfs_recv_queue_length = SPA_MAXBLOCKSIZE;
71 static int zfs_recv_queue_ff = 20;
72 static int zfs_recv_write_batch_size = 1024 * 1024;
73 static int zfs_recv_best_effort_corrective = 0;
74
75 static const void *const dmu_recv_tag = "dmu_recv_tag";
76 const char *const recv_clone_name = "%recv";
77
78 static int receive_read_payload_and_next_header(dmu_recv_cookie_t *ra, int len,
79 void *buf);
80
81 struct receive_record_arg {
82 dmu_replay_record_t header;
83 void *payload; /* Pointer to a buffer containing the payload */
84 /*
85 * If the record is a WRITE or SPILL, pointer to the abd containing the
86 * payload.
87 */
88 abd_t *abd;
89 int payload_size;
90 uint64_t bytes_read; /* bytes read from stream when record created */
91 boolean_t eos_marker; /* Marks the end of the stream */
92 bqueue_node_t node;
93 };
94
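/*
 * State for the receive writer thread.  The main thread reads records from
 * the stream, wraps each one in a receive_record_arg, and enqueues it on
 * 'q'; the writer thread dequeues and applies the records, using 'mutex',
 * 'cv', and 'done' to signal completion back to the main thread.
 */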
95 struct receive_writer_arg {
96 objset_t *os;
97 boolean_t byteswap;
98 bqueue_t q;
99
100 /*
101 * These three members are used to signal to the main thread when
102 * we're done.
103 */
104 kmutex_t mutex;
105 kcondvar_t cv;
106 boolean_t done;
107
108 int err;
109 const char *tofs;
110 boolean_t heal;
111 boolean_t resumable;
112 boolean_t raw; /* DMU_BACKUP_FEATURE_RAW set */
113 boolean_t spill; /* DRR_FLAG_SPILL_BLOCK set */
114 boolean_t full; /* this is a full send stream */
115 uint64_t last_object;
116 uint64_t last_offset;
117 uint64_t max_object; /* highest object ID referenced in stream */
118 uint64_t bytes_read; /* bytes read when current record created */
119
120 list_t write_batch;
121
122 /* Encryption parameters for the last received DRR_OBJECT_RANGE */
123 boolean_t or_crypt_params_present;
124 uint64_t or_firstobj;
125 uint64_t or_numslots;
126 uint8_t or_salt[ZIO_DATA_SALT_LEN];
127 uint8_t or_iv[ZIO_DATA_IV_LEN];
128 uint8_t or_mac[ZIO_DATA_MAC_LEN];
129 boolean_t or_byteorder;
130 zio_t *heal_pio;
131 };
132
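/*
 * Argument block passed to the dmu_recv_begin_check()/dmu_recv_begin_sync()
 * and dmu_recv_resume_begin_check()/_sync() dsl_sync_task callbacks below.
 */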
133 typedef struct dmu_recv_begin_arg {
134 const char *drba_origin;
135 dmu_recv_cookie_t *drba_cookie;
136 cred_t *drba_cred;
137 proc_t *drba_proc;
138 dsl_crypto_params_t *drba_dcp;
139 } dmu_recv_begin_arg_t;
140
141 static void
142 byteswap_record(dmu_replay_record_t *drr)
143 {
144 #define DO64(X) (drr->drr_u.X = BSWAP_64(drr->drr_u.X))
145 #define DO32(X) (drr->drr_u.X = BSWAP_32(drr->drr_u.X))
146 drr->drr_type = BSWAP_32(drr->drr_type);
147 drr->drr_payloadlen = BSWAP_32(drr->drr_payloadlen);
148
149 switch (drr->drr_type) {
150 case DRR_BEGIN:
151 DO64(drr_begin.drr_magic);
152 DO64(drr_begin.drr_versioninfo);
153 DO64(drr_begin.drr_creation_time);
154 DO32(drr_begin.drr_type);
155 DO32(drr_begin.drr_flags);
156 DO64(drr_begin.drr_toguid);
157 DO64(drr_begin.drr_fromguid);
158 break;
159 case DRR_OBJECT:
160 DO64(drr_object.drr_object);
161 DO32(drr_object.drr_type);
162 DO32(drr_object.drr_bonustype);
163 DO32(drr_object.drr_blksz);
164 DO32(drr_object.drr_bonuslen);
165 DO32(drr_object.drr_raw_bonuslen);
166 DO64(drr_object.drr_toguid);
167 DO64(drr_object.drr_maxblkid);
168 break;
169 case DRR_FREEOBJECTS:
170 DO64(drr_freeobjects.drr_firstobj);
171 DO64(drr_freeobjects.drr_numobjs);
172 DO64(drr_freeobjects.drr_toguid);
173 break;
174 case DRR_WRITE:
175 DO64(drr_write.drr_object);
176 DO32(drr_write.drr_type);
177 DO64(drr_write.drr_offset);
178 DO64(drr_write.drr_logical_size);
179 DO64(drr_write.drr_toguid);
180 ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_write.drr_key.ddk_cksum);
181 DO64(drr_write.drr_key.ddk_prop);
182 DO64(drr_write.drr_compressed_size);
183 break;
184 case DRR_WRITE_EMBEDDED:
185 DO64(drr_write_embedded.drr_object);
186 DO64(drr_write_embedded.drr_offset);
187 DO64(drr_write_embedded.drr_length);
188 DO64(drr_write_embedded.drr_toguid);
189 DO32(drr_write_embedded.drr_lsize);
190 DO32(drr_write_embedded.drr_psize);
191 break;
192 case DRR_FREE:
193 DO64(drr_free.drr_object);
194 DO64(drr_free.drr_offset);
195 DO64(drr_free.drr_length);
196 DO64(drr_free.drr_toguid);
197 break;
198 case DRR_SPILL:
199 DO64(drr_spill.drr_object);
200 DO64(drr_spill.drr_length);
201 DO64(drr_spill.drr_toguid);
202 DO64(drr_spill.drr_compressed_size);
203 DO32(drr_spill.drr_type);
204 break;
205 case DRR_OBJECT_RANGE:
206 DO64(drr_object_range.drr_firstobj);
207 DO64(drr_object_range.drr_numslots);
208 DO64(drr_object_range.drr_toguid);
209 break;
210 case DRR_REDACT:
211 DO64(drr_redact.drr_object);
212 DO64(drr_redact.drr_offset);
213 DO64(drr_redact.drr_length);
214 DO64(drr_redact.drr_toguid);
215 break;
216 case DRR_END:
217 DO64(drr_end.drr_toguid);
218 ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_end.drr_checksum);
219 break;
220 default:
221 break;
222 }
223
224 if (drr->drr_type != DRR_BEGIN) {
225 ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_checksum.drr_checksum);
226 }
227
228 #undef DO64
229 #undef DO32
230 }
231
232 static boolean_t
233 redact_snaps_contains(uint64_t *snaps, uint64_t num_snaps, uint64_t guid)
234 {
235 for (int i = 0; i < num_snaps; i++) {
236 if (snaps[i] == guid)
237 return (B_TRUE);
238 }
239 return (B_FALSE);
240 }
241
242 /*
243 * Check that the new stream we're trying to receive is redacted with respect to
244 * a subset of the snapshots that the origin was redacted with respect to. For
245 * the reasons behind this, see the man page on redacted zfs sends and receives.
246 */
247 static boolean_t
248 compatible_redact_snaps(uint64_t *origin_snaps, uint64_t origin_num_snaps,
249 uint64_t *redact_snaps, uint64_t num_redact_snaps)
250 {
251 /*
252 * Short circuit the comparison; if we are redacted with respect to
253 * more snapshots than the origin, we can't be redacted with respect
254 * to a subset.
255 */
256 if (num_redact_snaps > origin_num_snaps) {
257 return (B_FALSE);
258 }
259
260 for (int i = 0; i < num_redact_snaps; i++) {
261 if (!redact_snaps_contains(origin_snaps, origin_num_snaps,
262 redact_snaps[i])) {
263 return (B_FALSE);
264 }
265 }
266 return (B_TRUE);
267 }
268
269 static boolean_t
270 redact_check(dmu_recv_begin_arg_t *drba, dsl_dataset_t *origin)
271 {
272 uint64_t *origin_snaps;
273 uint64_t origin_num_snaps;
274 dmu_recv_cookie_t *drc = drba->drba_cookie;
275 struct drr_begin *drrb = drc->drc_drrb;
276 int featureflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo);
277 int err = 0;
278 boolean_t ret = B_TRUE;
279 uint64_t *redact_snaps;
280 uint_t numredactsnaps;
281
282 /*
283 * If this is a full send stream, we're safe no matter what.
284 */
285 if (drrb->drr_fromguid == 0)
286 return (ret);
287
288 VERIFY(dsl_dataset_get_uint64_array_feature(origin,
289 SPA_FEATURE_REDACTED_DATASETS, &origin_num_snaps, &origin_snaps));
290
291 if (nvlist_lookup_uint64_array(drc->drc_begin_nvl,
292 BEGINNV_REDACT_FROM_SNAPS, &redact_snaps, &numredactsnaps) ==
293 0) {
294 /*
295 * If the send stream was sent from the redaction bookmark or
296 * the redacted version of the dataset, then we're safe. Verify
297 		 * that this is from a compatible redaction bookmark or
298 * redacted dataset.
299 */
300 if (!compatible_redact_snaps(origin_snaps, origin_num_snaps,
301 redact_snaps, numredactsnaps)) {
302 err = EINVAL;
303 }
304 } else if (featureflags & DMU_BACKUP_FEATURE_REDACTED) {
305 /*
306 * If the stream is redacted, it must be redacted with respect
307 * to a subset of what the origin is redacted with respect to.
308 * See case number 2 in the zfs man page section on redacted zfs
309 * send.
310 */
311 err = nvlist_lookup_uint64_array(drc->drc_begin_nvl,
312 BEGINNV_REDACT_SNAPS, &redact_snaps, &numredactsnaps);
313
314 if (err != 0 || !compatible_redact_snaps(origin_snaps,
315 origin_num_snaps, redact_snaps, numredactsnaps)) {
316 err = EINVAL;
317 }
318 } else if (!redact_snaps_contains(origin_snaps, origin_num_snaps,
319 drrb->drr_toguid)) {
320 /*
321 * If the stream isn't redacted but the origin is, this must be
322 * one of the snapshots the origin is redacted with respect to.
323 * See case number 1 in the zfs man page section on redacted zfs
324 * send.
325 */
326 err = EINVAL;
327 }
328
329 if (err != 0)
330 ret = B_FALSE;
331 return (ret);
332 }
333
334 /*
335 * If we previously received a stream with --large-block, we don't support
336 * receiving an incremental on top of it without --large-block. This avoids
337 * forcing a read-modify-write or trying to re-aggregate a string of WRITE
338 * records.
339 */
340 static int
341 recv_check_large_blocks(dsl_dataset_t *ds, uint64_t featureflags)
342 {
343 if (dsl_dataset_feature_is_active(ds, SPA_FEATURE_LARGE_BLOCKS) &&
344 !(featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS))
345 return (SET_ERROR(ZFS_ERR_STREAM_LARGE_BLOCK_MISMATCH));
346 return (0);
347 }
348
349 static int
350 recv_begin_check_existing_impl(dmu_recv_begin_arg_t *drba, dsl_dataset_t *ds,
351 uint64_t fromguid, uint64_t featureflags)
352 {
353 uint64_t obj;
354 uint64_t children;
355 int error;
356 dsl_dataset_t *snap;
357 dsl_pool_t *dp = ds->ds_dir->dd_pool;
358 boolean_t encrypted = ds->ds_dir->dd_crypto_obj != 0;
359 boolean_t raw = (featureflags & DMU_BACKUP_FEATURE_RAW) != 0;
360 boolean_t embed = (featureflags & DMU_BACKUP_FEATURE_EMBED_DATA) != 0;
361
362 /* Temporary clone name must not exist. */
363 error = zap_lookup(dp->dp_meta_objset,
364 dsl_dir_phys(ds->ds_dir)->dd_child_dir_zapobj, recv_clone_name,
365 8, 1, &obj);
366 if (error != ENOENT)
367 return (error == 0 ? SET_ERROR(EBUSY) : error);
368
369 /* Resume state must not be set. */
370 if (dsl_dataset_has_resume_receive_state(ds))
371 return (SET_ERROR(EBUSY));
372
373 /* New snapshot name must not exist if we're not healing it. */
374 error = zap_lookup(dp->dp_meta_objset,
375 dsl_dataset_phys(ds)->ds_snapnames_zapobj,
376 drba->drba_cookie->drc_tosnap, 8, 1, &obj);
377 if (drba->drba_cookie->drc_heal) {
378 if (error != 0)
379 return (error);
380 } else if (error != ENOENT) {
381 return (error == 0 ? SET_ERROR(EEXIST) : error);
382 }
383
384 /* Must not have children if receiving a ZVOL. */
385 error = zap_count(dp->dp_meta_objset,
386 dsl_dir_phys(ds->ds_dir)->dd_child_dir_zapobj, &children);
387 if (error != 0)
388 return (error);
389 if (drba->drba_cookie->drc_drrb->drr_type != DMU_OST_ZFS &&
390 children > 0)
391 return (SET_ERROR(ZFS_ERR_WRONG_PARENT));
392
393 /*
394 * Check snapshot limit before receiving. We'll recheck again at the
395 * end, but might as well abort before receiving if we're already over
396 * the limit.
397 *
398 * Note that we do not check the file system limit with
399 * dsl_dir_fscount_check because the temporary %clones don't count
400 * against that limit.
401 */
402 error = dsl_fs_ss_limit_check(ds->ds_dir, 1, ZFS_PROP_SNAPSHOT_LIMIT,
403 NULL, drba->drba_cred, drba->drba_proc);
404 if (error != 0)
405 return (error);
406
407 if (drba->drba_cookie->drc_heal) {
408 /* Encryption is incompatible with embedded data. */
409 if (encrypted && embed)
410 return (SET_ERROR(EINVAL));
411
412 /* Healing is not supported when in 'force' mode. */
413 if (drba->drba_cookie->drc_force)
414 return (SET_ERROR(EINVAL));
415
416 /* Must have keys loaded if doing encrypted non-raw recv. */
417 if (encrypted && !raw) {
418 if (spa_keystore_lookup_key(dp->dp_spa, ds->ds_object,
419 NULL, NULL) != 0)
420 return (SET_ERROR(EACCES));
421 }
422
423 error = dsl_dataset_hold_obj(dp, obj, FTAG, &snap);
424 if (error != 0)
425 return (error);
426
427 /*
428 		 * When not doing a best-effort corrective recv, healing can only
429 * be done if the send stream is for the same snapshot as the
430 * one we are trying to heal.
431 */
432 if (zfs_recv_best_effort_corrective == 0 &&
433 drba->drba_cookie->drc_drrb->drr_toguid !=
434 dsl_dataset_phys(snap)->ds_guid) {
435 dsl_dataset_rele(snap, FTAG);
436 return (SET_ERROR(ENOTSUP));
437 }
438 dsl_dataset_rele(snap, FTAG);
439 } else if (fromguid != 0) {
440 /* Sanity check the incremental recv */
441 uint64_t obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
442
443 /* Can't perform a raw receive on top of a non-raw receive */
444 if (!encrypted && raw)
445 return (SET_ERROR(EINVAL));
446
447 /* Encryption is incompatible with embedded data */
448 if (encrypted && embed)
449 return (SET_ERROR(EINVAL));
450
451 /* Find snapshot in this dir that matches fromguid. */
452 while (obj != 0) {
453 error = dsl_dataset_hold_obj(dp, obj, FTAG,
454 &snap);
455 if (error != 0)
456 return (SET_ERROR(ENODEV));
457 if (snap->ds_dir != ds->ds_dir) {
458 dsl_dataset_rele(snap, FTAG);
459 return (SET_ERROR(ENODEV));
460 }
461 if (dsl_dataset_phys(snap)->ds_guid == fromguid)
462 break;
463 obj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
464 dsl_dataset_rele(snap, FTAG);
465 }
466 if (obj == 0)
467 return (SET_ERROR(ENODEV));
468
469 if (drba->drba_cookie->drc_force) {
470 drba->drba_cookie->drc_fromsnapobj = obj;
471 } else {
472 /*
473 * If we are not forcing, there must be no
474 * changes since fromsnap. Raw sends have an
475 * additional constraint that requires that
476 * no "noop" snapshots exist between fromsnap
477 * and tosnap for the IVset checking code to
478 * work properly.
479 */
480 if (dsl_dataset_modified_since_snap(ds, snap) ||
481 (raw &&
482 dsl_dataset_phys(ds)->ds_prev_snap_obj !=
483 snap->ds_object)) {
484 dsl_dataset_rele(snap, FTAG);
485 return (SET_ERROR(ETXTBSY));
486 }
487 drba->drba_cookie->drc_fromsnapobj =
488 ds->ds_prev->ds_object;
489 }
490
491 if (dsl_dataset_feature_is_active(snap,
492 SPA_FEATURE_REDACTED_DATASETS) && !redact_check(drba,
493 snap)) {
494 dsl_dataset_rele(snap, FTAG);
495 return (SET_ERROR(EINVAL));
496 }
497
498 error = recv_check_large_blocks(snap, featureflags);
499 if (error != 0) {
500 dsl_dataset_rele(snap, FTAG);
501 return (error);
502 }
503
504 dsl_dataset_rele(snap, FTAG);
505 } else {
506 		/* If full and not healing, then it must be forced. */
507 if (!drba->drba_cookie->drc_force)
508 return (SET_ERROR(EEXIST));
509
510 /*
511 * We don't support using zfs recv -F to blow away
512 * encrypted filesystems. This would require the
513 * dsl dir to point to the old encryption key and
514 * the new one at the same time during the receive.
515 */
516 if ((!encrypted && raw) || encrypted)
517 return (SET_ERROR(EINVAL));
518
519 /*
520 * Perform the same encryption checks we would if
521 * we were creating a new dataset from scratch.
522 */
523 if (!raw) {
524 boolean_t will_encrypt;
525
526 error = dmu_objset_create_crypt_check(
527 ds->ds_dir->dd_parent, drba->drba_dcp,
528 &will_encrypt);
529 if (error != 0)
530 return (error);
531
532 if (will_encrypt && embed)
533 return (SET_ERROR(EINVAL));
534 }
535 }
536
537 return (0);
538 }
539
540 /*
541 * Check that any feature flags used in the data stream we're receiving are
542 * supported by the pool we are receiving into.
543 *
544 * Note that some of the features we explicitly check here have additional
545 * (implicit) features they depend on, but those dependencies are enforced
546 * through the zfeature_register() calls declaring the features that we
547 * explicitly check.
548 */
549 static int
550 recv_begin_check_feature_flags_impl(uint64_t featureflags, spa_t *spa)
551 {
552 /*
553 * Check if there are any unsupported feature flags.
554 */
555 if (!DMU_STREAM_SUPPORTED(featureflags)) {
556 return (SET_ERROR(ZFS_ERR_UNKNOWN_SEND_STREAM_FEATURE));
557 }
558
559 /* Verify pool version supports SA if SA_SPILL feature set */
560 if ((featureflags & DMU_BACKUP_FEATURE_SA_SPILL) &&
561 spa_version(spa) < SPA_VERSION_SA)
562 return (SET_ERROR(ENOTSUP));
563
564 /*
565 * LZ4 compressed, ZSTD compressed, embedded, mooched, large blocks,
566 * and large_dnodes in the stream can only be used if those pool
567 * features are enabled because we don't attempt to decompress /
568 * un-embed / un-mooch / split up the blocks / dnodes during the
569 * receive process.
570 */
571 if ((featureflags & DMU_BACKUP_FEATURE_LZ4) &&
572 !spa_feature_is_enabled(spa, SPA_FEATURE_LZ4_COMPRESS))
573 return (SET_ERROR(ENOTSUP));
574 if ((featureflags & DMU_BACKUP_FEATURE_ZSTD) &&
575 !spa_feature_is_enabled(spa, SPA_FEATURE_ZSTD_COMPRESS))
576 return (SET_ERROR(ENOTSUP));
577 if ((featureflags & DMU_BACKUP_FEATURE_EMBED_DATA) &&
578 !spa_feature_is_enabled(spa, SPA_FEATURE_EMBEDDED_DATA))
579 return (SET_ERROR(ENOTSUP));
580 if ((featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) &&
581 !spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS))
582 return (SET_ERROR(ENOTSUP));
583 if ((featureflags & DMU_BACKUP_FEATURE_LARGE_DNODE) &&
584 !spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_DNODE))
585 return (SET_ERROR(ENOTSUP));
586
587 /*
588 * Receiving redacted streams requires that redacted datasets are
589 * enabled.
590 */
591 if ((featureflags & DMU_BACKUP_FEATURE_REDACTED) &&
592 !spa_feature_is_enabled(spa, SPA_FEATURE_REDACTED_DATASETS))
593 return (SET_ERROR(ENOTSUP));
594
595 return (0);
596 }
597
598 static int
599 dmu_recv_begin_check(void *arg, dmu_tx_t *tx)
600 {
601 dmu_recv_begin_arg_t *drba = arg;
602 dsl_pool_t *dp = dmu_tx_pool(tx);
603 struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
604 uint64_t fromguid = drrb->drr_fromguid;
605 int flags = drrb->drr_flags;
606 ds_hold_flags_t dsflags = DS_HOLD_FLAG_NONE;
607 int error;
608 uint64_t featureflags = drba->drba_cookie->drc_featureflags;
609 dsl_dataset_t *ds;
610 const char *tofs = drba->drba_cookie->drc_tofs;
611
612 /* already checked */
613 ASSERT3U(drrb->drr_magic, ==, DMU_BACKUP_MAGIC);
614 ASSERT(!(featureflags & DMU_BACKUP_FEATURE_RESUMING));
615
616 if (DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) ==
617 DMU_COMPOUNDSTREAM ||
618 drrb->drr_type >= DMU_OST_NUMTYPES ||
619 ((flags & DRR_FLAG_CLONE) && drba->drba_origin == NULL))
620 return (SET_ERROR(EINVAL));
621
622 error = recv_begin_check_feature_flags_impl(featureflags, dp->dp_spa);
623 if (error != 0)
624 return (error);
625
626 /* Resumable receives require extensible datasets */
627 if (drba->drba_cookie->drc_resumable &&
628 !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_EXTENSIBLE_DATASET))
629 return (SET_ERROR(ENOTSUP));
630
631 if (featureflags & DMU_BACKUP_FEATURE_RAW) {
632 /* raw receives require the encryption feature */
633 if (!spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_ENCRYPTION))
634 return (SET_ERROR(ENOTSUP));
635
636 /* embedded data is incompatible with encryption and raw recv */
637 if (featureflags & DMU_BACKUP_FEATURE_EMBED_DATA)
638 return (SET_ERROR(EINVAL));
639
640 /* raw receives require spill block allocation flag */
641 if (!(flags & DRR_FLAG_SPILL_BLOCK))
642 return (SET_ERROR(ZFS_ERR_SPILL_BLOCK_FLAG_MISSING));
643 } else {
644 /*
645 * We support unencrypted datasets below encrypted ones now,
646 * so add the DS_HOLD_FLAG_DECRYPT flag only if we are dealing
647 * with a dataset we may encrypt.
648 */
649 if (drba->drba_dcp != NULL &&
650 drba->drba_dcp->cp_crypt != ZIO_CRYPT_OFF) {
651 dsflags |= DS_HOLD_FLAG_DECRYPT;
652 }
653 }
654
655 error = dsl_dataset_hold_flags(dp, tofs, dsflags, FTAG, &ds);
656 if (error == 0) {
657 /* target fs already exists; recv into temp clone */
658
659 /* Can't recv a clone into an existing fs */
660 if (flags & DRR_FLAG_CLONE || drba->drba_origin) {
661 dsl_dataset_rele_flags(ds, dsflags, FTAG);
662 return (SET_ERROR(EINVAL));
663 }
664
665 error = recv_begin_check_existing_impl(drba, ds, fromguid,
666 featureflags);
667 dsl_dataset_rele_flags(ds, dsflags, FTAG);
668 } else if (error == ENOENT) {
669 /* target fs does not exist; must be a full backup or clone */
670 char buf[ZFS_MAX_DATASET_NAME_LEN];
671 objset_t *os;
672
673 /* healing recv must be done "into" an existing snapshot */
674 if (drba->drba_cookie->drc_heal == B_TRUE)
675 return (SET_ERROR(ENOTSUP));
676
677 /*
678 * If it's a non-clone incremental, we are missing the
679 * target fs, so fail the recv.
680 */
681 if (fromguid != 0 && !((flags & DRR_FLAG_CLONE) ||
682 drba->drba_origin))
683 return (SET_ERROR(ENOENT));
684
685 /*
686 * If we're receiving a full send as a clone, and it doesn't
687 * contain all the necessary free records and freeobject
688 * records, reject it.
689 */
690 if (fromguid == 0 && drba->drba_origin != NULL &&
691 !(flags & DRR_FLAG_FREERECORDS))
692 return (SET_ERROR(EINVAL));
693
694 /* Open the parent of tofs */
695 ASSERT3U(strlen(tofs), <, sizeof (buf));
696 (void) strlcpy(buf, tofs, strrchr(tofs, '/') - tofs + 1);
697 error = dsl_dataset_hold(dp, buf, FTAG, &ds);
698 if (error != 0)
699 return (error);
700
701 if ((featureflags & DMU_BACKUP_FEATURE_RAW) == 0 &&
702 drba->drba_origin == NULL) {
703 boolean_t will_encrypt;
704
705 /*
706 * Check that we aren't breaking any encryption rules
707 * and that we have all the parameters we need to
708 * create an encrypted dataset if necessary. If we are
709 * making an encrypted dataset the stream can't have
710 * embedded data.
711 */
712 error = dmu_objset_create_crypt_check(ds->ds_dir,
713 drba->drba_dcp, &will_encrypt);
714 if (error != 0) {
715 dsl_dataset_rele(ds, FTAG);
716 return (error);
717 }
718
719 if (will_encrypt &&
720 (featureflags & DMU_BACKUP_FEATURE_EMBED_DATA)) {
721 dsl_dataset_rele(ds, FTAG);
722 return (SET_ERROR(EINVAL));
723 }
724 }
725
726 /*
727 * Check filesystem and snapshot limits before receiving. We'll
728 * recheck snapshot limits again at the end (we create the
729 * filesystems and increment those counts during begin_sync).
730 */
731 error = dsl_fs_ss_limit_check(ds->ds_dir, 1,
732 ZFS_PROP_FILESYSTEM_LIMIT, NULL,
733 drba->drba_cred, drba->drba_proc);
734 if (error != 0) {
735 dsl_dataset_rele(ds, FTAG);
736 return (error);
737 }
738
739 error = dsl_fs_ss_limit_check(ds->ds_dir, 1,
740 ZFS_PROP_SNAPSHOT_LIMIT, NULL,
741 drba->drba_cred, drba->drba_proc);
742 if (error != 0) {
743 dsl_dataset_rele(ds, FTAG);
744 return (error);
745 }
746
747 /* can't recv below anything but filesystems (eg. no ZVOLs) */
748 error = dmu_objset_from_ds(ds, &os);
749 if (error != 0) {
750 dsl_dataset_rele(ds, FTAG);
751 return (error);
752 }
753 if (dmu_objset_type(os) != DMU_OST_ZFS) {
754 dsl_dataset_rele(ds, FTAG);
755 return (SET_ERROR(ZFS_ERR_WRONG_PARENT));
756 }
757
758 if (drba->drba_origin != NULL) {
759 dsl_dataset_t *origin;
760 error = dsl_dataset_hold_flags(dp, drba->drba_origin,
761 dsflags, FTAG, &origin);
762 if (error != 0) {
763 dsl_dataset_rele(ds, FTAG);
764 return (error);
765 }
766 if (!origin->ds_is_snapshot) {
767 dsl_dataset_rele_flags(origin, dsflags, FTAG);
768 dsl_dataset_rele(ds, FTAG);
769 return (SET_ERROR(EINVAL));
770 }
771 if (dsl_dataset_phys(origin)->ds_guid != fromguid &&
772 fromguid != 0) {
773 dsl_dataset_rele_flags(origin, dsflags, FTAG);
774 dsl_dataset_rele(ds, FTAG);
775 return (SET_ERROR(ENODEV));
776 }
777
778 if (origin->ds_dir->dd_crypto_obj != 0 &&
779 (featureflags & DMU_BACKUP_FEATURE_EMBED_DATA)) {
780 dsl_dataset_rele_flags(origin, dsflags, FTAG);
781 dsl_dataset_rele(ds, FTAG);
782 return (SET_ERROR(EINVAL));
783 }
784
785 /*
786 * If the origin is redacted we need to verify that this
787 * send stream can safely be received on top of the
788 * origin.
789 */
790 if (dsl_dataset_feature_is_active(origin,
791 SPA_FEATURE_REDACTED_DATASETS)) {
792 if (!redact_check(drba, origin)) {
793 dsl_dataset_rele_flags(origin, dsflags,
794 FTAG);
795 dsl_dataset_rele_flags(ds, dsflags,
796 FTAG);
797 return (SET_ERROR(EINVAL));
798 }
799 }
800
801 error = recv_check_large_blocks(ds, featureflags);
802 if (error != 0) {
803 dsl_dataset_rele_flags(origin, dsflags, FTAG);
804 dsl_dataset_rele_flags(ds, dsflags, FTAG);
805 return (error);
806 }
807
808 dsl_dataset_rele_flags(origin, dsflags, FTAG);
809 }
810
811 dsl_dataset_rele(ds, FTAG);
812 error = 0;
813 }
814 return (error);
815 }
816
817 static void
818 dmu_recv_begin_sync(void *arg, dmu_tx_t *tx)
819 {
820 dmu_recv_begin_arg_t *drba = arg;
821 dsl_pool_t *dp = dmu_tx_pool(tx);
822 objset_t *mos = dp->dp_meta_objset;
823 dmu_recv_cookie_t *drc = drba->drba_cookie;
824 struct drr_begin *drrb = drc->drc_drrb;
825 const char *tofs = drc->drc_tofs;
826 uint64_t featureflags = drc->drc_featureflags;
827 dsl_dataset_t *ds, *newds;
828 objset_t *os;
829 uint64_t dsobj;
830 ds_hold_flags_t dsflags = DS_HOLD_FLAG_NONE;
831 int error;
832 uint64_t crflags = 0;
833 dsl_crypto_params_t dummy_dcp = { 0 };
834 dsl_crypto_params_t *dcp = drba->drba_dcp;
835
836 if (drrb->drr_flags & DRR_FLAG_CI_DATA)
837 crflags |= DS_FLAG_CI_DATASET;
838
839 if ((featureflags & DMU_BACKUP_FEATURE_RAW) == 0)
840 dsflags |= DS_HOLD_FLAG_DECRYPT;
841
842 /*
843 * Raw, non-incremental recvs always use a dummy dcp with
844 * the raw cmd set. Raw incremental recvs do not use a dcp
845 * since the encryption parameters are already set in stone.
846 */
847 if (dcp == NULL && drrb->drr_fromguid == 0 &&
848 drba->drba_origin == NULL) {
849 ASSERT3P(dcp, ==, NULL);
850 dcp = &dummy_dcp;
851
852 if (featureflags & DMU_BACKUP_FEATURE_RAW)
853 dcp->cp_cmd = DCP_CMD_RAW_RECV;
854 }
855
856 error = dsl_dataset_hold_flags(dp, tofs, dsflags, FTAG, &ds);
857 if (error == 0) {
858 /* Create temporary clone unless we're doing corrective recv */
859 dsl_dataset_t *snap = NULL;
860
861 if (drba->drba_cookie->drc_fromsnapobj != 0) {
862 VERIFY0(dsl_dataset_hold_obj(dp,
863 drba->drba_cookie->drc_fromsnapobj, FTAG, &snap));
864 ASSERT3P(dcp, ==, NULL);
865 }
866 if (drc->drc_heal) {
867 /* When healing we want to use the provided snapshot */
868 VERIFY0(dsl_dataset_snap_lookup(ds, drc->drc_tosnap,
869 &dsobj));
870 } else {
871 dsobj = dsl_dataset_create_sync(ds->ds_dir,
872 recv_clone_name, snap, crflags, drba->drba_cred,
873 dcp, tx);
874 }
875 if (drba->drba_cookie->drc_fromsnapobj != 0)
876 dsl_dataset_rele(snap, FTAG);
877 dsl_dataset_rele_flags(ds, dsflags, FTAG);
878 } else {
879 dsl_dir_t *dd;
880 const char *tail;
881 dsl_dataset_t *origin = NULL;
882
883 VERIFY0(dsl_dir_hold(dp, tofs, FTAG, &dd, &tail));
884
885 if (drba->drba_origin != NULL) {
886 VERIFY0(dsl_dataset_hold(dp, drba->drba_origin,
887 FTAG, &origin));
888 ASSERT3P(dcp, ==, NULL);
889 }
890
891 /* Create new dataset. */
892 dsobj = dsl_dataset_create_sync(dd, strrchr(tofs, '/') + 1,
893 origin, crflags, drba->drba_cred, dcp, tx);
894 if (origin != NULL)
895 dsl_dataset_rele(origin, FTAG);
896 dsl_dir_rele(dd, FTAG);
897 drc->drc_newfs = B_TRUE;
898 }
899 VERIFY0(dsl_dataset_own_obj_force(dp, dsobj, dsflags, dmu_recv_tag,
900 &newds));
901 if (dsl_dataset_feature_is_active(newds,
902 SPA_FEATURE_REDACTED_DATASETS)) {
903 /*
904 * If the origin dataset is redacted, the child will be redacted
905 * when we create it. We clear the new dataset's
906 * redaction info; if it should be redacted, we'll fill
907 * in its information later.
908 */
909 dsl_dataset_deactivate_feature(newds,
910 SPA_FEATURE_REDACTED_DATASETS, tx);
911 }
912 VERIFY0(dmu_objset_from_ds(newds, &os));
913
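/*
 * For a resumable receive, persist enough state in the dataset's ZAP
 * (the DS_FIELD_RESUME_* entries) for an interrupted stream to be
 * resumed later; the receive_resume_token property is constructed from
 * these same fields.
 */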
914 if (drc->drc_resumable) {
915 dsl_dataset_zapify(newds, tx);
916 if (drrb->drr_fromguid != 0) {
917 VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_FROMGUID,
918 8, 1, &drrb->drr_fromguid, tx));
919 }
920 VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_TOGUID,
921 8, 1, &drrb->drr_toguid, tx));
922 VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_TONAME,
923 1, strlen(drrb->drr_toname) + 1, drrb->drr_toname, tx));
924 uint64_t one = 1;
925 uint64_t zero = 0;
926 VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_OBJECT,
927 8, 1, &one, tx));
928 VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_OFFSET,
929 8, 1, &zero, tx));
930 VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_BYTES,
931 8, 1, &zero, tx));
932 if (featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) {
933 VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_LARGEBLOCK,
934 8, 1, &one, tx));
935 }
936 if (featureflags & DMU_BACKUP_FEATURE_EMBED_DATA) {
937 VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_EMBEDOK,
938 8, 1, &one, tx));
939 }
940 if (featureflags & DMU_BACKUP_FEATURE_COMPRESSED) {
941 VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_COMPRESSOK,
942 8, 1, &one, tx));
943 }
944 if (featureflags & DMU_BACKUP_FEATURE_RAW) {
945 VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_RAWOK,
946 8, 1, &one, tx));
947 }
948
949 uint64_t *redact_snaps;
950 uint_t numredactsnaps;
951 if (nvlist_lookup_uint64_array(drc->drc_begin_nvl,
952 BEGINNV_REDACT_FROM_SNAPS, &redact_snaps,
953 &numredactsnaps) == 0) {
954 VERIFY0(zap_add(mos, dsobj,
955 DS_FIELD_RESUME_REDACT_BOOKMARK_SNAPS,
956 sizeof (*redact_snaps), numredactsnaps,
957 redact_snaps, tx));
958 }
959 }
960
961 /*
962 * Usually the os->os_encrypted value is tied to the presence of a
963 * DSL Crypto Key object in the dd. However, that will not be received
964 * until dmu_recv_stream(), so we set the value manually for now.
965 */
966 if (featureflags & DMU_BACKUP_FEATURE_RAW) {
967 os->os_encrypted = B_TRUE;
968 drba->drba_cookie->drc_raw = B_TRUE;
969 }
970
971 if (featureflags & DMU_BACKUP_FEATURE_REDACTED) {
972 uint64_t *redact_snaps;
973 uint_t numredactsnaps;
974 VERIFY0(nvlist_lookup_uint64_array(drc->drc_begin_nvl,
975 BEGINNV_REDACT_SNAPS, &redact_snaps, &numredactsnaps));
976 dsl_dataset_activate_redaction(newds, redact_snaps,
977 numredactsnaps, tx);
978 }
979
980 dmu_buf_will_dirty(newds->ds_dbuf, tx);
981 dsl_dataset_phys(newds)->ds_flags |= DS_FLAG_INCONSISTENT;
982
983 /*
984 * If we actually created a non-clone, we need to create the objset
985 * in our new dataset. If this is a raw send we postpone this until
986 * dmu_recv_stream() so that we can allocate the metadnode with the
987 * properties from the DRR_BEGIN payload.
988 */
989 rrw_enter(&newds->ds_bp_rwlock, RW_READER, FTAG);
990 if (BP_IS_HOLE(dsl_dataset_get_blkptr(newds)) &&
991 (featureflags & DMU_BACKUP_FEATURE_RAW) == 0 &&
992 !drc->drc_heal) {
993 (void) dmu_objset_create_impl(dp->dp_spa,
994 newds, dsl_dataset_get_blkptr(newds), drrb->drr_type, tx);
995 }
996 rrw_exit(&newds->ds_bp_rwlock, FTAG);
997
998 drba->drba_cookie->drc_ds = newds;
999 drba->drba_cookie->drc_os = os;
1000
1001 spa_history_log_internal_ds(newds, "receive", tx, " ");
1002 }
1003
1004 static int
1005 dmu_recv_resume_begin_check(void *arg, dmu_tx_t *tx)
1006 {
1007 dmu_recv_begin_arg_t *drba = arg;
1008 dmu_recv_cookie_t *drc = drba->drba_cookie;
1009 dsl_pool_t *dp = dmu_tx_pool(tx);
1010 struct drr_begin *drrb = drc->drc_drrb;
1011 int error;
1012 ds_hold_flags_t dsflags = DS_HOLD_FLAG_NONE;
1013 dsl_dataset_t *ds;
1014 const char *tofs = drc->drc_tofs;
1015
1016 /* already checked */
1017 ASSERT3U(drrb->drr_magic, ==, DMU_BACKUP_MAGIC);
1018 ASSERT(drc->drc_featureflags & DMU_BACKUP_FEATURE_RESUMING);
1019
1020 if (DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) ==
1021 DMU_COMPOUNDSTREAM ||
1022 drrb->drr_type >= DMU_OST_NUMTYPES)
1023 return (SET_ERROR(EINVAL));
1024
1025 /*
1026 * This is mostly a sanity check since we should have already done these
1027 * checks during a previous attempt to receive the data.
1028 */
1029 error = recv_begin_check_feature_flags_impl(drc->drc_featureflags,
1030 dp->dp_spa);
1031 if (error != 0)
1032 return (error);
1033
1034 /* 6 extra bytes for /%recv */
1035 char recvname[ZFS_MAX_DATASET_NAME_LEN + 6];
1036
1037 (void) snprintf(recvname, sizeof (recvname), "%s/%s",
1038 tofs, recv_clone_name);
1039
1040 if (drc->drc_featureflags & DMU_BACKUP_FEATURE_RAW) {
1041 /* raw receives require spill block allocation flag */
1042 if (!(drrb->drr_flags & DRR_FLAG_SPILL_BLOCK))
1043 return (SET_ERROR(ZFS_ERR_SPILL_BLOCK_FLAG_MISSING));
1044 } else {
1045 dsflags |= DS_HOLD_FLAG_DECRYPT;
1046 }
1047
1048 if (dsl_dataset_hold_flags(dp, recvname, dsflags, FTAG, &ds) != 0) {
1049 /* %recv does not exist; continue in tofs */
1050 error = dsl_dataset_hold_flags(dp, tofs, dsflags, FTAG, &ds);
1051 if (error != 0)
1052 return (error);
1053 }
1054
1055 /* check that ds is marked inconsistent */
1056 if (!DS_IS_INCONSISTENT(ds)) {
1057 dsl_dataset_rele_flags(ds, dsflags, FTAG);
1058 return (SET_ERROR(EINVAL));
1059 }
1060
1061 /* check that there is resuming data, and that the toguid matches */
1062 if (!dsl_dataset_is_zapified(ds)) {
1063 dsl_dataset_rele_flags(ds, dsflags, FTAG);
1064 return (SET_ERROR(EINVAL));
1065 }
1066 uint64_t val;
1067 error = zap_lookup(dp->dp_meta_objset, ds->ds_object,
1068 DS_FIELD_RESUME_TOGUID, sizeof (val), 1, &val);
1069 if (error != 0 || drrb->drr_toguid != val) {
1070 dsl_dataset_rele_flags(ds, dsflags, FTAG);
1071 return (SET_ERROR(EINVAL));
1072 }
1073
1074 /*
1075 * Check if the receive is still running. If so, it will be owned.
1076 * Note that nothing else can own the dataset (e.g. after the receive
1077 * fails) because it will be marked inconsistent.
1078 */
1079 if (dsl_dataset_has_owner(ds)) {
1080 dsl_dataset_rele_flags(ds, dsflags, FTAG);
1081 return (SET_ERROR(EBUSY));
1082 }
1083
1084 /* There should not be any snapshots of this fs yet. */
1085 if (ds->ds_prev != NULL && ds->ds_prev->ds_dir == ds->ds_dir) {
1086 dsl_dataset_rele_flags(ds, dsflags, FTAG);
1087 return (SET_ERROR(EINVAL));
1088 }
1089
1090 /*
1091 * Note: resume point will be checked when we process the first WRITE
1092 * record.
1093 */
1094
1095 /* check that the origin matches */
1096 val = 0;
1097 (void) zap_lookup(dp->dp_meta_objset, ds->ds_object,
1098 DS_FIELD_RESUME_FROMGUID, sizeof (val), 1, &val);
1099 if (drrb->drr_fromguid != val) {
1100 dsl_dataset_rele_flags(ds, dsflags, FTAG);
1101 return (SET_ERROR(EINVAL));
1102 }
1103
1104 if (ds->ds_prev != NULL && drrb->drr_fromguid != 0)
1105 drc->drc_fromsnapobj = ds->ds_prev->ds_object;
1106
1107 /*
1108 * If we're resuming, and the send is redacted, then the original send
1109 * must have been redacted, and must have been redacted with respect to
1110 * the same snapshots.
1111 */
1112 if (drc->drc_featureflags & DMU_BACKUP_FEATURE_REDACTED) {
1113 uint64_t num_ds_redact_snaps;
1114 uint64_t *ds_redact_snaps;
1115
1116 uint_t num_stream_redact_snaps;
1117 uint64_t *stream_redact_snaps;
1118
1119 if (nvlist_lookup_uint64_array(drc->drc_begin_nvl,
1120 BEGINNV_REDACT_SNAPS, &stream_redact_snaps,
1121 &num_stream_redact_snaps) != 0) {
1122 dsl_dataset_rele_flags(ds, dsflags, FTAG);
1123 return (SET_ERROR(EINVAL));
1124 }
1125
1126 if (!dsl_dataset_get_uint64_array_feature(ds,
1127 SPA_FEATURE_REDACTED_DATASETS, &num_ds_redact_snaps,
1128 &ds_redact_snaps)) {
1129 dsl_dataset_rele_flags(ds, dsflags, FTAG);
1130 return (SET_ERROR(EINVAL));
1131 }
1132
1133 for (int i = 0; i < num_ds_redact_snaps; i++) {
1134 if (!redact_snaps_contains(ds_redact_snaps,
1135 num_ds_redact_snaps, stream_redact_snaps[i])) {
1136 dsl_dataset_rele_flags(ds, dsflags, FTAG);
1137 return (SET_ERROR(EINVAL));
1138 }
1139 }
1140 }
1141
1142 error = recv_check_large_blocks(ds, drc->drc_featureflags);
1143 if (error != 0) {
1144 dsl_dataset_rele_flags(ds, dsflags, FTAG);
1145 return (error);
1146 }
1147
1148 dsl_dataset_rele_flags(ds, dsflags, FTAG);
1149 return (0);
1150 }
1151
1152 static void
1153 dmu_recv_resume_begin_sync(void *arg, dmu_tx_t *tx)
1154 {
1155 dmu_recv_begin_arg_t *drba = arg;
1156 dsl_pool_t *dp = dmu_tx_pool(tx);
1157 const char *tofs = drba->drba_cookie->drc_tofs;
1158 uint64_t featureflags = drba->drba_cookie->drc_featureflags;
1159 dsl_dataset_t *ds;
1160 ds_hold_flags_t dsflags = DS_HOLD_FLAG_NONE;
1161 /* 6 extra bytes for /%recv */
1162 char recvname[ZFS_MAX_DATASET_NAME_LEN + 6];
1163
1164 (void) snprintf(recvname, sizeof (recvname), "%s/%s", tofs,
1165 recv_clone_name);
1166
1167 if (featureflags & DMU_BACKUP_FEATURE_RAW) {
1168 drba->drba_cookie->drc_raw = B_TRUE;
1169 } else {
1170 dsflags |= DS_HOLD_FLAG_DECRYPT;
1171 }
1172
1173 if (dsl_dataset_own_force(dp, recvname, dsflags, dmu_recv_tag, &ds)
1174 != 0) {
1175 /* %recv does not exist; continue in tofs */
1176 VERIFY0(dsl_dataset_own_force(dp, tofs, dsflags, dmu_recv_tag,
1177 &ds));
1178 drba->drba_cookie->drc_newfs = B_TRUE;
1179 }
1180
1181 ASSERT(DS_IS_INCONSISTENT(ds));
1182 rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
1183 ASSERT(!BP_IS_HOLE(dsl_dataset_get_blkptr(ds)) ||
1184 drba->drba_cookie->drc_raw);
1185 rrw_exit(&ds->ds_bp_rwlock, FTAG);
1186
1187 drba->drba_cookie->drc_ds = ds;
1188 VERIFY0(dmu_objset_from_ds(ds, &drba->drba_cookie->drc_os));
1189 drba->drba_cookie->drc_should_save = B_TRUE;
1190
1191 spa_history_log_internal_ds(ds, "resume receive", tx, " ");
1192 }
1193
1194 /*
1195 * NB: callers *MUST* call dmu_recv_stream() if dmu_recv_begin()
1196 * succeeds; otherwise we will leak the holds on the datasets.
1197 */
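/*
 * Illustrative call sequence (a sketch only; the real driver is the receive
 * ioctl path in zfs_ioctl.c):
 *
 *	dmu_recv_cookie_t drc;
 *	err = dmu_recv_begin(tofs, tosnap, drr_begin, force, heal, resumable,
 *	    localprops, hidden_args, origin, &drc, fp, &off);
 *	if (err == 0)
 *		err = dmu_recv_stream(&drc, &off);
 *	if (err == 0)
 *		err = dmu_recv_end(&drc, owner);
 */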
1198 int
1199 dmu_recv_begin(char *tofs, char *tosnap, dmu_replay_record_t *drr_begin,
1200 boolean_t force, boolean_t heal, boolean_t resumable, nvlist_t *localprops,
1201 nvlist_t *hidden_args, char *origin, dmu_recv_cookie_t *drc,
1202 zfs_file_t *fp, offset_t *voffp)
1203 {
1204 dmu_recv_begin_arg_t drba = { 0 };
1205 int err;
1206
1207 memset(drc, 0, sizeof (dmu_recv_cookie_t));
1208 drc->drc_drr_begin = drr_begin;
1209 drc->drc_drrb = &drr_begin->drr_u.drr_begin;
1210 drc->drc_tosnap = tosnap;
1211 drc->drc_tofs = tofs;
1212 drc->drc_force = force;
1213 drc->drc_heal = heal;
1214 drc->drc_resumable = resumable;
1215 drc->drc_cred = CRED();
1216 drc->drc_proc = curproc;
1217 drc->drc_clone = (origin != NULL);
1218
1219 if (drc->drc_drrb->drr_magic == BSWAP_64(DMU_BACKUP_MAGIC)) {
1220 drc->drc_byteswap = B_TRUE;
1221 (void) fletcher_4_incremental_byteswap(drr_begin,
1222 sizeof (dmu_replay_record_t), &drc->drc_cksum);
1223 byteswap_record(drr_begin);
1224 } else if (drc->drc_drrb->drr_magic == DMU_BACKUP_MAGIC) {
1225 (void) fletcher_4_incremental_native(drr_begin,
1226 sizeof (dmu_replay_record_t), &drc->drc_cksum);
1227 } else {
1228 return (SET_ERROR(EINVAL));
1229 }
1230
1231 drc->drc_fp = fp;
1232 drc->drc_voff = *voffp;
1233 drc->drc_featureflags =
1234 DMU_GET_FEATUREFLAGS(drc->drc_drrb->drr_versioninfo);
1235
1236 uint32_t payloadlen = drc->drc_drr_begin->drr_payloadlen;
1237 void *payload = NULL;
1238 if (payloadlen != 0)
1239 payload = kmem_alloc(payloadlen, KM_SLEEP);
1240
1241 err = receive_read_payload_and_next_header(drc, payloadlen,
1242 payload);
1243 if (err != 0) {
1244 kmem_free(payload, payloadlen);
1245 return (err);
1246 }
1247 if (payloadlen != 0) {
1248 err = nvlist_unpack(payload, payloadlen, &drc->drc_begin_nvl,
1249 KM_SLEEP);
1250 kmem_free(payload, payloadlen);
1251 if (err != 0) {
1252 kmem_free(drc->drc_next_rrd,
1253 sizeof (*drc->drc_next_rrd));
1254 return (err);
1255 }
1256 }
1257
1258 if (drc->drc_drrb->drr_flags & DRR_FLAG_SPILL_BLOCK)
1259 drc->drc_spill = B_TRUE;
1260
1261 drba.drba_origin = origin;
1262 drba.drba_cookie = drc;
1263 drba.drba_cred = CRED();
1264 drba.drba_proc = curproc;
1265
1266 if (drc->drc_featureflags & DMU_BACKUP_FEATURE_RESUMING) {
1267 err = dsl_sync_task(tofs,
1268 dmu_recv_resume_begin_check, dmu_recv_resume_begin_sync,
1269 &drba, 5, ZFS_SPACE_CHECK_NORMAL);
1270 } else {
1271 /*
1272 * For non-raw, non-incremental, non-resuming receives the
1273 * user can specify encryption parameters on the command line
1274 * with "zfs recv -o". For these receives we create a dcp and
1275 * pass it to the sync task. Creating the dcp will implicitly
1276 * remove the encryption params from the localprops nvlist,
1277 * which avoids errors when trying to set these normally
1278 * read-only properties. Any other kind of receive that
1279 * attempts to set these properties will fail as a result.
1280 */
1281 if ((DMU_GET_FEATUREFLAGS(drc->drc_drrb->drr_versioninfo) &
1282 DMU_BACKUP_FEATURE_RAW) == 0 &&
1283 origin == NULL && drc->drc_drrb->drr_fromguid == 0) {
1284 err = dsl_crypto_params_create_nvlist(DCP_CMD_NONE,
1285 localprops, hidden_args, &drba.drba_dcp);
1286 }
1287
1288 if (err == 0) {
1289 err = dsl_sync_task(tofs,
1290 dmu_recv_begin_check, dmu_recv_begin_sync,
1291 &drba, 5, ZFS_SPACE_CHECK_NORMAL);
1292 dsl_crypto_params_free(drba.drba_dcp, !!err);
1293 }
1294 }
1295
1296 if (err != 0) {
1297 kmem_free(drc->drc_next_rrd, sizeof (*drc->drc_next_rrd));
1298 nvlist_free(drc->drc_begin_nvl);
1299 }
1300 return (err);
1301 }
1302
1303 /*
1304  * Holds data needed for the corrective recv callback
1305 */
1306 typedef struct cr_cb_data {
1307 uint64_t size;
1308 zbookmark_phys_t zb;
1309 spa_t *spa;
1310 } cr_cb_data_t;
1311
1312 static void
1313 corrective_read_done(zio_t *zio)
1314 {
1315 cr_cb_data_t *data = zio->io_private;
1316 /* Corruption corrected; update error log if needed */
1317 if (zio->io_error == 0)
1318 spa_remove_error(data->spa, &data->zb);
1319 kmem_free(data, sizeof (cr_cb_data_t));
1320 abd_free(zio->io_abd);
1321 }
1322
1323 /*
1324 * zio_rewrite the data pointed to by bp with the data from the rrd's abd.
1325 */
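/*
 * Roughly: decompress the stream data if needed, then recompress and/or
 * re-encrypt it to match the existing block pointer, verify that its
 * checksum equals the bp's checksum, and only then rewrite the block in
 * place; finally re-read the block to confirm the healing took effect.
 */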
1326 static int
1327 do_corrective_recv(struct receive_writer_arg *rwa, struct drr_write *drrw,
1328 struct receive_record_arg *rrd, blkptr_t *bp)
1329 {
1330 int err;
1331 zio_t *io;
1332 zbookmark_phys_t zb;
1333 dnode_t *dn;
1334 abd_t *abd = rrd->abd;
1335 zio_cksum_t bp_cksum = bp->blk_cksum;
1336 enum zio_flag flags = ZIO_FLAG_SPECULATIVE |
1337 ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_RETRY | ZIO_FLAG_CANFAIL;
1338
1339 if (rwa->raw)
1340 flags |= ZIO_FLAG_RAW;
1341
1342 err = dnode_hold(rwa->os, drrw->drr_object, FTAG, &dn);
1343 if (err != 0)
1344 return (err);
1345 SET_BOOKMARK(&zb, dmu_objset_id(rwa->os), drrw->drr_object, 0,
1346 dbuf_whichblock(dn, 0, drrw->drr_offset));
1347 dnode_rele(dn, FTAG);
1348
1349 if (!rwa->raw && DRR_WRITE_COMPRESSED(drrw)) {
1350 /* Decompress the stream data */
1351 abd_t *dabd = abd_alloc_linear(
1352 drrw->drr_logical_size, B_FALSE);
1353 err = zio_decompress_data(drrw->drr_compressiontype,
1354 abd, abd_to_buf(dabd), abd_get_size(abd),
1355 abd_get_size(dabd), NULL);
1356
1357 if (err != 0) {
1358 abd_free(dabd);
1359 return (err);
1360 }
1361 /* Swap in the newly decompressed data into the abd */
1362 abd_free(abd);
1363 abd = dabd;
1364 }
1365
1366 if (!rwa->raw && BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF) {
1367 /* Recompress the data */
1368 abd_t *cabd = abd_alloc_linear(BP_GET_PSIZE(bp),
1369 B_FALSE);
1370 uint64_t csize = zio_compress_data(BP_GET_COMPRESS(bp),
1371 abd, abd_to_buf(cabd), abd_get_size(abd),
1372 rwa->os->os_complevel);
1373 abd_zero_off(cabd, csize, BP_GET_PSIZE(bp) - csize);
1374 /* Swap in newly compressed data into the abd */
1375 abd_free(abd);
1376 abd = cabd;
1377 flags |= ZIO_FLAG_RAW_COMPRESS;
1378 }
1379
1380 /*
1381 * The stream is not encrypted but the data on-disk is.
1382 * We need to re-encrypt the buf using the same
1383 	 * encryption type, salt, iv, and mac that were used to encrypt
1384 	 * the block previously.
1385 */
1386 if (!rwa->raw && BP_USES_CRYPT(bp)) {
1387 dsl_dataset_t *ds;
1388 dsl_crypto_key_t *dck = NULL;
1389 uint8_t salt[ZIO_DATA_SALT_LEN];
1390 uint8_t iv[ZIO_DATA_IV_LEN];
1391 uint8_t mac[ZIO_DATA_MAC_LEN];
1392 boolean_t no_crypt = B_FALSE;
1393 dsl_pool_t *dp = dmu_objset_pool(rwa->os);
1394 abd_t *eabd = abd_alloc_linear(BP_GET_PSIZE(bp), B_FALSE);
1395
1396 zio_crypt_decode_params_bp(bp, salt, iv);
1397 zio_crypt_decode_mac_bp(bp, mac);
1398
1399 dsl_pool_config_enter(dp, FTAG);
1400 err = dsl_dataset_hold_flags(dp, rwa->tofs,
1401 DS_HOLD_FLAG_DECRYPT, FTAG, &ds);
1402 if (err != 0) {
1403 dsl_pool_config_exit(dp, FTAG);
1404 abd_free(eabd);
1405 return (SET_ERROR(EACCES));
1406 }
1407
1408 /* Look up the key from the spa's keystore */
1409 err = spa_keystore_lookup_key(rwa->os->os_spa,
1410 zb.zb_objset, FTAG, &dck);
1411 if (err != 0) {
1412 dsl_dataset_rele_flags(ds, DS_HOLD_FLAG_DECRYPT,
1413 FTAG);
1414 dsl_pool_config_exit(dp, FTAG);
1415 abd_free(eabd);
1416 return (SET_ERROR(EACCES));
1417 }
1418
1419 err = zio_do_crypt_abd(B_TRUE, &dck->dck_key,
1420 BP_GET_TYPE(bp), BP_SHOULD_BYTESWAP(bp), salt, iv,
1421 mac, abd_get_size(abd), abd, eabd, &no_crypt);
1422
1423 spa_keystore_dsl_key_rele(rwa->os->os_spa, dck, FTAG);
1424 dsl_dataset_rele_flags(ds, DS_HOLD_FLAG_DECRYPT, FTAG);
1425 dsl_pool_config_exit(dp, FTAG);
1426
1427 ASSERT0(no_crypt);
1428 if (err != 0) {
1429 abd_free(eabd);
1430 return (err);
1431 }
1432 /* Swap in the newly encrypted data into the abd */
1433 abd_free(abd);
1434 abd = eabd;
1435
1436 /*
1437 * We want to prevent zio_rewrite() from trying to
1438 * encrypt the data again
1439 */
1440 flags |= ZIO_FLAG_RAW_ENCRYPT;
1441 }
1442 rrd->abd = abd;
1443
1444 io = zio_rewrite(NULL, rwa->os->os_spa, bp->blk_birth, bp, abd,
1445 BP_GET_PSIZE(bp), NULL, NULL, ZIO_PRIORITY_SYNC_WRITE, flags, &zb);
1446
1447 ASSERT(abd_get_size(abd) == BP_GET_LSIZE(bp) ||
1448 abd_get_size(abd) == BP_GET_PSIZE(bp));
1449
1450 /* compute new bp checksum value and make sure it matches the old one */
1451 zio_checksum_compute(io, BP_GET_CHECKSUM(bp), abd, abd_get_size(abd));
1452 if (!ZIO_CHECKSUM_EQUAL(bp_cksum, io->io_bp->blk_cksum)) {
1453 zio_destroy(io);
1454 if (zfs_recv_best_effort_corrective != 0)
1455 return (0);
1456 return (SET_ERROR(ECKSUM));
1457 }
1458
1459 /* Correct the corruption in place */
1460 err = zio_wait(io);
1461 if (err == 0) {
1462 cr_cb_data_t *cb_data =
1463 kmem_alloc(sizeof (cr_cb_data_t), KM_SLEEP);
1464 cb_data->spa = rwa->os->os_spa;
1465 cb_data->size = drrw->drr_logical_size;
1466 cb_data->zb = zb;
1467 /* Test if healing worked by re-reading the bp */
1468 err = zio_wait(zio_read(rwa->heal_pio, rwa->os->os_spa, bp,
1469 abd_alloc_for_io(drrw->drr_logical_size, B_FALSE),
1470 drrw->drr_logical_size, corrective_read_done,
1471 cb_data, ZIO_PRIORITY_ASYNC_READ, flags, NULL));
1472 }
1473 if (err != 0 && zfs_recv_best_effort_corrective != 0)
1474 err = 0;
1475
1476 return (err);
1477 }
1478
1479 static int
1480 receive_read(dmu_recv_cookie_t *drc, int len, void *buf)
1481 {
1482 int done = 0;
1483
1484 /*
1485 * The code doesn't rely on this (lengths being multiples of 8). See
1486 * comment in dump_bytes.
1487 */
1488 ASSERT(len % 8 == 0 ||
1489 (drc->drc_featureflags & DMU_BACKUP_FEATURE_RAW) != 0);
1490
1491 while (done < len) {
1492 ssize_t resid;
1493 zfs_file_t *fp = drc->drc_fp;
1494 int err = zfs_file_read(fp, (char *)buf + done,
1495 len - done, &resid);
1496 if (resid == len - done) {
1497 /*
1498 * Note: ECKSUM or ZFS_ERR_STREAM_TRUNCATED indicates
1499 * that the receive was interrupted and can
1500 * potentially be resumed.
1501 */
1502 err = SET_ERROR(ZFS_ERR_STREAM_TRUNCATED);
1503 }
1504 drc->drc_voff += len - done - resid;
1505 done = len - resid;
1506 if (err != 0)
1507 return (err);
1508 }
1509
1510 drc->drc_bytes_read += len;
1511
1512 ASSERT3U(done, ==, len);
1513 return (0);
1514 }
1515
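/*
 * Work out how many block pointers a dnode with the given bonus type and
 * size has room for: SA bonus buffers always leave exactly one block
 * pointer; otherwise, whatever legacy bonus space the bonus buffer does not
 * use is filled with block pointers.  For example, assuming the legacy
 * 512-byte dnode layout (DN_OLD_MAX_BONUSLEN == 320, 128-byte block
 * pointers), a 64-byte bonus leaves room for 3 block pointers while a full
 * 320-byte bonus leaves room for only 1.
 */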
1516 static inline uint8_t
1517 deduce_nblkptr(dmu_object_type_t bonus_type, uint64_t bonus_size)
1518 {
1519 if (bonus_type == DMU_OT_SA) {
1520 return (1);
1521 } else {
1522 return (1 +
1523 ((DN_OLD_MAX_BONUSLEN -
1524 MIN(DN_OLD_MAX_BONUSLEN, bonus_size)) >> SPA_BLKPTRSHIFT));
1525 }
1526 }
1527
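/*
 * Record how far into the stream we have successfully gotten (object,
 * offset, and bytes read) in the in-core dataset so that the resume state
 * written out with the dataset stays current.  Only meaningful for
 * resumable receives.
 */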
1528 static void
1529 save_resume_state(struct receive_writer_arg *rwa,
1530 uint64_t object, uint64_t offset, dmu_tx_t *tx)
1531 {
1532 int txgoff = dmu_tx_get_txg(tx) & TXG_MASK;
1533
1534 if (!rwa->resumable)
1535 return;
1536
1537 /*
1538 * We use ds_resume_bytes[] != 0 to indicate that we need to
1539 * update this on disk, so it must not be 0.
1540 */
1541 ASSERT(rwa->bytes_read != 0);
1542
1543 /*
1544 * We only resume from write records, which have a valid
1545 * (non-meta-dnode) object number.
1546 */
1547 ASSERT(object != 0);
1548
1549 /*
1550 * For resuming to work correctly, we must receive records in order,
1551 * sorted by object,offset. This is checked by the callers, but
1552 * assert it here for good measure.
1553 */
1554 ASSERT3U(object, >=, rwa->os->os_dsl_dataset->ds_resume_object[txgoff]);
1555 ASSERT(object != rwa->os->os_dsl_dataset->ds_resume_object[txgoff] ||
1556 offset >= rwa->os->os_dsl_dataset->ds_resume_offset[txgoff]);
1557 ASSERT3U(rwa->bytes_read, >=,
1558 rwa->os->os_dsl_dataset->ds_resume_bytes[txgoff]);
1559
1560 rwa->os->os_dsl_dataset->ds_resume_object[txgoff] = object;
1561 rwa->os->os_dsl_dataset->ds_resume_offset[txgoff] = offset;
1562 rwa->os->os_dsl_dataset->ds_resume_bytes[txgoff] = rwa->bytes_read;
1563 }
1564
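/*
 * Compare the ZPL generation number stored in the existing object's bonus
 * buffer with the one carried in the incoming stream's bonus data.  A
 * matching generation means this is still the same logical file; a mismatch
 * means the object was freed and reallocated on the sending side.
 */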
1565 static int
1566 receive_object_is_same_generation(objset_t *os, uint64_t object,
1567 dmu_object_type_t old_bonus_type, dmu_object_type_t new_bonus_type,
1568 const void *new_bonus, boolean_t *samegenp)
1569 {
1570 zfs_file_info_t zoi;
1571 int err;
1572
1573 dmu_buf_t *old_bonus_dbuf;
1574 err = dmu_bonus_hold(os, object, FTAG, &old_bonus_dbuf);
1575 if (err != 0)
1576 return (err);
1577 err = dmu_get_file_info(os, old_bonus_type, old_bonus_dbuf->db_data,
1578 &zoi);
1579 dmu_buf_rele(old_bonus_dbuf, FTAG);
1580 if (err != 0)
1581 return (err);
1582 uint64_t old_gen = zoi.zfi_generation;
1583
1584 err = dmu_get_file_info(os, new_bonus_type, new_bonus, &zoi);
1585 if (err != 0)
1586 return (err);
1587 uint64_t new_gen = zoi.zfi_generation;
1588
1589 *samegenp = (old_gen == new_gen);
1590 return (0);
1591 }
1592
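/*
 * Decide how an incoming DRR_OBJECT record should reuse an existing on-disk
 * object.  On success, *object_to_hold is either the existing object number
 * (possibly after freeing its contents) or DMU_NEW_OBJECT if the object had
 * to be freed outright and re-claimed, and *new_blksz is the block size the
 * object should end up with.
 */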
1593 static int
1594 receive_handle_existing_object(const struct receive_writer_arg *rwa,
1595 const struct drr_object *drro, const dmu_object_info_t *doi,
1596 const void *bonus_data,
1597 uint64_t *object_to_hold, uint32_t *new_blksz)
1598 {
1599 uint32_t indblksz = drro->drr_indblkshift ?
1600 1ULL << drro->drr_indblkshift : 0;
1601 int nblkptr = deduce_nblkptr(drro->drr_bonustype,
1602 drro->drr_bonuslen);
1603 uint8_t dn_slots = drro->drr_dn_slots != 0 ?
1604 drro->drr_dn_slots : DNODE_MIN_SLOTS;
1605 boolean_t do_free_range = B_FALSE;
1606 int err;
1607
1608 *object_to_hold = drro->drr_object;
1609
1610 /* nblkptr should be bounded by the bonus size and type */
1611 if (rwa->raw && nblkptr != drro->drr_nblkptr)
1612 return (SET_ERROR(EINVAL));
1613
1614 /*
1615 * After the previous send stream, the sending system may
1616 * have freed this object, and then happened to re-allocate
1617 * this object number in a later txg. In this case, we are
1618 * receiving a different logical file, and the block size may
1619 * appear to be different. i.e. we may have a different
1620 * block size for this object than what the send stream says.
1621 * In this case we need to remove the object's contents,
1622 * so that its structure can be changed and then its contents
1623 * entirely replaced by subsequent WRITE records.
1624 *
1625 * If this is a -L (--large-block) incremental stream, and
1626 * the previous stream was not -L, the block size may appear
1627 * to increase. i.e. we may have a smaller block size for
1628 * this object than what the send stream says. In this case
1629 * we need to keep the object's contents and block size
1630 * intact, so that we don't lose parts of the object's
1631 * contents that are not changed by this incremental send
1632 * stream.
1633 *
1634 * We can distinguish between the two above cases by using
1635 * the ZPL's generation number (see
1636 * receive_object_is_same_generation()). However, we only
1637 * want to rely on the generation number when absolutely
1638 * necessary, because with raw receives, the generation is
1639 * encrypted. We also want to minimize dependence on the
1640 * ZPL, so that other types of datasets can also be received
1641 	 * (e.g. ZVOLs, although note that ZVOLs currently do not
1642 * reallocate their objects or change their structure).
1643 * Therefore, we check a number of different cases where we
1644 * know it is safe to discard the object's contents, before
1645 * using the ZPL's generation number to make the above
1646 * distinction.
1647 */
1648 if (drro->drr_blksz != doi->doi_data_block_size) {
1649 if (rwa->raw) {
1650 /*
1651 * RAW streams always have large blocks, so
1652 * we are sure that the data is not needed
1653 * due to changing --large-block to be on.
1654 * Which is fortunate since the bonus buffer
1655 * (which contains the ZPL generation) is
1656 * encrypted, and the key might not be
1657 * loaded.
1658 */
1659 do_free_range = B_TRUE;
1660 } else if (rwa->full) {
1661 /*
1662 * This is a full send stream, so it always
1663 * replaces what we have. Even if the
1664 * generation numbers happen to match, this
1665 * can not actually be the same logical file.
1666 * This is relevant when receiving a full
1667 * send as a clone.
1668 */
1669 do_free_range = B_TRUE;
1670 } else if (drro->drr_type !=
1671 DMU_OT_PLAIN_FILE_CONTENTS ||
1672 doi->doi_type != DMU_OT_PLAIN_FILE_CONTENTS) {
1673 /*
1674 * PLAIN_FILE_CONTENTS are the only type of
1675 * objects that have ever been stored with
1676 * large blocks, so we don't need the special
1677 * logic below. ZAP blocks can shrink (when
1678 * there's only one block), so we don't want
1679 * to hit the error below about block size
1680 * only increasing.
1681 */
1682 do_free_range = B_TRUE;
1683 } else if (doi->doi_max_offset <=
1684 doi->doi_data_block_size) {
1685 /*
1686 * There is only one block. We can free it,
1687 * because its contents will be replaced by a
1688 * WRITE record. This can not be the no-L ->
1689 * -L case, because the no-L case would have
1690 * resulted in multiple blocks. If we
1691 * supported -L -> no-L, it would not be safe
1692 * to free the file's contents. Fortunately,
1693 * that is not allowed (see
1694 * recv_check_large_blocks()).
1695 */
1696 do_free_range = B_TRUE;
1697 } else {
1698 boolean_t is_same_gen;
1699 err = receive_object_is_same_generation(rwa->os,
1700 drro->drr_object, doi->doi_bonus_type,
1701 drro->drr_bonustype, bonus_data, &is_same_gen);
1702 if (err != 0)
1703 return (SET_ERROR(EINVAL));
1704
1705 if (is_same_gen) {
1706 /*
1707 * This is the same logical file, and
1708 * the block size must be increasing.
1709 * It could only decrease if
1710 * --large-block was changed to be
1711 * off, which is checked in
1712 * recv_check_large_blocks().
1713 */
1714 if (drro->drr_blksz <=
1715 doi->doi_data_block_size)
1716 return (SET_ERROR(EINVAL));
1717 /*
1718 * We keep the existing blocksize and
1719 * contents.
1720 */
1721 *new_blksz =
1722 doi->doi_data_block_size;
1723 } else {
1724 do_free_range = B_TRUE;
1725 }
1726 }
1727 }
1728
1729 /* nblkptr can only decrease if the object was reallocated */
1730 if (nblkptr < doi->doi_nblkptr)
1731 do_free_range = B_TRUE;
1732
1733 /* number of slots can only change on reallocation */
1734 if (dn_slots != doi->doi_dnodesize >> DNODE_SHIFT)
1735 do_free_range = B_TRUE;
1736
1737 /*
1738 * For raw sends we also check a few other fields to
1739 * ensure we are preserving the objset structure exactly
1740 	 * as it was on the send side:
1741 * - A changed indirect block size
1742 * - A smaller nlevels
1743 */
1744 if (rwa->raw) {
1745 if (indblksz != doi->doi_metadata_block_size)
1746 do_free_range = B_TRUE;
1747 if (drro->drr_nlevels < doi->doi_indirection)
1748 do_free_range = B_TRUE;
1749 }
1750
1751 if (do_free_range) {
1752 err = dmu_free_long_range(rwa->os, drro->drr_object,
1753 0, DMU_OBJECT_END);
1754 if (err != 0)
1755 return (SET_ERROR(EINVAL));
1756 }
1757
1758 /*
1759 * The DMU does not currently support decreasing nlevels
1760 * or changing the number of dnode slots on an object. For
1761 * non-raw sends, this does not matter and the new object
1762 * can just use the previous one's nlevels. For raw sends,
1763 * however, the structure of the received dnode (including
1764 * nlevels and dnode slots) must match that of the send
1765 * side. Therefore, instead of using dmu_object_reclaim(),
1766 * we must free the object completely and call
1767 * dmu_object_claim_dnsize() instead.
1768 */
1769 if ((rwa->raw && drro->drr_nlevels < doi->doi_indirection) ||
1770 dn_slots != doi->doi_dnodesize >> DNODE_SHIFT) {
1771 err = dmu_free_long_object(rwa->os, drro->drr_object);
1772 if (err != 0)
1773 return (SET_ERROR(EINVAL));
1774
1775 txg_wait_synced(dmu_objset_pool(rwa->os), 0);
1776 *object_to_hold = DMU_NEW_OBJECT;
1777 }
1778
1779 /*
1780 * For raw receives, free everything beyond the new incoming
1781 * maxblkid. Normally this would be done with a DRR_FREE
1782 * record that would come after this DRR_OBJECT record is
1783 * processed. However, for raw receives we manually set the
1784 * maxblkid from the drr_maxblkid and so we must first free
1785 * everything above that blkid to ensure the DMU is always
1786 * consistent with itself. We will never free the first block
1787 * of the object here because a maxblkid of 0 could indicate
1788 * an object with a single block or one with no blocks. This
1789 * free may be skipped when dmu_free_long_range() was called
1790 * above since it covers the entire object's contents.
1791 */
1792 if (rwa->raw && *object_to_hold != DMU_NEW_OBJECT && !do_free_range) {
1793 err = dmu_free_long_range(rwa->os, drro->drr_object,
1794 (drro->drr_maxblkid + 1) * doi->doi_data_block_size,
1795 DMU_OBJECT_END);
1796 if (err != 0)
1797 return (SET_ERROR(EINVAL));
1798 }
1799 return (0);
1800 }
1801
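/*
 * Process a DRR_OBJECT record: validate its fields, reconcile it with any
 * object that already exists at that object number (freeing old contents
 * or conflicting dnode slots as needed), then allocate or reclaim the
 * dnode and install its bonus buffer in a single transaction.
 */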
1802 noinline static int
1803 receive_object(struct receive_writer_arg *rwa, struct drr_object *drro,
1804 void *data)
1805 {
1806 dmu_object_info_t doi;
1807 dmu_tx_t *tx;
1808 int err;
1809 uint32_t new_blksz = drro->drr_blksz;
1810 uint8_t dn_slots = drro->drr_dn_slots != 0 ?
1811 drro->drr_dn_slots : DNODE_MIN_SLOTS;
1812
1813 if (drro->drr_type == DMU_OT_NONE ||
1814 !DMU_OT_IS_VALID(drro->drr_type) ||
1815 !DMU_OT_IS_VALID(drro->drr_bonustype) ||
1816 drro->drr_checksumtype >= ZIO_CHECKSUM_FUNCTIONS ||
1817 drro->drr_compress >= ZIO_COMPRESS_FUNCTIONS ||
1818 P2PHASE(drro->drr_blksz, SPA_MINBLOCKSIZE) ||
1819 drro->drr_blksz < SPA_MINBLOCKSIZE ||
1820 drro->drr_blksz > spa_maxblocksize(dmu_objset_spa(rwa->os)) ||
1821 drro->drr_bonuslen >
1822 DN_BONUS_SIZE(spa_maxdnodesize(dmu_objset_spa(rwa->os))) ||
1823 dn_slots >
1824 (spa_maxdnodesize(dmu_objset_spa(rwa->os)) >> DNODE_SHIFT)) {
1825 return (SET_ERROR(EINVAL));
1826 }
1827
1828 if (rwa->raw) {
1829 /*
1830 * We should have received a DRR_OBJECT_RANGE record
1831 * containing this block and stored it in rwa.
1832 */
1833 if (drro->drr_object < rwa->or_firstobj ||
1834 drro->drr_object >= rwa->or_firstobj + rwa->or_numslots ||
1835 drro->drr_raw_bonuslen < drro->drr_bonuslen ||
1836 drro->drr_indblkshift > SPA_MAXBLOCKSHIFT ||
1837 drro->drr_nlevels > DN_MAX_LEVELS ||
1838 drro->drr_nblkptr > DN_MAX_NBLKPTR ||
1839 DN_SLOTS_TO_BONUSLEN(dn_slots) <
1840 drro->drr_raw_bonuslen)
1841 return (SET_ERROR(EINVAL));
1842 } else {
1843 /*
1844 * The DRR_OBJECT_SPILL flag is only valid when the DRR_BEGIN
1845 * record indicates support for it by setting DRR_FLAG_SPILL_BLOCK.
1846 */
1847 if (((drro->drr_flags & ~(DRR_OBJECT_SPILL))) ||
1848 (!rwa->spill && DRR_OBJECT_HAS_SPILL(drro->drr_flags))) {
1849 return (SET_ERROR(EINVAL));
1850 }
1851
1852 if (drro->drr_raw_bonuslen != 0 || drro->drr_nblkptr != 0 ||
1853 drro->drr_indblkshift != 0 || drro->drr_nlevels != 0) {
1854 return (SET_ERROR(EINVAL));
1855 }
1856 }
1857
1858 err = dmu_object_info(rwa->os, drro->drr_object, &doi);
1859
1860 if (err != 0 && err != ENOENT && err != EEXIST)
1861 return (SET_ERROR(EINVAL));
1862
1863 if (drro->drr_object > rwa->max_object)
1864 rwa->max_object = drro->drr_object;
1865
1866 /*
1867 * If we are losing blkptrs or changing the block size this must
1868 * be a new file instance. We must clear out the previous file
1869 * contents before we can change this type of metadata in the dnode.
1870 * Raw receives will also check that the indirect structure of the
1871 * dnode hasn't changed.
1872 */
1873 uint64_t object_to_hold;
1874 if (err == 0) {
1875 err = receive_handle_existing_object(rwa, drro, &doi, data,
1876 &object_to_hold, &new_blksz);
1877 if (err != 0)
1878 return (err);
1879 } else if (err == EEXIST) {
1880 /*
1881 * The object requested is currently an interior slot of a
1882 * multi-slot dnode. This will be resolved when the next txg
1883 * is synced out, since the send stream will have told us
1884 * to free this slot when we freed the associated dnode
1885 * earlier in the stream.
1886 */
1887 txg_wait_synced(dmu_objset_pool(rwa->os), 0);
1888
1889 if (dmu_object_info(rwa->os, drro->drr_object, NULL) != ENOENT)
1890 return (SET_ERROR(EINVAL));
1891
1892 /* object was freed and we are about to allocate a new one */
1893 object_to_hold = DMU_NEW_OBJECT;
1894 } else {
1895 /* object is free and we are about to allocate a new one */
1896 object_to_hold = DMU_NEW_OBJECT;
1897 }
1898
1899 /*
1900 * If this is a multi-slot dnode there is a chance that this
1901 * object will expand into a slot that is already used by
1902 * another object from the previous snapshot. We must free
1903 * these objects before we attempt to allocate the new dnode.
1904 */
1905 if (dn_slots > 1) {
1906 boolean_t need_sync = B_FALSE;
1907
1908 for (uint64_t slot = drro->drr_object + 1;
1909 slot < drro->drr_object + dn_slots;
1910 slot++) {
1911 dmu_object_info_t slot_doi;
1912
1913 err = dmu_object_info(rwa->os, slot, &slot_doi);
1914 if (err == ENOENT || err == EEXIST)
1915 continue;
1916 else if (err != 0)
1917 return (err);
1918
1919 err = dmu_free_long_object(rwa->os, slot);
1920 if (err != 0)
1921 return (err);
1922
1923 need_sync = B_TRUE;
1924 }
1925
1926 if (need_sync)
1927 txg_wait_synced(dmu_objset_pool(rwa->os), 0);
1928 }
1929
1930 tx = dmu_tx_create(rwa->os);
1931 dmu_tx_hold_bonus(tx, object_to_hold);
1932 dmu_tx_hold_write(tx, object_to_hold, 0, 0);
1933 err = dmu_tx_assign(tx, TXG_WAIT);
1934 if (err != 0) {
1935 dmu_tx_abort(tx);
1936 return (err);
1937 }
1938
1939 if (object_to_hold == DMU_NEW_OBJECT) {
1940 /* Currently free, wants to be allocated */
1941 err = dmu_object_claim_dnsize(rwa->os, drro->drr_object,
1942 drro->drr_type, new_blksz,
1943 drro->drr_bonustype, drro->drr_bonuslen,
1944 dn_slots << DNODE_SHIFT, tx);
1945 } else if (drro->drr_type != doi.doi_type ||
1946 new_blksz != doi.doi_data_block_size ||
1947 drro->drr_bonustype != doi.doi_bonus_type ||
1948 drro->drr_bonuslen != doi.doi_bonus_size) {
1949 /* Currently allocated, but with different properties */
1950 err = dmu_object_reclaim_dnsize(rwa->os, drro->drr_object,
1951 drro->drr_type, new_blksz,
1952 drro->drr_bonustype, drro->drr_bonuslen,
1953 dn_slots << DNODE_SHIFT, rwa->spill ?
1954 DRR_OBJECT_HAS_SPILL(drro->drr_flags) : B_FALSE, tx);
1955 } else if (rwa->spill && !DRR_OBJECT_HAS_SPILL(drro->drr_flags)) {
1956 /*
1957 * Currently allocated, the existing version of this object
1958 * may reference a spill block that is no longer allocated
1959 * at the source and needs to be freed.
1960 */
1961 err = dmu_object_rm_spill(rwa->os, drro->drr_object, tx);
1962 }
1963
1964 if (err != 0) {
1965 dmu_tx_commit(tx);
1966 return (SET_ERROR(EINVAL));
1967 }
1968
1969 if (rwa->or_crypt_params_present) {
1970 /*
1971 * Set the crypt params for the buffer associated with this
1972 * range of dnodes. This causes the blkptr_t to have the
1973 * same crypt params (byteorder, salt, iv, mac) as on the
1974 * sending side.
1975 *
1976 * Since we are committing this tx now, it is possible for
1977 * the dnode block to end up on-disk with the incorrect MAC,
1978 * if subsequent objects in this block are received in a
1979 * different txg. However, since the dataset is marked as
1980 * inconsistent, no code paths will do a non-raw read (or
1981 * decrypt the block / verify the MAC). The receive code and
1982 * scrub code can safely do raw reads and verify the
1983 * checksum. They don't need to verify the MAC.
1984 */
1985 dmu_buf_t *db = NULL;
1986 uint64_t offset = rwa->or_firstobj * DNODE_MIN_SIZE;
1987
1988 err = dmu_buf_hold_by_dnode(DMU_META_DNODE(rwa->os),
1989 offset, FTAG, &db, DMU_READ_PREFETCH | DMU_READ_NO_DECRYPT);
1990 if (err != 0) {
1991 dmu_tx_commit(tx);
1992 return (SET_ERROR(EINVAL));
1993 }
1994
1995 dmu_buf_set_crypt_params(db, rwa->or_byteorder,
1996 rwa->or_salt, rwa->or_iv, rwa->or_mac, tx);
1997
1998 dmu_buf_rele(db, FTAG);
1999
2000 rwa->or_crypt_params_present = B_FALSE;
2001 }
2002
2003 dmu_object_set_checksum(rwa->os, drro->drr_object,
2004 drro->drr_checksumtype, tx);
2005 dmu_object_set_compress(rwa->os, drro->drr_object,
2006 drro->drr_compress, tx);
2007
2008 /* handle more restrictive dnode structuring for raw recvs */
2009 if (rwa->raw) {
2010 /*
2011 * Set the indirect block size, block shift, nlevels.
2012 * This will not fail because we ensured all of the
2013 * blocks were freed earlier if this is a new object.
2014 * For non-new objects block size and indirect block
2015 * shift cannot change and nlevels can only increase.
2016 */
2017 ASSERT3U(new_blksz, ==, drro->drr_blksz);
2018 VERIFY0(dmu_object_set_blocksize(rwa->os, drro->drr_object,
2019 drro->drr_blksz, drro->drr_indblkshift, tx));
2020 VERIFY0(dmu_object_set_nlevels(rwa->os, drro->drr_object,
2021 drro->drr_nlevels, tx));
2022
2023 /*
2024 * Set the maxblkid. This will always succeed because
2025 * we freed all blocks beyond the new maxblkid above.
2026 */
2027 VERIFY0(dmu_object_set_maxblkid(rwa->os, drro->drr_object,
2028 drro->drr_maxblkid, tx));
2029 }
2030
2031 if (data != NULL) {
2032 dmu_buf_t *db;
2033 dnode_t *dn;
2034 uint32_t flags = DMU_READ_NO_PREFETCH;
2035
2036 if (rwa->raw)
2037 flags |= DMU_READ_NO_DECRYPT;
2038
2039 VERIFY0(dnode_hold(rwa->os, drro->drr_object, FTAG, &dn));
2040 VERIFY0(dmu_bonus_hold_by_dnode(dn, FTAG, &db, flags));
2041
2042 dmu_buf_will_dirty(db, tx);
2043
2044 ASSERT3U(db->db_size, >=, drro->drr_bonuslen);
2045 memcpy(db->db_data, data, DRR_OBJECT_PAYLOAD_SIZE(drro));
2046
2047 /*
2048 * Raw bonus buffers have their byteorder determined by the
2049 * DRR_OBJECT_RANGE record.
2050 */
2051 if (rwa->byteswap && !rwa->raw) {
2052 dmu_object_byteswap_t byteswap =
2053 DMU_OT_BYTESWAP(drro->drr_bonustype);
2054 dmu_ot_byteswap[byteswap].ob_func(db->db_data,
2055 DRR_OBJECT_PAYLOAD_SIZE(drro));
2056 }
2057 dmu_buf_rele(db, FTAG);
2058 dnode_rele(dn, FTAG);
2059 }
2060 dmu_tx_commit(tx);
2061
2062 return (0);
2063 }
2064
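/*
 * Process a DRR_FREEOBJECTS record by freeing each allocated object in
 * the range [drr_firstobj, drr_firstobj + drr_numobjs).
 */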
2065 noinline static int
2066 receive_freeobjects(struct receive_writer_arg *rwa,
2067 struct drr_freeobjects *drrfo)
2068 {
2069 uint64_t obj;
2070 int next_err = 0;
2071
2072 if (drrfo->drr_firstobj + drrfo->drr_numobjs < drrfo->drr_firstobj)
2073 return (SET_ERROR(EINVAL));
2074
2075 for (obj = drrfo->drr_firstobj == 0 ? 1 : drrfo->drr_firstobj;
2076 obj < drrfo->drr_firstobj + drrfo->drr_numobjs &&
2077 obj < DN_MAX_OBJECT && next_err == 0;
2078 next_err = dmu_object_next(rwa->os, &obj, FALSE, 0)) {
2079 dmu_object_info_t doi;
2080 int err;
2081
2082 err = dmu_object_info(rwa->os, obj, &doi);
2083 if (err == ENOENT)
2084 continue;
2085 else if (err != 0)
2086 return (err);
2087
2088 err = dmu_free_long_object(rwa->os, obj);
2089
2090 if (err != 0)
2091 return (err);
2092 }
2093 if (next_err != ESRCH)
2094 return (next_err);
2095 return (0);
2096 }
2097
2098 /*
2099 * Note: if this fails, the caller will clean up any records left on the
2100 * rwa->write_batch list.
2101 */
2102 static int
2103 flush_write_batch_impl(struct receive_writer_arg *rwa)
2104 {
2105 dnode_t *dn;
2106 int err;
2107
2108 if (dnode_hold(rwa->os, rwa->last_object, FTAG, &dn) != 0)
2109 return (SET_ERROR(EINVAL));
2110
2111 struct receive_record_arg *last_rrd = list_tail(&rwa->write_batch);
2112 struct drr_write *last_drrw = &last_rrd->header.drr_u.drr_write;
2113
2114 struct receive_record_arg *first_rrd = list_head(&rwa->write_batch);
2115 struct drr_write *first_drrw = &first_rrd->header.drr_u.drr_write;
2116
2117 ASSERT3U(rwa->last_object, ==, last_drrw->drr_object);
2118 ASSERT3U(rwa->last_offset, ==, last_drrw->drr_offset);
2119
2120 dmu_tx_t *tx = dmu_tx_create(rwa->os);
2121 dmu_tx_hold_write_by_dnode(tx, dn, first_drrw->drr_offset,
2122 last_drrw->drr_offset - first_drrw->drr_offset +
2123 last_drrw->drr_logical_size);
2124 err = dmu_tx_assign(tx, TXG_WAIT);
2125 if (err != 0) {
2126 dmu_tx_abort(tx);
2127 dnode_rele(dn, FTAG);
2128 return (err);
2129 }
2130
2131 struct receive_record_arg *rrd;
2132 while ((rrd = list_head(&rwa->write_batch)) != NULL) {
2133 struct drr_write *drrw = &rrd->header.drr_u.drr_write;
2134 abd_t *abd = rrd->abd;
2135
2136 ASSERT3U(drrw->drr_object, ==, rwa->last_object);
2137
2138 if (drrw->drr_logical_size != dn->dn_datablksz) {
2139 /*
2140 * The WRITE record is larger than the object's block
2141 * size. We must be receiving an incremental
2142 * large-block stream into a dataset that previously did
2143 * a non-large-block receive. Lightweight writes must
2144 * be exactly one block, so we need to decompress the
2145 * data (if compressed) and do a normal dmu_write().
2146 */
2147 ASSERT3U(drrw->drr_logical_size, >, dn->dn_datablksz);
2148 if (DRR_WRITE_COMPRESSED(drrw)) {
2149 abd_t *decomp_abd =
2150 abd_alloc_linear(drrw->drr_logical_size,
2151 B_FALSE);
2152
2153 err = zio_decompress_data(
2154 drrw->drr_compressiontype,
2155 abd, abd_to_buf(decomp_abd),
2156 abd_get_size(abd),
2157 abd_get_size(decomp_abd), NULL);
2158
2159 if (err == 0) {
2160 dmu_write_by_dnode(dn,
2161 drrw->drr_offset,
2162 drrw->drr_logical_size,
2163 abd_to_buf(decomp_abd), tx);
2164 }
2165 abd_free(decomp_abd);
2166 } else {
2167 dmu_write_by_dnode(dn,
2168 drrw->drr_offset,
2169 drrw->drr_logical_size,
2170 abd_to_buf(abd), tx);
2171 }
2172 if (err == 0)
2173 abd_free(abd);
2174 } else {
2175 zio_prop_t zp;
2176 dmu_write_policy(rwa->os, dn, 0, 0, &zp);
2177
2178 enum zio_flag zio_flags = 0;
2179
2180 if (rwa->raw) {
2181 zp.zp_encrypt = B_TRUE;
2182 zp.zp_compress = drrw->drr_compressiontype;
2183 zp.zp_byteorder = ZFS_HOST_BYTEORDER ^
2184 !!DRR_IS_RAW_BYTESWAPPED(drrw->drr_flags) ^
2185 rwa->byteswap;
2186 memcpy(zp.zp_salt, drrw->drr_salt,
2187 ZIO_DATA_SALT_LEN);
2188 memcpy(zp.zp_iv, drrw->drr_iv,
2189 ZIO_DATA_IV_LEN);
2190 memcpy(zp.zp_mac, drrw->drr_mac,
2191 ZIO_DATA_MAC_LEN);
2192 if (DMU_OT_IS_ENCRYPTED(zp.zp_type)) {
2193 zp.zp_nopwrite = B_FALSE;
2194 zp.zp_copies = MIN(zp.zp_copies,
2195 SPA_DVAS_PER_BP - 1);
2196 }
2197 zio_flags |= ZIO_FLAG_RAW;
2198 } else if (DRR_WRITE_COMPRESSED(drrw)) {
2199 ASSERT3U(drrw->drr_compressed_size, >, 0);
2200 ASSERT3U(drrw->drr_logical_size, >=,
2201 drrw->drr_compressed_size);
2202 zp.zp_compress = drrw->drr_compressiontype;
2203 zio_flags |= ZIO_FLAG_RAW_COMPRESS;
2204 } else if (rwa->byteswap) {
2205 /*
2206 * Note: compressed blocks never need to be
2207 * byteswapped, because WRITE records for
2208 * metadata blocks are never compressed. The
2209 * exception is raw streams, which are written
2210 * in the original byteorder, and the byteorder
2211 * bit is preserved in the BP by setting
2212 * zp_byteorder above.
2213 */
2214 dmu_object_byteswap_t byteswap =
2215 DMU_OT_BYTESWAP(drrw->drr_type);
2216 dmu_ot_byteswap[byteswap].ob_func(
2217 abd_to_buf(abd),
2218 DRR_WRITE_PAYLOAD_SIZE(drrw));
2219 }
2220
2221 /*
2222 * Since this data can't be read until the receive
2223 * completes, we can do a "lightweight" write for
2224 * improved performance.
2225 */
2226 err = dmu_lightweight_write_by_dnode(dn,
2227 drrw->drr_offset, abd, &zp, zio_flags, tx);
2228 }
2229
2230 if (err != 0) {
2231 /*
2232 * This rrd is left on the list, so the caller will
2233 * free it (and the abd).
2234 */
2235 break;
2236 }
2237
2238 /*
2239 * Note: If the receive fails, we want the resume stream to
2240 * start with the same record that we last successfully
2241 * received (as opposed to the next record), so that we can
2242 * verify that we are resuming from the correct location.
2243 */
2244 save_resume_state(rwa, drrw->drr_object, drrw->drr_offset, tx);
2245
2246 list_remove(&rwa->write_batch, rrd);
2247 kmem_free(rrd, sizeof (*rrd));
2248 }
2249
2250 dmu_tx_commit(tx);
2251 dnode_rele(dn, FTAG);
2252 return (err);
2253 }
2254
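/*
 * Flush the accumulated WRITE records to the DMU. If a prior error is
 * pending, or the flush itself fails, free any records (and their abds)
 * still left on the write_batch list.
 */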
2255 noinline static int
2256 flush_write_batch(struct receive_writer_arg *rwa)
2257 {
2258 if (list_is_empty(&rwa->write_batch))
2259 return (0);
2260 int err = rwa->err;
2261 if (err == 0)
2262 err = flush_write_batch_impl(rwa);
2263 if (err != 0) {
2264 struct receive_record_arg *rrd;
2265 while ((rrd = list_remove_head(&rwa->write_batch)) != NULL) {
2266 abd_free(rrd->abd);
2267 kmem_free(rrd, sizeof (*rrd));
2268 }
2269 }
2270 ASSERT(list_is_empty(&rwa->write_batch));
2271 return (err);
2272 }
2273
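/*
 * Handle a DRR_WRITE record. For healing receives, attempt to repair the
 * corresponding on-disk block in place. Otherwise, append the record to
 * the current write batch, flushing the batch first if this record starts
 * a new object or falls outside the batch window, and return EAGAIN to
 * indicate that the caller must not free the record.
 */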
2274 noinline static int
2275 receive_process_write_record(struct receive_writer_arg *rwa,
2276 struct receive_record_arg *rrd)
2277 {
2278 int err = 0;
2279
2280 ASSERT3U(rrd->header.drr_type, ==, DRR_WRITE);
2281 struct drr_write *drrw = &rrd->header.drr_u.drr_write;
2282
2283 if (drrw->drr_offset + drrw->drr_logical_size < drrw->drr_offset ||
2284 !DMU_OT_IS_VALID(drrw->drr_type))
2285 return (SET_ERROR(EINVAL));
2286
2287 if (rwa->heal) {
2288 blkptr_t *bp;
2289 dmu_buf_t *dbp;
2290 dnode_t *dn;
2291 int flags = DB_RF_CANFAIL;
2292
2293 if (rwa->raw)
2294 flags |= DB_RF_NO_DECRYPT;
2295
2296 if (rwa->byteswap) {
2297 dmu_object_byteswap_t byteswap =
2298 DMU_OT_BYTESWAP(drrw->drr_type);
2299 dmu_ot_byteswap[byteswap].ob_func(abd_to_buf(rrd->abd),
2300 DRR_WRITE_PAYLOAD_SIZE(drrw));
2301 }
2302
2303 err = dmu_buf_hold_noread(rwa->os, drrw->drr_object,
2304 drrw->drr_offset, FTAG, &dbp);
2305 if (err != 0)
2306 return (err);
2307
2308 /* Try to read the object to see if it needs healing */
2309 err = dbuf_read((dmu_buf_impl_t *)dbp, NULL, flags);
2310 /*
2311 * We only try to heal when dbuf_read() returns ECKSUM.
2312 * Other errors (even EIO) get returned to the caller.
2313 * EIO indicates that the device is not present/accessible,
2314 * so writing to it will likely fail.
2315 * If the block is healthy, we don't want to overwrite it
2316 * unnecessarily.
2317 */
2318 if (err != ECKSUM) {
2319 dmu_buf_rele(dbp, FTAG);
2320 return (err);
2321 }
2322 dn = dmu_buf_dnode_enter(dbp);
2323 /* Make sure the on-disk block and recv record sizes match */
2324 if (drrw->drr_logical_size !=
2325 dn->dn_datablkszsec << SPA_MINBLOCKSHIFT) {
2326 err = ENOTSUP;
2327 dmu_buf_dnode_exit(dbp);
2328 dmu_buf_rele(dbp, FTAG);
2329 return (err);
2330 }
2331 /* Get the block pointer for the corrupted block */
2332 bp = dmu_buf_get_blkptr(dbp);
2333 err = do_corrective_recv(rwa, drrw, rrd, bp);
2334 dmu_buf_dnode_exit(dbp);
2335 dmu_buf_rele(dbp, FTAG);
2336 return (err);
2337 }
2338
2339 /*
2340 * For resuming to work, records must be in increasing order
2341 * by (object, offset).
2342 */
2343 if (drrw->drr_object < rwa->last_object ||
2344 (drrw->drr_object == rwa->last_object &&
2345 drrw->drr_offset < rwa->last_offset)) {
2346 return (SET_ERROR(EINVAL));
2347 }
2348
2349 struct receive_record_arg *first_rrd = list_head(&rwa->write_batch);
2350 struct drr_write *first_drrw = &first_rrd->header.drr_u.drr_write;
2351 uint64_t batch_size =
2352 MIN(zfs_recv_write_batch_size, DMU_MAX_ACCESS / 2);
2353 if (first_rrd != NULL &&
2354 (drrw->drr_object != first_drrw->drr_object ||
2355 drrw->drr_offset >= first_drrw->drr_offset + batch_size)) {
2356 err = flush_write_batch(rwa);
2357 if (err != 0)
2358 return (err);
2359 }
2360
2361 rwa->last_object = drrw->drr_object;
2362 rwa->last_offset = drrw->drr_offset;
2363
2364 if (rwa->last_object > rwa->max_object)
2365 rwa->max_object = rwa->last_object;
2366
2367 list_insert_tail(&rwa->write_batch, rrd);
2368 /*
2369 * Return EAGAIN to indicate that we will use this rrd again,
2370 * so the caller should not free it
2371 */
2372 return (EAGAIN);
2373 }
2374
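/*
 * Process a DRR_WRITE_EMBEDDED record by writing the embedded
 * (BP-resident) payload with dmu_write_embedded().
 */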
2375 static int
2376 receive_write_embedded(struct receive_writer_arg *rwa,
2377 struct drr_write_embedded *drrwe, void *data)
2378 {
2379 dmu_tx_t *tx;
2380 int err;
2381
2382 if (drrwe->drr_offset + drrwe->drr_length < drrwe->drr_offset)
2383 return (SET_ERROR(EINVAL));
2384
2385 if (drrwe->drr_psize > BPE_PAYLOAD_SIZE)
2386 return (SET_ERROR(EINVAL));
2387
2388 if (drrwe->drr_etype >= NUM_BP_EMBEDDED_TYPES)
2389 return (SET_ERROR(EINVAL));
2390 if (drrwe->drr_compression >= ZIO_COMPRESS_FUNCTIONS)
2391 return (SET_ERROR(EINVAL));
2392 if (rwa->raw)
2393 return (SET_ERROR(EINVAL));
2394
2395 if (drrwe->drr_object > rwa->max_object)
2396 rwa->max_object = drrwe->drr_object;
2397
2398 tx = dmu_tx_create(rwa->os);
2399
2400 dmu_tx_hold_write(tx, drrwe->drr_object,
2401 drrwe->drr_offset, drrwe->drr_length);
2402 err = dmu_tx_assign(tx, TXG_WAIT);
2403 if (err != 0) {
2404 dmu_tx_abort(tx);
2405 return (err);
2406 }
2407
2408 dmu_write_embedded(rwa->os, drrwe->drr_object,
2409 drrwe->drr_offset, data, drrwe->drr_etype,
2410 drrwe->drr_compression, drrwe->drr_lsize, drrwe->drr_psize,
2411 rwa->byteswap ^ ZFS_HOST_BYTEORDER, tx);
2412
2413 /* See comment about save_resume_state() in flush_write_batch_impl(). */
2414 save_resume_state(rwa, drrwe->drr_object, drrwe->drr_offset, tx);
2415 dmu_tx_commit(tx);
2416 return (0);
2417 }
2418
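/*
 * Process a DRR_SPILL record: ignore unmodified spill blocks, otherwise
 * resize the object's spill block if necessary and install the new
 * spill data.
 */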
2419 static int
2420 receive_spill(struct receive_writer_arg *rwa, struct drr_spill *drrs,
2421 abd_t *abd)
2422 {
2423 dmu_buf_t *db, *db_spill;
2424 int err;
2425
2426 if (drrs->drr_length < SPA_MINBLOCKSIZE ||
2427 drrs->drr_length > spa_maxblocksize(dmu_objset_spa(rwa->os)))
2428 return (SET_ERROR(EINVAL));
2429
2430 /*
2431 * This is an unmodified spill block which was added to the stream
2432 * to resolve an issue with incorrectly removing spill blocks. It
2433 * should be ignored by current versions of the code which support
2434 * the DRR_FLAG_SPILL_BLOCK flag.
2435 */
2436 if (rwa->spill && DRR_SPILL_IS_UNMODIFIED(drrs->drr_flags)) {
2437 abd_free(abd);
2438 return (0);
2439 }
2440
2441 if (rwa->raw) {
2442 if (!DMU_OT_IS_VALID(drrs->drr_type) ||
2443 drrs->drr_compressiontype >= ZIO_COMPRESS_FUNCTIONS ||
2444 drrs->drr_compressed_size == 0)
2445 return (SET_ERROR(EINVAL));
2446 }
2447
2448 if (dmu_object_info(rwa->os, drrs->drr_object, NULL) != 0)
2449 return (SET_ERROR(EINVAL));
2450
2451 if (drrs->drr_object > rwa->max_object)
2452 rwa->max_object = drrs->drr_object;
2453
2454 VERIFY0(dmu_bonus_hold(rwa->os, drrs->drr_object, FTAG, &db));
2455 if ((err = dmu_spill_hold_by_bonus(db, DMU_READ_NO_DECRYPT, FTAG,
2456 &db_spill)) != 0) {
2457 dmu_buf_rele(db, FTAG);
2458 return (err);
2459 }
2460
2461 dmu_tx_t *tx = dmu_tx_create(rwa->os);
2462
2463 dmu_tx_hold_spill(tx, db->db_object);
2464
2465 err = dmu_tx_assign(tx, TXG_WAIT);
2466 if (err != 0) {
2467 dmu_buf_rele(db, FTAG);
2468 dmu_buf_rele(db_spill, FTAG);
2469 dmu_tx_abort(tx);
2470 return (err);
2471 }
2472
2473 /*
2474 * Spill blocks may both grow and shrink. When a change in size
2475 * occurs, any existing dbuf must be updated to match the logical
2476 * size of the provided arc_buf_t.
2477 */
2478 if (db_spill->db_size != drrs->drr_length) {
2479 dmu_buf_will_fill(db_spill, tx);
2480 VERIFY0(dbuf_spill_set_blksz(db_spill,
2481 drrs->drr_length, tx));
2482 }
2483
2484 arc_buf_t *abuf;
2485 if (rwa->raw) {
2486 boolean_t byteorder = ZFS_HOST_BYTEORDER ^
2487 !!DRR_IS_RAW_BYTESWAPPED(drrs->drr_flags) ^
2488 rwa->byteswap;
2489
2490 abuf = arc_loan_raw_buf(dmu_objset_spa(rwa->os),
2491 drrs->drr_object, byteorder, drrs->drr_salt,
2492 drrs->drr_iv, drrs->drr_mac, drrs->drr_type,
2493 drrs->drr_compressed_size, drrs->drr_length,
2494 drrs->drr_compressiontype, 0);
2495 } else {
2496 abuf = arc_loan_buf(dmu_objset_spa(rwa->os),
2497 DMU_OT_IS_METADATA(drrs->drr_type),
2498 drrs->drr_length);
2499 if (rwa->byteswap) {
2500 dmu_object_byteswap_t byteswap =
2501 DMU_OT_BYTESWAP(drrs->drr_type);
2502 dmu_ot_byteswap[byteswap].ob_func(abd_to_buf(abd),
2503 DRR_SPILL_PAYLOAD_SIZE(drrs));
2504 }
2505 }
2506
2507 memcpy(abuf->b_data, abd_to_buf(abd), DRR_SPILL_PAYLOAD_SIZE(drrs));
2508 abd_free(abd);
2509 dbuf_assign_arcbuf((dmu_buf_impl_t *)db_spill, abuf, tx);
2510
2511 dmu_buf_rele(db, FTAG);
2512 dmu_buf_rele(db_spill, FTAG);
2513
2514 dmu_tx_commit(tx);
2515 return (0);
2516 }
2517
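/*
 * Process a DRR_FREE record by freeing the specified range of the object.
 */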
2518 noinline static int
2519 receive_free(struct receive_writer_arg *rwa, struct drr_free *drrf)
2520 {
2521 int err;
2522
2523 if (drrf->drr_length != -1ULL &&
2524 drrf->drr_offset + drrf->drr_length < drrf->drr_offset)
2525 return (SET_ERROR(EINVAL));
2526
2527 if (dmu_object_info(rwa->os, drrf->drr_object, NULL) != 0)
2528 return (SET_ERROR(EINVAL));
2529
2530 if (drrf->drr_object > rwa->max_object)
2531 rwa->max_object = drrf->drr_object;
2532
2533 err = dmu_free_long_range(rwa->os, drrf->drr_object,
2534 drrf->drr_offset, drrf->drr_length);
2535
2536 return (err);
2537 }
2538
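/*
 * Process a DRR_OBJECT_RANGE record (raw sends only): stash the crypt
 * parameters for the upcoming block of dnodes so that receive_object()
 * can apply them when the dnode block is dirtied.
 */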
2539 static int
2540 receive_object_range(struct receive_writer_arg *rwa,
2541 struct drr_object_range *drror)
2542 {
2543 /*
2544 * By default, we assume this block is in our native format
2545 * (ZFS_HOST_BYTEORDER). We then take into account whether
2546 * the send stream is byteswapped (rwa->byteswap). Finally,
2547 * we need to byteswap again if this particular block was
2548 * in non-native format on the send side.
2549 */
2550 boolean_t byteorder = ZFS_HOST_BYTEORDER ^ rwa->byteswap ^
2551 !!DRR_IS_RAW_BYTESWAPPED(drror->drr_flags);
2552
2553 /*
2554 * Since dnode block sizes are constant, we should not need to worry
2555 * about making sure that the dnode block size is the same on the
2556 * sending and receiving sides for the time being. For non-raw sends,
2557 * this does not matter (and in fact we do not send a DRR_OBJECT_RANGE
2558 * record at all). Raw sends require this record type because the
2559 * encryption parameters are used to protect an entire block of bonus
2560 * buffers. If the size of dnode blocks ever becomes variable,
2561 * handling will need to be added to ensure that dnode block sizes
2562 * match on the sending and receiving side.
2563 */
2564 if (drror->drr_numslots != DNODES_PER_BLOCK ||
2565 P2PHASE(drror->drr_firstobj, DNODES_PER_BLOCK) != 0 ||
2566 !rwa->raw)
2567 return (SET_ERROR(EINVAL));
2568
2569 if (drror->drr_firstobj > rwa->max_object)
2570 rwa->max_object = drror->drr_firstobj;
2571
2572 /*
2573 * The DRR_OBJECT_RANGE handling must be deferred to receive_object()
2574 * so that the block of dnodes is not written out when it's empty,
2575 * and converted to a HOLE BP.
2576 */
2577 rwa->or_crypt_params_present = B_TRUE;
2578 rwa->or_firstobj = drror->drr_firstobj;
2579 rwa->or_numslots = drror->drr_numslots;
2580 memcpy(rwa->or_salt, drror->drr_salt, ZIO_DATA_SALT_LEN);
2581 memcpy(rwa->or_iv, drror->drr_iv, ZIO_DATA_IV_LEN);
2582 memcpy(rwa->or_mac, drror->drr_mac, ZIO_DATA_MAC_LEN);
2583 rwa->or_byteorder = byteorder;
2584
2585 return (0);
2586 }
2587
2588 /*
2589 * Until we have the ability to redact large ranges of data efficiently, we
2590 * process these records as frees.
2591 */
2592 noinline static int
2593 receive_redact(struct receive_writer_arg *rwa, struct drr_redact *drrr)
2594 {
2595 struct drr_free drrf = {0};
2596 drrf.drr_length = drrr->drr_length;
2597 drrf.drr_object = drrr->drr_object;
2598 drrf.drr_offset = drrr->drr_offset;
2599 drrf.drr_toguid = drrr->drr_toguid;
2600 return (receive_free(rwa, &drrf));
2601 }
2602
2603 /* used to destroy the drc_ds on error */
2604 static void
2605 dmu_recv_cleanup_ds(dmu_recv_cookie_t *drc)
2606 {
2607 dsl_dataset_t *ds = drc->drc_ds;
2608 ds_hold_flags_t dsflags;
2609
2610 dsflags = (drc->drc_raw) ? DS_HOLD_FLAG_NONE : DS_HOLD_FLAG_DECRYPT;
2611 /*
2612 * Wait for the txg sync before cleaning up the receive. For
2613 * resumable receives, this ensures that our resume state has
2614 * been written out to disk. For raw receives, this ensures
2615 * that the user accounting code will not attempt to do anything
2616 * after we stopped receiving the dataset.
2617 */
2618 txg_wait_synced(ds->ds_dir->dd_pool, 0);
2619 ds->ds_objset->os_raw_receive = B_FALSE;
2620
2621 rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
2622 if (drc->drc_resumable && drc->drc_should_save &&
2623 !BP_IS_HOLE(dsl_dataset_get_blkptr(ds))) {
2624 rrw_exit(&ds->ds_bp_rwlock, FTAG);
2625 dsl_dataset_disown(ds, dsflags, dmu_recv_tag);
2626 } else {
2627 char name[ZFS_MAX_DATASET_NAME_LEN];
2628 rrw_exit(&ds->ds_bp_rwlock, FTAG);
2629 dsl_dataset_name(ds, name);
2630 dsl_dataset_disown(ds, dsflags, dmu_recv_tag);
2631 if (!drc->drc_heal)
2632 (void) dsl_destroy_head(name);
2633 }
2634 }
2635
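/*
 * Fold a buffer into the stream's running fletcher-4 checksum, using the
 * byteswapping variant when the stream is in the opposite byteorder.
 */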
2636 static void
2637 receive_cksum(dmu_recv_cookie_t *drc, int len, void *buf)
2638 {
2639 if (drc->drc_byteswap) {
2640 (void) fletcher_4_incremental_byteswap(buf, len,
2641 &drc->drc_cksum);
2642 } else {
2643 (void) fletcher_4_incremental_native(buf, len, &drc->drc_cksum);
2644 }
2645 }
2646
2647 /*
2648 * Read the payload into a buffer of size len, and update the current record's
2649 * payload field.
2650 * Allocate drc->drc_next_rrd and read the next record's header into
2651 * drc->drc_next_rrd->header.
2652 * Verify checksum of payload and next record.
2653 */
2654 static int
2655 receive_read_payload_and_next_header(dmu_recv_cookie_t *drc, int len, void *buf)
2656 {
2657 int err;
2658
2659 if (len != 0) {
2660 ASSERT3U(len, <=, SPA_MAXBLOCKSIZE);
2661 err = receive_read(drc, len, buf);
2662 if (err != 0)
2663 return (err);
2664 receive_cksum(drc, len, buf);
2665
2666 /* note: rrd is NULL when reading the begin record's payload */
2667 if (drc->drc_rrd != NULL) {
2668 drc->drc_rrd->payload = buf;
2669 drc->drc_rrd->payload_size = len;
2670 drc->drc_rrd->bytes_read = drc->drc_bytes_read;
2671 }
2672 } else {
2673 ASSERT3P(buf, ==, NULL);
2674 }
2675
2676 drc->drc_prev_cksum = drc->drc_cksum;
2677
2678 drc->drc_next_rrd = kmem_zalloc(sizeof (*drc->drc_next_rrd), KM_SLEEP);
2679 err = receive_read(drc, sizeof (drc->drc_next_rrd->header),
2680 &drc->drc_next_rrd->header);
2681 drc->drc_next_rrd->bytes_read = drc->drc_bytes_read;
2682
2683 if (err != 0) {
2684 kmem_free(drc->drc_next_rrd, sizeof (*drc->drc_next_rrd));
2685 drc->drc_next_rrd = NULL;
2686 return (err);
2687 }
2688 if (drc->drc_next_rrd->header.drr_type == DRR_BEGIN) {
2689 kmem_free(drc->drc_next_rrd, sizeof (*drc->drc_next_rrd));
2690 drc->drc_next_rrd = NULL;
2691 return (SET_ERROR(EINVAL));
2692 }
2693
2694 /*
2695 * Note: checksum is of everything up to but not including the
2696 * checksum itself.
2697 */
2698 ASSERT3U(offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
2699 ==, sizeof (dmu_replay_record_t) - sizeof (zio_cksum_t));
2700 receive_cksum(drc,
2701 offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
2702 &drc->drc_next_rrd->header);
2703
2704 zio_cksum_t cksum_orig =
2705 drc->drc_next_rrd->header.drr_u.drr_checksum.drr_checksum;
2706 zio_cksum_t *cksump =
2707 &drc->drc_next_rrd->header.drr_u.drr_checksum.drr_checksum;
2708
2709 if (drc->drc_byteswap)
2710 byteswap_record(&drc->drc_next_rrd->header);
2711
2712 if ((!ZIO_CHECKSUM_IS_ZERO(cksump)) &&
2713 !ZIO_CHECKSUM_EQUAL(drc->drc_cksum, *cksump)) {
2714 kmem_free(drc->drc_next_rrd, sizeof (*drc->drc_next_rrd));
2715 drc->drc_next_rrd = NULL;
2716 return (SET_ERROR(ECKSUM));
2717 }
2718
2719 receive_cksum(drc, sizeof (cksum_orig), &cksum_orig);
2720
2721 return (0);
2722 }
2723
2724 /*
2725 * Issue the prefetch reads for any necessary indirect blocks.
2726 *
2727 * We use the object ignore list to tell us whether or not to issue prefetches
2728 * for a given object. We do this for both correctness (in case the blocksize
2729 * of an object has changed) and performance (if the object doesn't exist, don't
2730 * needlessly try to issue prefetches). We also trim the list as we go through
2731 * the stream to prevent it from growing to an unbounded size.
2732 *
2733 * The object numbers within will always be in sorted order, and any write
2734 * records we see will also be in sorted order, but they're not sorted with
2735 * respect to each other (i.e. we can get several object records before
2736 * receiving each object's write records). As a result, once we've reached a
2737 * given object number, we can safely remove any reference to lower object
2738 * numbers in the ignore list. In practice, we receive up to 32 object records
2739 * before receiving write records, so the list can have up to 32 nodes in it.
2740 */
2741 static void
2742 receive_read_prefetch(dmu_recv_cookie_t *drc, uint64_t object, uint64_t offset,
2743 uint64_t length)
2744 {
2745 if (!objlist_exists(drc->drc_ignore_objlist, object)) {
2746 dmu_prefetch(drc->drc_os, object, 1, offset, length,
2747 ZIO_PRIORITY_SYNC_READ);
2748 }
2749 }
2750
2751 /*
2752 * Read records off the stream, issuing any necessary prefetches.
2753 */
2754 static int
2755 receive_read_record(dmu_recv_cookie_t *drc)
2756 {
2757 int err;
2758
2759 switch (drc->drc_rrd->header.drr_type) {
2760 case DRR_OBJECT:
2761 {
2762 struct drr_object *drro =
2763 &drc->drc_rrd->header.drr_u.drr_object;
2764 uint32_t size = DRR_OBJECT_PAYLOAD_SIZE(drro);
2765 void *buf = NULL;
2766 dmu_object_info_t doi;
2767
2768 if (size != 0)
2769 buf = kmem_zalloc(size, KM_SLEEP);
2770
2771 err = receive_read_payload_and_next_header(drc, size, buf);
2772 if (err != 0) {
2773 kmem_free(buf, size);
2774 return (err);
2775 }
2776 err = dmu_object_info(drc->drc_os, drro->drr_object, &doi);
2777 /*
2778 * See receive_read_prefetch() for an explanation of why we're
2779 * storing this object in the ignore_objlist.
2780 */
2781 if (err == ENOENT || err == EEXIST ||
2782 (err == 0 && doi.doi_data_block_size != drro->drr_blksz)) {
2783 objlist_insert(drc->drc_ignore_objlist,
2784 drro->drr_object);
2785 err = 0;
2786 }
2787 return (err);
2788 }
2789 case DRR_FREEOBJECTS:
2790 {
2791 err = receive_read_payload_and_next_header(drc, 0, NULL);
2792 return (err);
2793 }
2794 case DRR_WRITE:
2795 {
2796 struct drr_write *drrw = &drc->drc_rrd->header.drr_u.drr_write;
2797 int size = DRR_WRITE_PAYLOAD_SIZE(drrw);
2798 abd_t *abd = abd_alloc_linear(size, B_FALSE);
2799 err = receive_read_payload_and_next_header(drc, size,
2800 abd_to_buf(abd));
2801 if (err != 0) {
2802 abd_free(abd);
2803 return (err);
2804 }
2805 drc->drc_rrd->abd = abd;
2806 receive_read_prefetch(drc, drrw->drr_object, drrw->drr_offset,
2807 drrw->drr_logical_size);
2808 return (err);
2809 }
2810 case DRR_WRITE_EMBEDDED:
2811 {
2812 struct drr_write_embedded *drrwe =
2813 &drc->drc_rrd->header.drr_u.drr_write_embedded;
2814 uint32_t size = P2ROUNDUP(drrwe->drr_psize, 8);
2815 void *buf = kmem_zalloc(size, KM_SLEEP);
2816
2817 err = receive_read_payload_and_next_header(drc, size, buf);
2818 if (err != 0) {
2819 kmem_free(buf, size);
2820 return (err);
2821 }
2822
2823 receive_read_prefetch(drc, drrwe->drr_object, drrwe->drr_offset,
2824 drrwe->drr_length);
2825 return (err);
2826 }
2827 case DRR_FREE:
2828 case DRR_REDACT:
2829 {
2830 /*
2831 * It might be beneficial to prefetch indirect blocks here, but
2832 * we don't really have the data to decide for sure.
2833 */
2834 err = receive_read_payload_and_next_header(drc, 0, NULL);
2835 return (err);
2836 }
2837 case DRR_END:
2838 {
2839 struct drr_end *drre = &drc->drc_rrd->header.drr_u.drr_end;
2840 if (!ZIO_CHECKSUM_EQUAL(drc->drc_prev_cksum,
2841 drre->drr_checksum))
2842 return (SET_ERROR(ECKSUM));
2843 return (0);
2844 }
2845 case DRR_SPILL:
2846 {
2847 struct drr_spill *drrs = &drc->drc_rrd->header.drr_u.drr_spill;
2848 int size = DRR_SPILL_PAYLOAD_SIZE(drrs);
2849 abd_t *abd = abd_alloc_linear(size, B_FALSE);
2850 err = receive_read_payload_and_next_header(drc, size,
2851 abd_to_buf(abd));
2852 if (err != 0)
2853 abd_free(abd);
2854 else
2855 drc->drc_rrd->abd = abd;
2856 return (err);
2857 }
2858 case DRR_OBJECT_RANGE:
2859 {
2860 err = receive_read_payload_and_next_header(drc, 0, NULL);
2861 return (err);
2862
2863 }
2864 default:
2865 return (SET_ERROR(EINVAL));
2866 }
2867 }
2868
2869
2870
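/*
 * Log the interesting fields of a replay record, along with the error it
 * produced (ZFS_DEBUG builds only).
 */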
2871 static void
2872 dprintf_drr(struct receive_record_arg *rrd, int err)
2873 {
2874 #ifdef ZFS_DEBUG
2875 switch (rrd->header.drr_type) {
2876 case DRR_OBJECT:
2877 {
2878 struct drr_object *drro = &rrd->header.drr_u.drr_object;
2879 dprintf("drr_type = OBJECT obj = %llu type = %u "
2880 "bonustype = %u blksz = %u bonuslen = %u cksumtype = %u "
2881 "compress = %u dn_slots = %u err = %d\n",
2882 (u_longlong_t)drro->drr_object, drro->drr_type,
2883 drro->drr_bonustype, drro->drr_blksz, drro->drr_bonuslen,
2884 drro->drr_checksumtype, drro->drr_compress,
2885 drro->drr_dn_slots, err);
2886 break;
2887 }
2888 case DRR_FREEOBJECTS:
2889 {
2890 struct drr_freeobjects *drrfo =
2891 &rrd->header.drr_u.drr_freeobjects;
2892 dprintf("drr_type = FREEOBJECTS firstobj = %llu "
2893 "numobjs = %llu err = %d\n",
2894 (u_longlong_t)drrfo->drr_firstobj,
2895 (u_longlong_t)drrfo->drr_numobjs, err);
2896 break;
2897 }
2898 case DRR_WRITE:
2899 {
2900 struct drr_write *drrw = &rrd->header.drr_u.drr_write;
2901 dprintf("drr_type = WRITE obj = %llu type = %u offset = %llu "
2902 "lsize = %llu cksumtype = %u flags = %u "
2903 "compress = %u psize = %llu err = %d\n",
2904 (u_longlong_t)drrw->drr_object, drrw->drr_type,
2905 (u_longlong_t)drrw->drr_offset,
2906 (u_longlong_t)drrw->drr_logical_size,
2907 drrw->drr_checksumtype, drrw->drr_flags,
2908 drrw->drr_compressiontype,
2909 (u_longlong_t)drrw->drr_compressed_size, err);
2910 break;
2911 }
2912 case DRR_WRITE_BYREF:
2913 {
2914 struct drr_write_byref *drrwbr =
2915 &rrd->header.drr_u.drr_write_byref;
2916 dprintf("drr_type = WRITE_BYREF obj = %llu offset = %llu "
2917 "length = %llu toguid = %llx refguid = %llx "
2918 "refobject = %llu refoffset = %llu cksumtype = %u "
2919 "flags = %u err = %d\n",
2920 (u_longlong_t)drrwbr->drr_object,
2921 (u_longlong_t)drrwbr->drr_offset,
2922 (u_longlong_t)drrwbr->drr_length,
2923 (u_longlong_t)drrwbr->drr_toguid,
2924 (u_longlong_t)drrwbr->drr_refguid,
2925 (u_longlong_t)drrwbr->drr_refobject,
2926 (u_longlong_t)drrwbr->drr_refoffset,
2927 drrwbr->drr_checksumtype, drrwbr->drr_flags, err);
2928 break;
2929 }
2930 case DRR_WRITE_EMBEDDED:
2931 {
2932 struct drr_write_embedded *drrwe =
2933 &rrd->header.drr_u.drr_write_embedded;
2934 dprintf("drr_type = WRITE_EMBEDDED obj = %llu offset = %llu "
2935 "length = %llu compress = %u etype = %u lsize = %u "
2936 "psize = %u err = %d\n",
2937 (u_longlong_t)drrwe->drr_object,
2938 (u_longlong_t)drrwe->drr_offset,
2939 (u_longlong_t)drrwe->drr_length,
2940 drrwe->drr_compression, drrwe->drr_etype,
2941 drrwe->drr_lsize, drrwe->drr_psize, err);
2942 break;
2943 }
2944 case DRR_FREE:
2945 {
2946 struct drr_free *drrf = &rrd->header.drr_u.drr_free;
2947 dprintf("drr_type = FREE obj = %llu offset = %llu "
2948 "length = %lld err = %d\n",
2949 (u_longlong_t)drrf->drr_object,
2950 (u_longlong_t)drrf->drr_offset,
2951 (longlong_t)drrf->drr_length,
2952 err);
2953 break;
2954 }
2955 case DRR_SPILL:
2956 {
2957 struct drr_spill *drrs = &rrd->header.drr_u.drr_spill;
2958 dprintf("drr_type = SPILL obj = %llu length = %llu "
2959 "err = %d\n", (u_longlong_t)drrs->drr_object,
2960 (u_longlong_t)drrs->drr_length, err);
2961 break;
2962 }
2963 case DRR_OBJECT_RANGE:
2964 {
2965 struct drr_object_range *drror =
2966 &rrd->header.drr_u.drr_object_range;
2967 dprintf("drr_type = OBJECT_RANGE firstobj = %llu "
2968 "numslots = %llu flags = %u err = %d\n",
2969 (u_longlong_t)drror->drr_firstobj,
2970 (u_longlong_t)drror->drr_numslots,
2971 drror->drr_flags, err);
2972 break;
2973 }
2974 default:
2975 return;
2976 }
2977 #endif
2978 }
2979
2980 /*
2981 * Commit the records to the pool.
2982 */
2983 static int
2984 receive_process_record(struct receive_writer_arg *rwa,
2985 struct receive_record_arg *rrd)
2986 {
2987 int err;
2988
2989 /* Processing in order, therefore bytes_read should be increasing. */
2990 ASSERT3U(rrd->bytes_read, >=, rwa->bytes_read);
2991 rwa->bytes_read = rrd->bytes_read;
2992
2993 /* We can only heal write records; other ones get ignored */
2994 if (rwa->heal && rrd->header.drr_type != DRR_WRITE) {
2995 if (rrd->abd != NULL) {
2996 abd_free(rrd->abd);
2997 rrd->abd = NULL;
2998 } else if (rrd->payload != NULL) {
2999 kmem_free(rrd->payload, rrd->payload_size);
3000 rrd->payload = NULL;
3001 }
3002 return (0);
3003 }
3004
3005 if (!rwa->heal && rrd->header.drr_type != DRR_WRITE) {
3006 err = flush_write_batch(rwa);
3007 if (err != 0) {
3008 if (rrd->abd != NULL) {
3009 abd_free(rrd->abd);
3010 rrd->abd = NULL;
3011 rrd->payload = NULL;
3012 } else if (rrd->payload != NULL) {
3013 kmem_free(rrd->payload, rrd->payload_size);
3014 rrd->payload = NULL;
3015 }
3016
3017 return (err);
3018 }
3019 }
3020
3021 switch (rrd->header.drr_type) {
3022 case DRR_OBJECT:
3023 {
3024 struct drr_object *drro = &rrd->header.drr_u.drr_object;
3025 err = receive_object(rwa, drro, rrd->payload);
3026 kmem_free(rrd->payload, rrd->payload_size);
3027 rrd->payload = NULL;
3028 break;
3029 }
3030 case DRR_FREEOBJECTS:
3031 {
3032 struct drr_freeobjects *drrfo =
3033 &rrd->header.drr_u.drr_freeobjects;
3034 err = receive_freeobjects(rwa, drrfo);
3035 break;
3036 }
3037 case DRR_WRITE:
3038 {
3039 err = receive_process_write_record(rwa, rrd);
3040 if (rwa->heal) {
3041 /*
3042 * If healing, always free the abd after processing.
3043 */
3044 abd_free(rrd->abd);
3045 rrd->abd = NULL;
3046 } else if (err != EAGAIN) {
3047 /*
3048 * On success, a non-healing
3049 * receive_process_write_record() returns
3050 * EAGAIN to indicate that we do not want to free
3051 * the rrd or its abd.
3052 */
3053 ASSERT(err != 0);
3054 abd_free(rrd->abd);
3055 rrd->abd = NULL;
3056 }
3057 break;
3058 }
3059 case DRR_WRITE_EMBEDDED:
3060 {
3061 struct drr_write_embedded *drrwe =
3062 &rrd->header.drr_u.drr_write_embedded;
3063 err = receive_write_embedded(rwa, drrwe, rrd->payload);
3064 kmem_free(rrd->payload, rrd->payload_size);
3065 rrd->payload = NULL;
3066 break;
3067 }
3068 case DRR_FREE:
3069 {
3070 struct drr_free *drrf = &rrd->header.drr_u.drr_free;
3071 err = receive_free(rwa, drrf);
3072 break;
3073 }
3074 case DRR_SPILL:
3075 {
3076 struct drr_spill *drrs = &rrd->header.drr_u.drr_spill;
3077 err = receive_spill(rwa, drrs, rrd->abd);
3078 if (err != 0)
3079 abd_free(rrd->abd);
3080 rrd->abd = NULL;
3081 rrd->payload = NULL;
3082 break;
3083 }
3084 case DRR_OBJECT_RANGE:
3085 {
3086 struct drr_object_range *drror =
3087 &rrd->header.drr_u.drr_object_range;
3088 err = receive_object_range(rwa, drror);
3089 break;
3090 }
3091 case DRR_REDACT:
3092 {
3093 struct drr_redact *drrr = &rrd->header.drr_u.drr_redact;
3094 err = receive_redact(rwa, drrr);
3095 break;
3096 }
3097 default:
3098 err = (SET_ERROR(EINVAL));
3099 }
3100
3101 if (err != 0)
3102 dprintf_drr(rrd, err);
3103
3104 return (err);
3105 }
3106
3107 /*
3108 * dmu_recv_stream's worker thread; pull records off the queue, and then call
3109 * receive_process_record(). When we're done, signal the main thread and exit.
3110 */
3111 static __attribute__((noreturn)) void
3112 receive_writer_thread(void *arg)
3113 {
3114 struct receive_writer_arg *rwa = arg;
3115 struct receive_record_arg *rrd;
3116 fstrans_cookie_t cookie = spl_fstrans_mark();
3117
3118 for (rrd = bqueue_dequeue(&rwa->q); !rrd->eos_marker;
3119 rrd = bqueue_dequeue(&rwa->q)) {
3120 /*
3121 * If there's an error, the main thread will stop putting things
3122 * on the queue, but we need to clear everything in it before we
3123 * can exit.
3124 */
3125 int err = 0;
3126 if (rwa->err == 0) {
3127 err = receive_process_record(rwa, rrd);
3128 } else if (rrd->abd != NULL) {
3129 abd_free(rrd->abd);
3130 rrd->abd = NULL;
3131 rrd->payload = NULL;
3132 } else if (rrd->payload != NULL) {
3133 kmem_free(rrd->payload, rrd->payload_size);
3134 rrd->payload = NULL;
3135 }
3136 /*
3137 * EAGAIN indicates that this record has been saved (on
3138 * rwa->write_batch), and will be used again, so we don't
3139 * free it.
3140 * When healing data we always need to free the record.
3141 */
3142 if (err != EAGAIN || rwa->heal) {
3143 if (rwa->err == 0)
3144 rwa->err = err;
3145 kmem_free(rrd, sizeof (*rrd));
3146 }
3147 }
3148 kmem_free(rrd, sizeof (*rrd));
3149
3150 if (rwa->heal) {
3151 zio_wait(rwa->heal_pio);
3152 } else {
3153 int err = flush_write_batch(rwa);
3154 if (rwa->err == 0)
3155 rwa->err = err;
3156 }
3157 mutex_enter(&rwa->mutex);
3158 rwa->done = B_TRUE;
3159 cv_signal(&rwa->cv);
3160 mutex_exit(&rwa->mutex);
3161 spl_fstrans_unmark(cookie);
3162 thread_exit();
3163 }
3164
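/*
 * Verify that the resume_object and resume_offset in the stream's BEGIN
 * payload match the resume state recorded on disk for this dataset.
 */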
3165 static int
3166 resume_check(dmu_recv_cookie_t *drc, nvlist_t *begin_nvl)
3167 {
3168 uint64_t val;
3169 objset_t *mos = dmu_objset_pool(drc->drc_os)->dp_meta_objset;
3170 uint64_t dsobj = dmu_objset_id(drc->drc_os);
3171 uint64_t resume_obj, resume_off;
3172
3173 if (nvlist_lookup_uint64(begin_nvl,
3174 "resume_object", &resume_obj) != 0 ||
3175 nvlist_lookup_uint64(begin_nvl,
3176 "resume_offset", &resume_off) != 0) {
3177 return (SET_ERROR(EINVAL));
3178 }
3179 VERIFY0(zap_lookup(mos, dsobj,
3180 DS_FIELD_RESUME_OBJECT, sizeof (val), 1, &val));
3181 if (resume_obj != val)
3182 return (SET_ERROR(EINVAL));
3183 VERIFY0(zap_lookup(mos, dsobj,
3184 DS_FIELD_RESUME_OFFSET, sizeof (val), 1, &val));
3185 if (resume_off != val)
3186 return (SET_ERROR(EINVAL));
3187
3188 return (0);
3189 }
3190
3191 /*
3192 * Read in the stream's records, one by one, and apply them to the pool. There
3193 * are two threads involved; the thread that calls this function will spin up a
3194 * worker thread, read the records off the stream one by one, and issue
3195 * prefetches for any necessary indirect blocks. It will then push the records
3196 * onto an internal blocking queue. The worker thread will pull the records off
3197 * the queue, and actually write the data into the DMU. This way, the worker
3198 * thread doesn't have to wait for reads to complete, since everything it needs
3199 * (the indirect blocks) will be prefetched.
3200 *
3201 * NB: callers *must* call dmu_recv_end() if this succeeds.
3202 */
3203 int
3204 dmu_recv_stream(dmu_recv_cookie_t *drc, offset_t *voffp)
3205 {
3206 int err = 0;
3207 struct receive_writer_arg *rwa = kmem_zalloc(sizeof (*rwa), KM_SLEEP);
3208
3209 if (dsl_dataset_has_resume_receive_state(drc->drc_ds)) {
3210 uint64_t bytes = 0;
3211 (void) zap_lookup(drc->drc_ds->ds_dir->dd_pool->dp_meta_objset,
3212 drc->drc_ds->ds_object, DS_FIELD_RESUME_BYTES,
3213 sizeof (bytes), 1, &bytes);
3214 drc->drc_bytes_read += bytes;
3215 }
3216
3217 drc->drc_ignore_objlist = objlist_create();
3218
3219 /* these were verified in dmu_recv_begin */
3220 ASSERT3U(DMU_GET_STREAM_HDRTYPE(drc->drc_drrb->drr_versioninfo), ==,
3221 DMU_SUBSTREAM);
3222 ASSERT3U(drc->drc_drrb->drr_type, <, DMU_OST_NUMTYPES);
3223
3224 ASSERT(dsl_dataset_phys(drc->drc_ds)->ds_flags & DS_FLAG_INCONSISTENT);
3225 ASSERT0(drc->drc_os->os_encrypted &&
3226 (drc->drc_featureflags & DMU_BACKUP_FEATURE_EMBED_DATA));
3227
3228 /* handle DSL encryption key payload */
3229 if (drc->drc_featureflags & DMU_BACKUP_FEATURE_RAW) {
3230 nvlist_t *keynvl = NULL;
3231
3232 ASSERT(drc->drc_os->os_encrypted);
3233 ASSERT(drc->drc_raw);
3234
3235 err = nvlist_lookup_nvlist(drc->drc_begin_nvl, "crypt_keydata",
3236 &keynvl);
3237 if (err != 0)
3238 goto out;
3239
3240 if (!drc->drc_heal) {
3241 /*
3242 * If this is a new dataset we set the key immediately.
3243 * Otherwise we don't want to change the key until we
3244 * are sure the rest of the receive succeeded so we
3245 * stash the keynvl away until then.
3246 */
3247 err = dsl_crypto_recv_raw(spa_name(drc->drc_os->os_spa),
3248 drc->drc_ds->ds_object, drc->drc_fromsnapobj,
3249 drc->drc_drrb->drr_type, keynvl, drc->drc_newfs);
3250 if (err != 0)
3251 goto out;
3252 }
3253
3254 /* see comment in dmu_recv_end_sync() */
3255 drc->drc_ivset_guid = 0;
3256 (void) nvlist_lookup_uint64(keynvl, "to_ivset_guid",
3257 &drc->drc_ivset_guid);
3258
3259 if (!drc->drc_newfs)
3260 drc->drc_keynvl = fnvlist_dup(keynvl);
3261 }
3262
3263 if (drc->drc_featureflags & DMU_BACKUP_FEATURE_RESUMING) {
3264 err = resume_check(drc, drc->drc_begin_nvl);
3265 if (err != 0)
3266 goto out;
3267 }
3268
3269 /*
3270 * If we failed before this point we will clean up any new resume
3271 * state that was created. Now that we've gotten past the initial
3272 * checks we are ok to retain that resume state.
3273 */
3274 drc->drc_should_save = B_TRUE;
3275
3276 (void) bqueue_init(&rwa->q, zfs_recv_queue_ff,
3277 MAX(zfs_recv_queue_length, 2 * zfs_max_recordsize),
3278 offsetof(struct receive_record_arg, node));
3279 cv_init(&rwa->cv, NULL, CV_DEFAULT, NULL);
3280 mutex_init(&rwa->mutex, NULL, MUTEX_DEFAULT, NULL);
3281 rwa->os = drc->drc_os;
3282 rwa->byteswap = drc->drc_byteswap;
3283 rwa->heal = drc->drc_heal;
3284 rwa->tofs = drc->drc_tofs;
3285 rwa->resumable = drc->drc_resumable;
3286 rwa->raw = drc->drc_raw;
3287 rwa->spill = drc->drc_spill;
3288 rwa->full = (drc->drc_drr_begin->drr_u.drr_begin.drr_fromguid == 0);
3289 rwa->os->os_raw_receive = drc->drc_raw;
3290 if (drc->drc_heal) {
3291 rwa->heal_pio = zio_root(drc->drc_os->os_spa, NULL, NULL,
3292 ZIO_FLAG_GODFATHER);
3293 }
3294 list_create(&rwa->write_batch, sizeof (struct receive_record_arg),
3295 offsetof(struct receive_record_arg, node.bqn_node));
3296
3297 (void) thread_create(NULL, 0, receive_writer_thread, rwa, 0, curproc,
3298 TS_RUN, minclsyspri);
3299 /*
3300 * We're reading rwa->err without locks, which is safe since we are the
3301 * only reader, and the worker thread is the only writer. It's ok if we
3302 * miss a write for an iteration or two of the loop, since the writer
3303 * thread will keep freeing records we send it until we send it an eos
3304 * marker.
3305 *
3306 * We can leave this loop in 3 ways: First, if rwa->err is
3307 * non-zero. In that case, the writer thread will free the rrd we just
3308 * pushed. Second, if we're interrupted; in that case, either it's the
3309 * first loop and drc->drc_rrd was never allocated, or it's later, and
3310 * drc->drc_rrd has been handed off to the writer thread who will free
3311 * it. Finally, if receive_read_record fails or we're at the end of the
3312 * stream, then we free drc->drc_rrd and exit.
3313 */
3314 while (rwa->err == 0) {
3315 if (issig(JUSTLOOKING) && issig(FORREAL)) {
3316 err = SET_ERROR(EINTR);
3317 break;
3318 }
3319
3320 ASSERT3P(drc->drc_rrd, ==, NULL);
3321 drc->drc_rrd = drc->drc_next_rrd;
3322 drc->drc_next_rrd = NULL;
3323 /* Allocates and loads header into drc->drc_next_rrd */
3324 err = receive_read_record(drc);
3325
3326 if (drc->drc_rrd->header.drr_type == DRR_END || err != 0) {
3327 kmem_free(drc->drc_rrd, sizeof (*drc->drc_rrd));
3328 drc->drc_rrd = NULL;
3329 break;
3330 }
3331
3332 bqueue_enqueue(&rwa->q, drc->drc_rrd,
3333 sizeof (struct receive_record_arg) +
3334 drc->drc_rrd->payload_size);
3335 drc->drc_rrd = NULL;
3336 }
3337
3338 ASSERT3P(drc->drc_rrd, ==, NULL);
3339 drc->drc_rrd = kmem_zalloc(sizeof (*drc->drc_rrd), KM_SLEEP);
3340 drc->drc_rrd->eos_marker = B_TRUE;
3341 bqueue_enqueue_flush(&rwa->q, drc->drc_rrd, 1);
3342
3343 mutex_enter(&rwa->mutex);
3344 while (!rwa->done) {
3345 /*
3346 * We need to use cv_wait_sig() so that any process that may
3347 * be sleeping here can still fork.
3348 */
3349 (void) cv_wait_sig(&rwa->cv, &rwa->mutex);
3350 }
3351 mutex_exit(&rwa->mutex);
3352
3353 /*
3354 * If we are receiving a full stream as a clone, all object IDs which
3355 * are greater than the maximum ID referenced in the stream are
3356 * by definition unused and must be freed.
3357 */
3358 if (drc->drc_clone && drc->drc_drrb->drr_fromguid == 0) {
3359 uint64_t obj = rwa->max_object + 1;
3360 int free_err = 0;
3361 int next_err = 0;
3362
3363 while (next_err == 0) {
3364 free_err = dmu_free_long_object(rwa->os, obj);
3365 if (free_err != 0 && free_err != ENOENT)
3366 break;
3367
3368 next_err = dmu_object_next(rwa->os, &obj, FALSE, 0);
3369 }
3370
3371 if (err == 0) {
3372 if (free_err != 0 && free_err != ENOENT)
3373 err = free_err;
3374 else if (next_err != ESRCH)
3375 err = next_err;
3376 }
3377 }
3378
3379 cv_destroy(&rwa->cv);
3380 mutex_destroy(&rwa->mutex);
3381 bqueue_destroy(&rwa->q);
3382 list_destroy(&rwa->write_batch);
3383 if (err == 0)
3384 err = rwa->err;
3385
3386 out:
3387 /*
3388 * If we hit an error before we started the receive_writer_thread
3389 * we need to clean up the next_rrd we created when processing the
3390 * DRR_BEGIN record.
3391 */
3392 if (drc->drc_next_rrd != NULL)
3393 kmem_free(drc->drc_next_rrd, sizeof (*drc->drc_next_rrd));
3394
3395 /*
3396 * The objset will be invalidated by dmu_recv_end() when we do
3397 * dsl_dataset_clone_swap_sync_impl().
3398 */
3399 drc->drc_os = NULL;
3400
3401 kmem_free(rwa, sizeof (*rwa));
3402 nvlist_free(drc->drc_begin_nvl);
3403
3404 if (err != 0) {
3405 /*
3406 * Clean up references. If the receive is not resumable,
3407 * destroy what we created, so we don't leave it in
3408 * an inconsistent state.
3409 */
3410 dmu_recv_cleanup_ds(drc);
3411 nvlist_free(drc->drc_keynvl);
3412 }
3413
3414 objlist_destroy(drc->drc_ignore_objlist);
3415 drc->drc_ignore_objlist = NULL;
3416 *voffp = drc->drc_voff;
3417 return (err);
3418 }
3419
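/*
 * Check phase of the dmu_recv_end() sync task: verify that the received
 * dataset can be swapped in and snapshotted (destroying any conflicting
 * snapshots when drc_force is set) before dmu_recv_end_sync() commits it.
 */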
3420 static int
3421 dmu_recv_end_check(void *arg, dmu_tx_t *tx)
3422 {
3423 dmu_recv_cookie_t *drc = arg;
3424 dsl_pool_t *dp = dmu_tx_pool(tx);
3425 int error;
3426
3427 ASSERT3P(drc->drc_ds->ds_owner, ==, dmu_recv_tag);
3428
3429 if (drc->drc_heal) {
3430 error = 0;
3431 } else if (!drc->drc_newfs) {
3432 dsl_dataset_t *origin_head;
3433
3434 error = dsl_dataset_hold(dp, drc->drc_tofs, FTAG, &origin_head);
3435 if (error != 0)
3436 return (error);
3437 if (drc->drc_force) {
3438 /*
3439 * We will destroy any snapshots in tofs (i.e. before
3440 * origin_head) that are after the origin (which is
3441 * the snap before drc_ds, because drc_ds can not
3442 * have any snaps of its own).
3443 */
3444 uint64_t obj;
3445
3446 obj = dsl_dataset_phys(origin_head)->ds_prev_snap_obj;
3447 while (obj !=
3448 dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj) {
3449 dsl_dataset_t *snap;
3450 error = dsl_dataset_hold_obj(dp, obj, FTAG,
3451 &snap);
3452 if (error != 0)
3453 break;
3454 if (snap->ds_dir != origin_head->ds_dir)
3455 error = SET_ERROR(EINVAL);
3456 if (error == 0) {
3457 error = dsl_destroy_snapshot_check_impl(
3458 snap, B_FALSE);
3459 }
3460 obj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
3461 dsl_dataset_rele(snap, FTAG);
3462 if (error != 0)
3463 break;
3464 }
3465 if (error != 0) {
3466 dsl_dataset_rele(origin_head, FTAG);
3467 return (error);
3468 }
3469 }
3470 if (drc->drc_keynvl != NULL) {
3471 error = dsl_crypto_recv_raw_key_check(drc->drc_ds,
3472 drc->drc_keynvl, tx);
3473 if (error != 0) {
3474 dsl_dataset_rele(origin_head, FTAG);
3475 return (error);
3476 }
3477 }
3478
3479 error = dsl_dataset_clone_swap_check_impl(drc->drc_ds,
3480 origin_head, drc->drc_force, drc->drc_owner, tx);
3481 if (error != 0) {
3482 dsl_dataset_rele(origin_head, FTAG);
3483 return (error);
3484 }
3485 error = dsl_dataset_snapshot_check_impl(origin_head,
3486 drc->drc_tosnap, tx, B_TRUE, 1,
3487 drc->drc_cred, drc->drc_proc);
3488 dsl_dataset_rele(origin_head, FTAG);
3489 if (error != 0)
3490 return (error);
3491
3492 error = dsl_destroy_head_check_impl(drc->drc_ds, 1);
3493 } else {
3494 error = dsl_dataset_snapshot_check_impl(drc->drc_ds,
3495 drc->drc_tosnap, tx, B_TRUE, 1,
3496 drc->drc_cred, drc->drc_proc);
3497 }
3498 return (error);
3499 }
3500
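/*
 * Sync phase of dmu_recv_end(): swap the received clone with the target
 * filesystem (for incremental receives), create the new snapshot, apply
 * any raw-receive key data, and clear the dataset's inconsistent flag
 * and resume state.
 */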
3501 static void
3502 dmu_recv_end_sync(void *arg, dmu_tx_t *tx)
3503 {
3504 dmu_recv_cookie_t *drc = arg;
3505 dsl_pool_t *dp = dmu_tx_pool(tx);
3506 boolean_t encrypted = drc->drc_ds->ds_dir->dd_crypto_obj != 0;
3507 uint64_t newsnapobj = 0;
3508
3509 spa_history_log_internal_ds(drc->drc_ds, "finish receiving",
3510 tx, "snap=%s", drc->drc_tosnap);
3511 drc->drc_ds->ds_objset->os_raw_receive = B_FALSE;
3512
3513 if (drc->drc_heal) {
3514 if (drc->drc_keynvl != NULL) {
3515 nvlist_free(drc->drc_keynvl);
3516 drc->drc_keynvl = NULL;
3517 }
3518 } else if (!drc->drc_newfs) {
3519 dsl_dataset_t *origin_head;
3520
3521 VERIFY0(dsl_dataset_hold(dp, drc->drc_tofs, FTAG,
3522 &origin_head));
3523
3524 if (drc->drc_force) {
3525 /*
3526 * Destroy any snapshots of drc_tofs (origin_head)
3527 * after the origin (the snap before drc_ds).
3528 */
3529 uint64_t obj;
3530
3531 obj = dsl_dataset_phys(origin_head)->ds_prev_snap_obj;
3532 while (obj !=
3533 dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj) {
3534 dsl_dataset_t *snap;
3535 VERIFY0(dsl_dataset_hold_obj(dp, obj, FTAG,
3536 &snap));
3537 ASSERT3P(snap->ds_dir, ==, origin_head->ds_dir);
3538 obj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
3539 dsl_destroy_snapshot_sync_impl(snap,
3540 B_FALSE, tx);
3541 dsl_dataset_rele(snap, FTAG);
3542 }
3543 }
3544 if (drc->drc_keynvl != NULL) {
3545 dsl_crypto_recv_raw_key_sync(drc->drc_ds,
3546 drc->drc_keynvl, tx);
3547 nvlist_free(drc->drc_keynvl);
3548 drc->drc_keynvl = NULL;
3549 }
3550
3551 VERIFY3P(drc->drc_ds->ds_prev, ==,
3552 origin_head->ds_prev);
3553
3554 dsl_dataset_clone_swap_sync_impl(drc->drc_ds,
3555 origin_head, tx);
3556 /*
3557 * The objset was evicted by dsl_dataset_clone_swap_sync_impl,
3558 * so drc_os is no longer valid.
3559 */
3560 drc->drc_os = NULL;
3561
3562 dsl_dataset_snapshot_sync_impl(origin_head,
3563 drc->drc_tosnap, tx);
3564
3565 /* set snapshot's creation time and guid */
3566 dmu_buf_will_dirty(origin_head->ds_prev->ds_dbuf, tx);
3567 dsl_dataset_phys(origin_head->ds_prev)->ds_creation_time =
3568 drc->drc_drrb->drr_creation_time;
3569 dsl_dataset_phys(origin_head->ds_prev)->ds_guid =
3570 drc->drc_drrb->drr_toguid;
3571 dsl_dataset_phys(origin_head->ds_prev)->ds_flags &=
3572 ~DS_FLAG_INCONSISTENT;
3573
3574 dmu_buf_will_dirty(origin_head->ds_dbuf, tx);
3575 dsl_dataset_phys(origin_head)->ds_flags &=
3576 ~DS_FLAG_INCONSISTENT;
3577
3578 newsnapobj =
3579 dsl_dataset_phys(origin_head)->ds_prev_snap_obj;
3580
3581 dsl_dataset_rele(origin_head, FTAG);
3582 dsl_destroy_head_sync_impl(drc->drc_ds, tx);
3583
3584 if (drc->drc_owner != NULL)
3585 VERIFY3P(origin_head->ds_owner, ==, drc->drc_owner);
3586 } else {
3587 dsl_dataset_t *ds = drc->drc_ds;
3588
3589 dsl_dataset_snapshot_sync_impl(ds, drc->drc_tosnap, tx);
3590
3591 /* set snapshot's creation time and guid */
3592 dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
3593 dsl_dataset_phys(ds->ds_prev)->ds_creation_time =
3594 drc->drc_drrb->drr_creation_time;
3595 dsl_dataset_phys(ds->ds_prev)->ds_guid =
3596 drc->drc_drrb->drr_toguid;
3597 dsl_dataset_phys(ds->ds_prev)->ds_flags &=
3598 ~DS_FLAG_INCONSISTENT;
3599
3600 dmu_buf_will_dirty(ds->ds_dbuf, tx);
3601 dsl_dataset_phys(ds)->ds_flags &= ~DS_FLAG_INCONSISTENT;
3602 if (dsl_dataset_has_resume_receive_state(ds)) {
3603 (void) zap_remove(dp->dp_meta_objset, ds->ds_object,
3604 DS_FIELD_RESUME_FROMGUID, tx);
3605 (void) zap_remove(dp->dp_meta_objset, ds->ds_object,
3606 DS_FIELD_RESUME_OBJECT, tx);
3607 (void) zap_remove(dp->dp_meta_objset, ds->ds_object,
3608 DS_FIELD_RESUME_OFFSET, tx);
3609 (void) zap_remove(dp->dp_meta_objset, ds->ds_object,
3610 DS_FIELD_RESUME_BYTES, tx);
3611 (void) zap_remove(dp->dp_meta_objset, ds->ds_object,
3612 DS_FIELD_RESUME_TOGUID, tx);
3613 (void) zap_remove(dp->dp_meta_objset, ds->ds_object,
3614 DS_FIELD_RESUME_TONAME, tx);
3615 (void) zap_remove(dp->dp_meta_objset, ds->ds_object,
3616 DS_FIELD_RESUME_REDACT_BOOKMARK_SNAPS, tx);
3617 }
3618 newsnapobj =
3619 dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj;
3620 }
3621
3622 /*
3623 * If this is a raw receive, the crypt_keydata nvlist will include
3624 * a to_ivset_guid for us to set on the new snapshot. This value
3625 * will override the value generated by the snapshot code. However,
3626 * this value may not be present, because older implementations of
3627 * the raw send code did not include it. Such streams may still be
3628 * received if the zfs_disable_ivset_guid_check tunable is set, in
3629 * which case we leave the newly-generated value in place for the
3630 * new snapshot.
3631 */
3632 if (!drc->drc_heal && drc->drc_raw && drc->drc_ivset_guid != 0) {
3633 dmu_object_zapify(dp->dp_meta_objset, newsnapobj,
3634 DMU_OT_DSL_DATASET, tx);
3635 VERIFY0(zap_update(dp->dp_meta_objset, newsnapobj,
3636 DS_FIELD_IVSET_GUID, sizeof (uint64_t), 1,
3637 &drc->drc_ivset_guid, tx));
3638 }
3639
3640 /*
3641 * Release the hold from dmu_recv_begin. This must be done before
3642 * we return to open context, so that when we free the dataset's dnode
3643 * we can evict its bonus buffer. Since the dataset may be destroyed
3644 * at this point (and therefore won't have a valid pointer to the spa),
3645 * we release the key mapping (if one exists) manually here, while we
3646 * still have a valid spa pointer.
3647 */
3648 if (!drc->drc_raw && encrypted) {
3649 (void) spa_keystore_remove_mapping(dmu_tx_pool(tx)->dp_spa,
3650 drc->drc_ds->ds_object, drc->drc_ds);
3651 }
3652 dsl_dataset_disown(drc->drc_ds, 0, dmu_recv_tag);
3653 drc->drc_ds = NULL;
3654 }
3655
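/*
 * Rough count of the on-disk blocks modified by the dmu_recv_end sync
 * task, passed to dsl_sync_task() for its space check.
 */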
3656 static int dmu_recv_end_modified_blocks = 3;
3657
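/*
 * Finish a receive into an existing filesystem by running
 * dmu_recv_end_check()/dmu_recv_end_sync() as a sync task on drc_tofs.
 */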
3658 static int
3659 dmu_recv_existing_end(dmu_recv_cookie_t *drc)
3660 {
3661 #ifdef _KERNEL
3662 /*
3663 * We will be destroying the ds; make sure its origin is unmounted if
3664 * necessary.
3665 */
3666 char name[ZFS_MAX_DATASET_NAME_LEN];
3667 dsl_dataset_name(drc->drc_ds, name);
3668 zfs_destroy_unmount_origin(name);
3669 #endif
3670
3671 return (dsl_sync_task(drc->drc_tofs,
3672 dmu_recv_end_check, dmu_recv_end_sync, drc,
3673 dmu_recv_end_modified_blocks, ZFS_SPACE_CHECK_NORMAL));
3674 }
3675
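/*
 * Finish a receive that created a new filesystem; the same sync task as
 * above, but there is no origin filesystem to unmount first.
 */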
3676 static int
3677 dmu_recv_new_end(dmu_recv_cookie_t *drc)
3678 {
3679 return (dsl_sync_task(drc->drc_tofs,
3680 dmu_recv_end_check, dmu_recv_end_sync, drc,
3681 dmu_recv_end_modified_blocks, ZFS_SPACE_CHECK_NORMAL));
3682 }
3683
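/*
 * Finalize a receive previously set up with dmu_recv_begin() and fed by
 * dmu_recv_stream(); the usual caller sequence is roughly:
 *
 *	dmu_recv_begin()  ->  dmu_recv_stream()  ->  dmu_recv_end()
 *
 * On failure the partially received state is cleaned up (unless the
 * receive is resumable); on success zvol minors are created for the new
 * filesystem (if any) and for the received snapshot (healing receives
 * skip this step).
 */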
3684 int
3685 dmu_recv_end(dmu_recv_cookie_t *drc, void *owner)
3686 {
3687 int error;
3688
3689 drc->drc_owner = owner;
3690
3691 if (drc->drc_newfs)
3692 error = dmu_recv_new_end(drc);
3693 else
3694 error = dmu_recv_existing_end(drc);
3695
3696 if (error != 0) {
3697 dmu_recv_cleanup_ds(drc);
3698 nvlist_free(drc->drc_keynvl);
3699 } else if (!drc->drc_heal) {
3700 if (drc->drc_newfs) {
3701 zvol_create_minor(drc->drc_tofs);
3702 }
3703 char *snapname = kmem_asprintf("%s@%s",
3704 drc->drc_tofs, drc->drc_tosnap);
3705 zvol_create_minor(snapname);
3706 kmem_strfree(snapname);
3707 }
3708 return (error);
3709 }
3710
3711 /*
3712 * Return TRUE if this objset is currently being received into.
3713 */
3714 boolean_t
3715 dmu_objset_is_receiving(objset_t *os)
3716 {
3717 return (os->os_dsl_dataset != NULL &&
3718 os->os_dsl_dataset->ds_owner == dmu_recv_tag);
3719 }
3720
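/*
 * Tunables for the receive path. On Linux builds these module
 * parameters are typically exposed under /sys/module/zfs/parameters/,
 * e.g. (assuming the zfs module is loaded):
 *
 *	echo 33554432 > /sys/module/zfs/parameters/zfs_recv_queue_length
 *
 * The exact mechanism and path are platform-dependent (on FreeBSD they
 * appear as vfs.zfs sysctls).
 */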
3721 ZFS_MODULE_PARAM(zfs_recv, zfs_recv_, queue_length, INT, ZMOD_RW,
3722 "Maximum receive queue length");
3723
3724 ZFS_MODULE_PARAM(zfs_recv, zfs_recv_, queue_ff, INT, ZMOD_RW,
3725 "Receive queue fill fraction");
3726
3727 ZFS_MODULE_PARAM(zfs_recv, zfs_recv_, write_batch_size, INT, ZMOD_RW,
3728 "Maximum amount of writes to batch into one transaction");
3729
3730 ZFS_MODULE_PARAM(zfs_recv, zfs_recv_, best_effort_corrective, INT, ZMOD_RW,
3731 "Ignore errors during corrective receive");
3732 /* END CSTYLED */